/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vmmeter.h	8.2 (Berkeley) 7/10/94
 * $FreeBSD: src/sys/sys/vmmeter.h,v 1.21.2.2 2002/10/10 19:28:21 dillon Exp $
 */

#ifndef _VM_VM_PAGE2_H_
#define _VM_VM_PAGE2_H_

#ifdef _KERNEL

#ifndef _SYS_VMMETER_H_
#include <sys/vmmeter.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _VM_VM_PAGE_H_
#include <vm/vm_page.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_SPINLOCK2_H_
#include <sys/spinlock2.h>
#endif

/*
 * Return TRUE if we are under our severe low-free-pages threshold
 *
 * This causes user processes to stall to avoid exhausting memory that
 * the kernel might need.
 *
 * reserved < severe < minimum < target < paging_target
 */
static __inline int
vm_page_count_severe(void)
{
	return (vmstats.v_free_severe >
		vmstats.v_free_count + vmstats.v_cache_count ||
		vmstats.v_free_reserved > vmstats.v_free_count);
}

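/*
 * Usage sketch (illustrative only, not part of the original header): a
 * user-side allocation path might stall while the severe threshold is
 * breached.  vm_wait() is assumed from the surrounding VM code; its
 * timeout argument is an assumption as well.
 *
 *	while (vm_page_count_severe())
 *		vm_wait(0);
 */
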
/*
 * Return TRUE if we are under our minimum low-free-pages threshold.
 * This activates the pageout daemon.  The pageout daemon tries to
 * reach the target but may stop once it satisfies the minimum.
 *
 * reserved < severe < minimum < target < paging_target
 */
static __inline int
vm_page_count_min(int donotcount)
{
	return (vmstats.v_free_min + donotcount >
		(vmstats.v_free_count + vmstats.v_cache_count) ||
		vmstats.v_free_reserved > vmstats.v_free_count);
}

/*
 * Return TRUE if we are under our free page target.  The pageout daemon
 * tries to reach the target but may stop once it gets past the min.
 *
 * User threads doing normal allocations might wait based on this
 * function but MUST NOT wait in a loop based on this function as the
 * VM load may prevent the target from being reached.
 */
static __inline int
vm_page_count_target(void)
{
	return (vmstats.v_free_target >
		(vmstats.v_free_count + vmstats.v_cache_count) ||
		vmstats.v_free_reserved > vmstats.v_free_count);
}

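/*
 * Usage sketch (illustrative): per the warning above, a user thread may
 * check and wait once, but must not loop on this function.  The
 * vm_wait_nominal() primitive named here is an assumption, not something
 * this header defines.
 *
 *	if (vm_page_count_target())
 *		vm_wait_nominal();
 */
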
/*
 * Return the number of pages the pageout daemon needs to move into the
 * cache or free lists.  A negative number means we have sufficient free
 * pages.
 *
 * The target free+cache is greater than vm_page_count_target().  The
 * frontend uses vm_page_count_target() while the backend continues freeing
 * based on vm_paging_target().
 *
 * This function DOES NOT return TRUE or FALSE.
 */
static __inline int
vm_paging_target(void)
{
	return (
		(vmstats.v_free_target + vmstats.v_cache_min) -
		(vmstats.v_free_count + vmstats.v_cache_count)
	);
}

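/*
 * Usage sketch (illustrative): the pageout backend can treat the return
 * value as a page deficit and keep working until it goes non-positive.
 * free_one_page() is a hypothetical stand-in for the daemon's real work.
 *
 *	int deficit = vm_paging_target();
 *
 *	while (deficit-- > 0)
 *		free_one_page();
 */
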
/*
 * Return TRUE if hysteresis dictates we should nominally wakeup the
 * pageout daemon to start working on freeing up some memory.  This
 * routine should NOT be used to determine when to block on the VM system.
 * We want to wakeup the pageout daemon before we might otherwise block.
 *
 * Paging begins when cache+free drops below cache_min + free_min.
 */
static __inline int
vm_paging_needed(void)
{
	if (vmstats.v_free_min + vmstats.v_cache_min >
	    vmstats.v_free_count + vmstats.v_cache_count) {
		return (TRUE);
	}
	if (vmstats.v_free_min > vmstats.v_free_count)
		return (TRUE);
	return (FALSE);
}

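/*
 * Usage sketch (illustrative): an allocation path can use this check to
 * nudge the pageout daemon early instead of blocking.  pagedaemon_wakeup()
 * is assumed from the surrounding VM code.
 *
 *	if (vm_paging_needed())
 *		pagedaemon_wakeup();
 */
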
static __inline void
vm_page_event(vm_page_t m, vm_page_event_t event)
{
	if (m->flags & PG_ACTIONLIST)
		vm_page_event_internal(m, event);
}

static __inline void
vm_page_init_action(vm_page_t m, vm_page_action_t action,
		    void (*func)(vm_page_t, vm_page_action_t), void *data)
{
	action->m = m;
	action->func = func;
	action->data = data;
}

/*
 * Clear dirty bits in the VM page but truncate the
 * end to a DEV_BSIZE'd boundary.
 *
 * Used when reading data in, typically via getpages.
 * The partial device block at the end of the truncation
 * range should not lose its dirty bit.
 *
 * NOTE: This function does not clear the pmap modified bit.
 */
static __inline void
vm_page_clear_dirty_end_nonincl(vm_page_t m, int base, int size)
{
	size = (base + size) & ~DEV_BMASK;
	if (base < size)
		vm_page_clear_dirty(m, base, size - base);
}

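/*
 * Worked example (assuming DEV_BSIZE = 512, so DEV_BMASK = 511):
 * base = 100 and size = 1000 give an end offset of 1100, which the mask
 * truncates down to 1024.  Only [100, 1024) is cleared; the partial
 * device block [1024, 1100) keeps its dirty bit.
 */
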
/*
 * Clear dirty bits in the VM page but truncate the
 * beginning to a DEV_BSIZE'd boundary.
 *
 * Used when truncating a buffer.  The partial device
 * block at the beginning of the truncation range
 * should not lose its dirty bit.
 *
 * NOTE: This function does not clear the pmap modified bit.
 */
static __inline void
vm_page_clear_dirty_beg_nonincl(vm_page_t m, int base, int size)
{
	size += base;
	base = (base + DEV_BMASK) & ~DEV_BMASK;
	if (base < size)
		vm_page_clear_dirty(m, base, size - base);
}

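/*
 * Worked example (assuming DEV_BSIZE = 512, so DEV_BMASK = 511):
 * base = 100 and size = 1000 give an end offset of 1100, and base is
 * rounded up to 512.  Only [512, 1100) is cleared; the partial device
 * block [100, 512) keeps its dirty bit.
 */
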
static __inline void
vm_page_spin_lock(vm_page_t m)
{
	spin_pool_lock(m);
}

static __inline void
vm_page_spin_unlock(vm_page_t m)
{
	spin_pool_unlock(m);
}

/*
 * Wire a vm_page that is already wired.  Does not require a busied
 * page.
 */
static __inline void
vm_page_wire_quick(vm_page_t m)
{
	if (atomic_fetchadd_int(&m->wire_count, 1) == 0)
		panic("vm_page_wire_quick: wire_count was 0");
}

/*
 * Unwire a vm_page quickly, does not require a busied page.
 *
 * This routine refuses to drop the wire_count to 0 and will return
 * TRUE if it would have had to (instead of decrementing it to 0).
 * The caller can then busy the page and deal with it.
 */
static __inline int
vm_page_unwire_quick(vm_page_t m)
{
	KKASSERT(m->wire_count > 0);
	for (;;) {
		u_int wire_count = m->wire_count;

		cpu_ccfence();
		if (wire_count == 1)
			return (TRUE);
		if (atomic_cmpset_int(&m->wire_count,
				      wire_count, wire_count - 1)) {
			return (FALSE);
		}
	}
}

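/*
 * Usage sketch (illustrative): a caller holding the last wire must fall
 * back to the full unwire path.  The busy/unbusy calls shown are
 * assumptions about the caller's context, not requirements of this
 * inline.
 *
 *	if (vm_page_unwire_quick(m)) {
 *		vm_page_busy_wait(m, TRUE, "unwire");
 *		vm_page_unwire(m, 0);
 *		vm_page_wakeup(m);
 *	}
 */
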
/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_int(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_int(&(m)->flags, bits);
}

/*
 * Wakeup anyone waiting for the page after potentially unbusying
 * (hard or soft) or doing other work on a page that might make a
 * waiter ready.  The setting of PG_WANTED is integrated into the
 * related flags and it can't be set once the flags are already
 * clear, so there should be no races here.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it will effectively be a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 *
 * WARNING: VM_PROT_NONE can block, but will loop until all mappings have
 * been cleared.  Callers should be aware that other page-related elements
 * might have changed, however.
 */
static __inline void
vm_page_protect(vm_page_t m, int prot)
{
	KKASSERT(m->flags & PG_BUSY);
	if (prot == VM_PROT_NONE) {
		if (m->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(m, VM_PROT_NONE);
			/* PG_WRITEABLE & PG_MAPPED cleared by call */
		}
	} else if ((prot == VM_PROT_READ) && (m->flags & PG_WRITEABLE)) {
		pmap_page_protect(m, VM_PROT_READ);
		/* PG_WRITEABLE cleared by call */
	}
}

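/*
 * Usage sketch (illustrative): a copy-on-write setup downgrades the page
 * so the next write faults, while freezing a page for write I/O removes
 * all mappings.  The page must be busied in both cases, per the KKASSERT
 * above.
 *
 *	vm_page_protect(m, VM_PROT_READ);	(force write faults, COW)
 *	vm_page_protect(m, VM_PROT_NONE);	(freeze for write I/O)
 */
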
/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zeroed out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
	dest_m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * Always clear PG_ZERO when freeing a page, which ensures the flag is not
 * set unless we are absolutely certain the page is zeroed.  This is
 * particularly important when the vm_page_alloc*() code moves pages from
 * PQ_CACHE to PQ_FREE.
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.  The caller must ensure that the
 * page has been zeroed.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
#ifdef PMAP_DEBUG
#ifdef PHYS_TO_DMAP
	/* Debug build: verify the caller's zeroed-page guarantee */
	char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	int i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (p[i] != 0)
			panic("non-zero page in vm_page_free_zero()");
	}
#endif
#endif
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif	/* _KERNEL */
#endif	/* _VM_VM_PAGE2_H_ */