kernel: Add a few forgotten crit_exit()s and fix a wrong crit_enter().
[dragonfly.git] / sys/vm/vm_object.h
blob 4ae6fad09471bfa731e23c1562ee13af17ff6561
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.h	8.3 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.h,v 1.63.2.3 2003/05/26 19:17:56 alc Exp $
 */
/*
 *	Virtual memory object module definitions.
 */
#ifndef	_VM_VM_OBJECT_H_
#define	_VM_VM_OBJECT_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#if defined(_KERNEL) && !defined(_SYS_SYSTM_H_)
#include <sys/systm.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#include <machine/atomic.h>
#ifndef _VM_VM_H_
#include <vm/vm.h>
#endif
#ifndef _VM_VM_PAGE_H_
#include <vm/vm_page.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif

#ifdef _KERNEL

#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#ifndef _SYS_REFCOUNT_H_
#include <sys/refcount.h>
#endif

#endif
struct swblock;
struct swblock_rb_tree;
int rb_swblock_compare(struct swblock *, struct swblock *);

RB_PROTOTYPE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	      vm_pindex_t);
enum obj_type {
	OBJT_DEFAULT,
	OBJT_SWAP,	/* object backed by swap blocks */
	OBJT_VNODE,	/* object backed by file pages (vnode) */
	OBJT_DEVICE,	/* object backed by device pages */
	OBJT_PHYS,	/* object backed by physical pages */
	OBJT_DEAD,	/* dead object */
	OBJT_MARKER	/* marker object */
};
typedef u_char objtype_t;
/*
 * vm_object	A VM object which represents an arbitrarily sized
 *		data store.
 *
 * Locking requirements:
 *	vmobj_token for object_list
 *
 *	vm_object_hold/drop() for most vm_object related operations.
 *
 *	OBJ_CHAINLOCK to avoid chain/shadow object collisions
 */
struct vm_object {
	TAILQ_ENTRY(vm_object) object_list;	/* vmobj_token */
	LIST_HEAD(, vm_object) shadow_head;	/* objects we are a shadow for */
	LIST_ENTRY(vm_object) shadow_list;	/* chain of shadow objects */
	RB_HEAD(vm_page_rb_tree, vm_page) rb_memq;	/* resident pages */
	int generation;			/* generation ID */
	vm_pindex_t size;		/* Object size */
	int ref_count;
	int shadow_count;		/* count of objs we are a shadow for */
	objtype_t type;			/* type of pager */
	u_short flags;			/* see below */
	u_short pg_color;		/* color of first page in obj */
	u_int paging_in_progress;	/* Paging (in or out) so don't collapse or destroy */
	int resident_page_count;	/* number of resident pages */
	u_int agg_pv_list_count;	/* aggregate pv list count */
	struct vm_object *backing_object; /* object that I'm a shadow of */
	vm_ooffset_t backing_object_offset; /* Offset in backing object */
	void *handle;			/* control handle: vp, etc */
	int hold_count;			/* count prevents destruction */

#if defined(DEBUG_LOCKS)
	/*
	 * Record threads holding a vm_object
	 */
#define VMOBJ_DEBUG_ARRAY_SIZE	(32)
	u_int debug_hold_bitmap;
	thread_t debug_hold_thrs[VMOBJ_DEBUG_ARRAY_SIZE];
	char *debug_hold_file[VMOBJ_DEBUG_ARRAY_SIZE];
	int debug_hold_line[VMOBJ_DEBUG_ARRAY_SIZE];
	u_int debug_hold_ovfl;
#endif

	union {
		/*
		 * Device pager
		 *
		 *	devp_pglist - list of allocated pages
		 */
		struct {
			TAILQ_HEAD(, vm_page) devp_pglist;
		} devp;
	} un_pager;

	/*
	 * OBJT_SWAP and OBJT_VNODE VM objects may have swap backing
	 * store.  For vnodes the swap backing store acts as a fast
	 * data cache but the vnode contains the official data.
	 */
	RB_HEAD(swblock_rb_tree, swblock) swblock_root;
	int swblock_count;
	struct lwkt_token token;
};
/*
 * Flags
 */
#define OBJ_CHAINLOCK	0x0001		/* backing_object/shadow changing */
#define OBJ_CHAINWANT	0x0002
#define OBJ_ACTIVE	0x0004		/* active objects */
#define OBJ_DEAD	0x0008		/* dead objects (during rundown) */
#define OBJ_NOSPLIT	0x0010		/* dont split this object */
#define OBJ_UNUSED0040	0x0040
#define OBJ_WRITEABLE	0x0080		/* object has been made writable */
#define OBJ_MIGHTBEDIRTY 0x0100		/* object might be dirty */
#define OBJ_CLEANING	0x0200
#define OBJ_DEADWNT	0x1000		/* waiting because object is dead */
#define OBJ_ONEMAPPING	0x2000		/* flag single vm_map_entry mapping */
#define OBJ_NOMSYNC	0x4000		/* disable msync() system call */
#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
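/*
 * Illustrative note (not part of the original header): with the common
 * PAGE_SHIFT of 12 (4 KB pages) the page-index/byte-offset conversions
 * above work out as follows.
 *
 *	IDX_TO_OFF(3)      == 3 << 12      == 0x3000	(page 3 starts at byte 0x3000)
 *	OFF_TO_IDX(0x3fff) == 0x3fff >> 12 == 3		(any offset within the page
 *							 truncates back to index 3)
 */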
#ifdef	_KERNEL

#define OBJPC_SYNC	0x1		/* sync I/O */
#define OBJPC_INVAL	0x2		/* invalidate */
#define OBJPC_NOSYNC	0x4		/* skip if PG_NOSYNC */
/*
 * Used to chain vm_object deallocations
 */
struct vm_object_dealloc_list {
	struct vm_object_dealloc_list *next;
	vm_object_t object;
};

TAILQ_HEAD(object_q, vm_object);

extern struct object_q vm_object_list;	/* list of allocated objects */

 /* lock for object list and count */

extern struct vm_object kernel_object;	/* the single kernel object */

#endif				/* _KERNEL */
#ifdef _KERNEL

static __inline void
vm_object_set_flag(vm_object_t object, u_int bits)
{
	atomic_set_short(&object->flags, bits);
}

static __inline void
vm_object_clear_flag(vm_object_t object, u_int bits)
{
	atomic_clear_short(&object->flags, bits);
}

static __inline void
vm_object_pip_add(vm_object_t object, u_int i)
{
	refcount_acquire_n(&object->paging_in_progress, (u_int)i);
}

static __inline void
vm_object_pip_wakeup_n(vm_object_t object, u_int i)
{
	refcount_release_wakeup_n(&object->paging_in_progress, i);
}

static __inline void
vm_object_pip_wakeup(vm_object_t object)
{
	vm_object_pip_wakeup_n(object, 1);
}

static __inline void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
	refcount_wait(&object->paging_in_progress, waitid);
}
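/*
 * Illustrative sketch (not part of the original header): pager-style code
 * typically brackets in-flight I/O on an object with the pip helpers above,
 * so that paths such as object termination can drain the count through
 * vm_object_pip_wait().  Everything except the vm_object_*() calls below
 * is hypothetical.
 *
 *	vm_object_hold(object);
 *	vm_object_pip_add(object, 1);	// mark paging activity in progress
 *	error = my_start_and_wait_for_io(object, m);
 *	vm_object_pip_wakeup(object);	// drop the pip count, wake waiters
 *	vm_object_drop(object);
 */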
static __inline lwkt_token_t
vm_object_token(vm_object_t obj)
{
	return (&obj->token);
}
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
vm_object_t vm_object_allocate_hold (objtype_t, vm_pindex_t);
void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
boolean_t vm_object_coalesce (vm_object_t, vm_pindex_t, vm_size_t, vm_size_t);
void vm_object_collapse (vm_object_t, struct vm_object_dealloc_list **);
void vm_object_deallocate (vm_object_t);
void vm_object_deallocate_locked (vm_object_t);
void vm_object_deallocate_list(struct vm_object_dealloc_list **);
void vm_object_terminate (vm_object_t);
void vm_object_set_writeable_dirty (vm_object_t);
void vm_object_init (void);
void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
void vm_object_pmap_copy (vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_pmap_copy_1 (vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_pmap_remove (vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_reference_locked (vm_object_t);
void vm_object_chain_wait (vm_object_t);
void vm_object_chain_acquire(vm_object_t object);
void vm_object_chain_release(vm_object_t object);
void vm_object_chain_release_all(vm_object_t object, vm_object_t stopobj);
void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t, int);
void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
void vm_object_init2 (void);
vm_page_t vm_fault_object_page(vm_object_t, vm_ooffset_t,
				vm_prot_t, int, int *);
void vm_object_dead_sleep(vm_object_t, const char *);
void vm_object_dead_wakeup(vm_object_t);
void vm_object_lock_swap(void);
void vm_object_lock(vm_object_t);
void vm_object_lock_shared(vm_object_t);
void vm_object_unlock(vm_object_t);
#ifndef DEBUG_LOCKS
void vm_object_hold(vm_object_t);
int vm_object_hold_try(vm_object_t);
void vm_object_hold_shared(vm_object_t);
#else
#define vm_object_hold(obj)		\
	debugvm_object_hold(obj, __FILE__, __LINE__)
void debugvm_object_hold(vm_object_t, char *, int);
#define vm_object_hold_try(obj)	\
	debugvm_object_hold_try(obj, __FILE__, __LINE__)
int debugvm_object_hold_try(vm_object_t, char *, int);
#define vm_object_hold_shared(obj)	\
	debugvm_object_hold_shared(obj, __FILE__, __LINE__)
void debugvm_object_hold_shared(vm_object_t, char *, int);
#endif

void vm_object_drop(vm_object_t);
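/*
 * Illustrative sketch (not part of the original header): per the locking
 * notes near the top of this file, vm_object_hold()/vm_object_drop() is
 * the usual way to stabilize an object around most vm_object operations.
 * The field access shown is only an example of work done while the hold
 * is owned.
 *
 *	vm_object_hold(object);		// exclusive hold, object can't be destroyed
 *	count = object->resident_page_count;
 *	vm_object_drop(object);		// release the hold
 */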
#endif				/* _KERNEL */

#endif				/* _VM_VM_OBJECT_H_ */