/*
 * sys/vm/vm_object.h — DragonFly BSD
 * (provenance from extraction: git blob b650211bef152255a8b99721f0cc25ce59302bd3;
 *  the unrelated commit subject "sbin/hammer: Rename hammer_parsedevs() to
 *  hammer_parse_blkdevs()" was web-viewer residue, not part of this file)
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.h	8.3 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_object.h,v 1.63.2.3 2003/05/26 19:17:56 alc Exp $
 *
 * Virtual memory object module definitions.
 */
67 #ifndef _VM_VM_OBJECT_H_
68 #define _VM_VM_OBJECT_H_
70 #ifndef _SYS_TYPES_H_
71 #include <sys/types.h>
72 #endif
73 #if defined(_KERNEL) && !defined(_SYS_SYSTM_H_)
74 #include <sys/systm.h>
75 #endif
76 #ifndef _SYS_QUEUE_H_
77 #include <sys/queue.h>
78 #endif
79 #ifndef _SYS_TREE_H_
80 #include <sys/tree.h>
81 #endif
82 #ifndef _SYS_THREAD_H_
83 #include <sys/thread.h>
84 #endif
85 #ifndef _MACHINE_PMAP_H_
86 #include <machine/pmap.h>
87 #endif
88 #ifndef _CPU_ATOMIC_H_
89 #include <machine/atomic.h>
90 #endif
91 #ifndef _VM_VM_H_
92 #include <vm/vm.h>
93 #endif
94 #ifndef _VM_VM_PAGE_H_
95 #include <vm/vm_page.h>
96 #endif
98 #ifdef _KERNEL
100 #ifndef _SYS_THREAD2_H_
101 #include <sys/thread2.h>
102 #endif
104 #ifndef _SYS_REFCOUNT_H_
105 #include <sys/refcount.h>
106 #endif
108 #endif
/*
 * Forward declarations for the red-black tree of swap meta blocks
 * hung off swap-backed objects (see swblock_root in struct vm_object).
 */
struct swblock;
struct swblock_rb_tree;
int rb_swblock_compare(struct swblock *, struct swblock *);

RB_PROTOTYPE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	      vm_pindex_t);
117 enum obj_type {
118 OBJT_DEFAULT,
119 OBJT_SWAP, /* object backed by swap blocks */
120 OBJT_VNODE, /* object backed by file pages (vnode) */
121 OBJT_DEVICE, /* object backed by device pages */
122 OBJT_MGTDEVICE,
123 OBJT_PHYS, /* object backed by physical pages */
124 OBJT_DEAD, /* dead object */
125 OBJT_MARKER /* marker object */
127 typedef u_char objtype_t;
130 * A VM object which represents an arbitrarily sized data store.
132 * NOTE:
133 * shadow_head is only used by OBJT_DEFAULT or OBJT_SWAP objects.
134 * OBJT_VNODE objects explicitly do not keep track of who is shadowing
135 * them.
137 * LOCKING:
138 * vmobj_tokens[n] for object_list, hashed by address.
140 * vm_object_hold/drop() for most vm_object related operations.
141 * OBJ_CHAINLOCK to avoid chain/shadow object collisions.
143 struct vm_object {
144 TAILQ_ENTRY(vm_object) object_list; /* locked by vmobj_tokens[n] */
145 LIST_HEAD(, vm_object) shadow_head; /* objects we are a shadow for */
146 LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
147 RB_HEAD(vm_page_rb_tree, vm_page) rb_memq; /* resident pages */
148 int generation; /* generation ID */
149 vm_pindex_t size; /* Object size */
150 int ref_count;
151 int shadow_count; /* count of objs we are a shadow for */
152 vm_memattr_t memattr; /* default memory attribute for pages */
153 objtype_t type; /* type of pager */
154 u_short flags; /* see below */
155 u_short pg_color; /* color of first page in obj */
156 u_int paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
157 long resident_page_count; /* number of resident pages */
158 u_int unused01;
159 struct vm_object *backing_object; /* object that I'm a shadow of */
160 vm_ooffset_t backing_object_offset;/* Offset in backing object */
161 TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
162 void *handle; /* control handle: vp, etc */
163 int hold_count; /* count prevents destruction */
165 #if defined(DEBUG_LOCKS)
167 * Record threads holding a vm_object
170 #define VMOBJ_DEBUG_ARRAY_SIZE (32)
171 char debug_hold_thrs[VMOBJ_DEBUG_ARRAY_SIZE][64];
172 const char *debug_hold_file[VMOBJ_DEBUG_ARRAY_SIZE];
173 int debug_hold_line[VMOBJ_DEBUG_ARRAY_SIZE];
174 int debug_index;
175 #endif
177 union {
179 * Device pager
181 * devp_pglist - list of allocated pages
183 struct {
184 TAILQ_HEAD(, vm_page) devp_pglist;
185 struct cdev_pager_ops *ops;
186 struct cdev *dev;
187 } devp;
188 } un_pager;
191 * OBJT_SWAP and OBJT_VNODE VM objects may have swap backing
192 * store. For vnodes the swap backing store acts as a fast
193 * data cache but the vnode contains the official data.
195 RB_HEAD(swblock_rb_tree, swblock) swblock_root;
196 long swblock_count;
197 struct lwkt_token token;
198 struct md_object md; /* machine specific (typ pmap) */
199 uint32_t chainlk;/* chaining lock */
/*
 * Flags
 *
 * NOTE: OBJ_ONEMAPPING only applies to DEFAULT and SWAP objects.  It
 *	 may be gratuitously re-cleared in other cases but will already be
 *	 clear in those cases.  It might not be set on other object types
 *	 (particularly OBJT_VNODE).
 */
#define OBJ_UNUSED0001	0x0001		/* backing_object/shadow changing */
#define OBJ_ONSHADOW	0x0002		/* backing_object on shadow list */
#define OBJ_ACTIVE	0x0004		/* active objects */
#define OBJ_DEAD	0x0008		/* dead objects (during rundown) */
#define OBJ_NOSPLIT	0x0010		/* dont split this object */
#define OBJ_UNUSED0040	0x0040
#define OBJ_WRITEABLE	0x0080		/* object has been made writable */
#define OBJ_MIGHTBEDIRTY 0x0100		/* object might be dirty */
#define OBJ_CLEANING	0x0200
#define OBJ_DEADWNT	0x1000		/* waiting because object is dead */
#define OBJ_ONEMAPPING	0x2000		/* flag single vm_map_entry mapping */
#define OBJ_NOMSYNC	0x4000		/* disable msync() system call */

/*
 * chainlk bit layout: three control bits in the top of the 32-bit word,
 * the remaining bits (CHAINLK_MASK) hold the shared-lock count.
 */
#define CHAINLK_EXCL	0x80000000
#define CHAINLK_WAIT	0x40000000
#define CHAINLK_EXCLREQ	0x20000000
#define CHAINLK_MASK	0x1FFFFFFF

/* Convert between page indexes and byte offsets. */
#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))

/* Hash a vm_object pointer into the vm_object_hash[] table. */
#define VMOBJ_HSIZE	64
#define VMOBJ_HMASK	(VMOBJ_HSIZE - 1)
#define VMOBJ_HASH(obj)	(&vm_object_hash[((intptr_t)(obj) >> 8) & VMOBJ_HMASK])
#ifdef _KERNEL

/* OBJPC_* flag bits (presumably for vm_object_page_clean() — verify callers). */
#define OBJPC_SYNC	0x1	/* sync I/O */
#define OBJPC_INVAL	0x2	/* invalidate */
#define OBJPC_NOSYNC	0x4	/* skip if PG_NOSYNC */

/*
 * Used to chain vm_object deallocations
 */
struct vm_object_dealloc_list {
	struct vm_object_dealloc_list *next;
	vm_object_t object;
};

TAILQ_HEAD(object_q, vm_object);

/* One bucket of the address-hashed object list (see VMOBJ_HASH()). */
struct vm_object_hash {
	struct object_q list;
	struct lwkt_token token;
} __cachealign;

extern struct vm_object_hash vm_object_hash[VMOBJ_HSIZE];

/* lock for object list and count */

extern struct vm_object kernel_object;	/* the single kernel object */
extern int vm_shared_fault;

#endif				/* _KERNEL */
265 #ifdef _KERNEL
267 #define VM_OBJECT_LOCK(object) vm_object_hold(object)
268 #define VM_OBJECT_UNLOCK(object) vm_object_drop(object)
270 static __inline void
271 vm_object_set_flag(vm_object_t object, u_int bits)
273 atomic_set_short(&object->flags, bits);
276 static __inline void
277 vm_object_clear_flag(vm_object_t object, u_int bits)
279 atomic_clear_short(&object->flags, bits);
282 static __inline void
283 vm_object_pip_add(vm_object_t object, u_int i)
285 refcount_acquire_n(&object->paging_in_progress, i);
288 static __inline void
289 vm_object_pip_wakeup_n(vm_object_t object, u_int i)
291 refcount_release_wakeup_n(&object->paging_in_progress, i);
294 static __inline void
295 vm_object_pip_wakeup(vm_object_t object)
297 vm_object_pip_wakeup_n(object, 1);
300 static __inline void
301 vm_object_pip_wait(vm_object_t object, char *waitid)
303 refcount_wait(&object->paging_in_progress, waitid);
306 static __inline lwkt_token_t
307 vm_object_token(vm_object_t obj)
309 return (&obj->token);
312 vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
313 vm_object_t vm_object_allocate_hold (objtype_t, vm_pindex_t);
314 void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
315 boolean_t vm_object_coalesce (vm_object_t, vm_pindex_t, vm_size_t, vm_size_t);
316 void vm_object_collapse (vm_object_t, struct vm_object_dealloc_list **);
317 void vm_object_deallocate_list(struct vm_object_dealloc_list **);
318 void vm_object_terminate (vm_object_t);
319 void vm_object_set_writeable_dirty (vm_object_t);
320 void vm_object_init(vm_object_t, vm_pindex_t);
321 void vm_object_init1 (void);
322 void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, int);
323 void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
324 void vm_object_pmap_copy (vm_object_t, vm_pindex_t, vm_pindex_t);
325 void vm_object_pmap_copy_1 (vm_object_t, vm_pindex_t, vm_pindex_t);
326 void vm_object_pmap_remove (vm_object_t, vm_pindex_t, vm_pindex_t);
327 void vm_object_chain_wait (vm_object_t object, int shared);
328 void vm_object_chain_acquire(vm_object_t object, int shared);
329 void vm_object_chain_release(vm_object_t object);
330 void vm_object_chain_release_all(vm_object_t object, vm_object_t stopobj);
331 void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t, int);
332 void vm_object_madvise (vm_object_t, vm_pindex_t, vm_pindex_t, int);
333 void vm_object_init2 (void);
334 vm_page_t vm_fault_object_page(vm_object_t, vm_ooffset_t,
335 vm_prot_t, int, int *, int *);
336 void vm_object_lock_swap(void);
337 void vm_object_lock(vm_object_t);
338 void vm_object_lock_shared(vm_object_t);
339 void vm_object_unlock(vm_object_t);
/*
 * With DEBUG_LOCKS, the hold/drop/reference/deallocate entry points are
 * redirected to debug* variants that record __FILE__/__LINE__ of the
 * caller; VMOBJDBARGS/VMOBJDBFWD splice the extra parameters into the
 * prototypes and forwarded calls.  Without it they expand to nothing.
 */
#if defined(DEBUG_LOCKS)

#define VMOBJDEBUG(x)	debug ## x
#define VMOBJDBARGS	, char *file, int line
#define VMOBJDBFWD	, file, line

#define vm_object_hold(obj)			\
	debugvm_object_hold(obj, __FILE__, __LINE__)
#define vm_object_hold_try(obj)			\
	debugvm_object_hold_try(obj, __FILE__, __LINE__)
#define vm_object_hold_shared(obj)		\
	debugvm_object_hold_shared(obj, __FILE__, __LINE__)
#define vm_object_drop(obj)			\
	debugvm_object_drop(obj, __FILE__, __LINE__)
#define vm_object_reference_quick(obj)		\
	debugvm_object_reference_quick(obj, __FILE__, __LINE__)
#define vm_object_reference_locked(obj)		\
	debugvm_object_reference_locked(obj, __FILE__, __LINE__)
#define vm_object_deallocate(obj)		\
	debugvm_object_deallocate(obj, __FILE__, __LINE__)
#define vm_object_deallocate_locked(obj)	\
	debugvm_object_deallocate_locked(obj, __FILE__, __LINE__)

#else

#define VMOBJDEBUG(x)	x
#define VMOBJDBARGS
#define VMOBJDBFWD

#endif
372 void VMOBJDEBUG(vm_object_hold)(vm_object_t object VMOBJDBARGS);
373 int VMOBJDEBUG(vm_object_hold_try)(vm_object_t object VMOBJDBARGS);
374 void VMOBJDEBUG(vm_object_hold_shared)(vm_object_t object VMOBJDBARGS);
375 void VMOBJDEBUG(vm_object_drop)(vm_object_t object VMOBJDBARGS);
376 void VMOBJDEBUG(vm_object_reference_quick)(vm_object_t object VMOBJDBARGS);
377 void VMOBJDEBUG(vm_object_reference_locked)(vm_object_t object VMOBJDBARGS);
378 void VMOBJDEBUG(vm_object_deallocate)(vm_object_t object VMOBJDBARGS);
379 void VMOBJDEBUG(vm_object_deallocate_locked)(vm_object_t object VMOBJDBARGS);
381 void vm_object_upgrade(vm_object_t);
382 void vm_object_downgrade(vm_object_t);
384 #endif /* _KERNEL */
386 #endif /* _VM_VM_OBJECT_H_ */