Always do PCI_REROUTE_INTERRUPT.
[dragonfly.git] / sys / vm / vm_object.h
blob7bccfd66d83512bc62a0b78252c1ba0c0bee695b
1 /*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
36 * from: @(#)vm_object.h 8.3 (Berkeley) 1/12/94
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
54 * Carnegie Mellon requests users of this software to return to
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
64 * $FreeBSD: src/sys/vm/vm_object.h,v 1.63.2.3 2003/05/26 19:17:56 alc Exp $
65 * $DragonFly: src/sys/vm/vm_object.h,v 1.14 2007/06/08 02:00:47 dillon Exp $
69 * Virtual memory object module definitions.
72 #ifndef _VM_VM_OBJECT_H_
73 #define _VM_VM_OBJECT_H_
75 #ifndef _SYS_TYPES_H_
76 #include <sys/types.h>
77 #endif
78 #if defined(_KERNEL) && !defined(_SYS_SYSTM_H_)
79 #include <sys/systm.h>
80 #endif
81 #ifndef _SYS_QUEUE_H_
82 #include <sys/queue.h>
83 #endif
84 #ifndef _MACHINE_ATOMIC_H_
85 #include <machine/atomic.h>
86 #endif
87 #ifndef _VM_VM_H_
88 #include <vm/vm.h>
89 #endif
90 #ifndef _VM_VM_PAGE_H_
91 #include <vm/vm_page.h>
92 #endif
94 #ifdef _KERNEL
96 #ifndef _SYS_THREAD2_H_
97 #include <sys/thread2.h>
98 #endif
100 #endif
102 enum obj_type {
103 OBJT_DEFAULT,
104 OBJT_SWAP, /* object backed by swap blocks */
105 OBJT_VNODE, /* object backed by file pages (vnode) */
106 OBJT_DEVICE, /* object backed by device pages */
107 OBJT_PHYS, /* object backed by physical pages */
108 OBJT_DEAD /* dead object */
110 typedef u_char objtype_t;
113 * vm_object_lock A lock covering byte ranges within a VM object
116 struct vm_object_lock {
117 struct vm_object_lock *next;
118 int type; /* type of lock */
119 int waiting; /* someone is waiting on the lock */
120 off_t base; /* byte offset into object */
121 off_t bytes; /* extent in bytes */
124 #define VMOBJ_LOCK_SHARED 1
125 #define VMOBJ_LOCK_EXCL 2
128 * vm_object A VM object which represents an arbitrarily sized
129 * data store.
131 struct vm_object {
132 TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
133 LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
134 LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
135 RB_HEAD(vm_page_rb_tree, vm_page) rb_memq; /* resident pages */
136 int generation; /* generation ID */
137 vm_size_t size; /* Object size */
138 int ref_count; /* How many refs?? */
139 int shadow_count; /* how many objects that this is a shadow for */
140 int hash_rand; /* vm hash table randomizer */
141 objtype_t type; /* type of pager */
142 u_short flags; /* see below */
143 u_short pg_color; /* color of first page in obj */
144 u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
145 int resident_page_count; /* number of resident pages */
146 struct vm_object *backing_object; /* object that I'm a shadow of */
147 vm_ooffset_t backing_object_offset;/* Offset in backing object */
148 TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
149 void *handle;
150 vm_object_lock_t range_locks;
151 union {
153 * Device pager
155 * devp_pglist - list of allocated pages
157 struct {
158 TAILQ_HEAD(, vm_page) devp_pglist;
159 } devp;
162 * Swap pager
164 * swp_bcount - number of swap 'swblock' metablocks, each
165 * contains up to 16 swapblk assignments.
166 * see vm/swap_pager.h
168 struct {
169 int swp_bcount;
170 } swp;
171 } un_pager;
/*
 * Flags (vm_object.flags, manipulated with the atomic helpers below)
 */
#define OBJ_ACTIVE	0x0004	/* active objects */
#define OBJ_DEAD	0x0008	/* dead objects (during rundown) */
#define OBJ_NOSPLIT	0x0010	/* dont split this object */
#define OBJ_PIPWNT	0x0040	/* paging in progress wanted */
#define OBJ_WRITEABLE	0x0080	/* object has been made writable */
#define OBJ_MIGHTBEDIRTY 0x0100	/* object might be dirty */
#define OBJ_CLEANING	0x0200
#define OBJ_DEADWNT	0x1000	/* waiting because object is dead */
#define OBJ_ONEMAPPING	0x2000	/* One USE (a single, non-forked) mapping flag */

/* Convert between page indices and byte offsets within an object. */
#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
#ifdef _KERNEL

/* Flags for vm_object_page_clean() */
#define OBJPC_SYNC	0x1	/* sync I/O */
#define OBJPC_INVAL	0x2	/* invalidate */
#define OBJPC_NOSYNC	0x4	/* skip if PG_NOSYNC */

TAILQ_HEAD(object_q, vm_object);

extern struct object_q vm_object_list;	/* list of allocated objects */

/* lock for object list and count */

extern struct vm_object kernel_object;	/* the single kernel object */

#endif				/* _KERNEL */
206 #ifdef _KERNEL
208 static __inline void
209 vm_object_set_flag(vm_object_t object, u_int bits)
211 atomic_set_short(&object->flags, bits);
214 static __inline void
215 vm_object_clear_flag(vm_object_t object, u_int bits)
217 atomic_clear_short(&object->flags, bits);
220 static __inline void
221 vm_object_pip_add(vm_object_t object, int i)
223 atomic_add_short(&object->paging_in_progress, i);
226 static __inline void
227 vm_object_pip_subtract(vm_object_t object, int i)
229 atomic_subtract_short(&object->paging_in_progress, i);
232 static __inline void
233 vm_object_pip_wakeup(vm_object_t object)
235 atomic_subtract_short(&object->paging_in_progress, 1);
236 if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
237 vm_object_clear_flag(object, OBJ_PIPWNT);
238 wakeup(object);
242 static __inline void
243 vm_object_pip_wakeupn(vm_object_t object, int i)
245 if (i)
246 atomic_subtract_short(&object->paging_in_progress, i);
247 if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
248 vm_object_clear_flag(object, OBJ_PIPWNT);
249 wakeup(object);
253 static __inline void
254 vm_object_pip_sleep(vm_object_t object, char *waitid)
256 if (object->paging_in_progress) {
257 crit_enter();
258 if (object->paging_in_progress) {
259 vm_object_set_flag(object, OBJ_PIPWNT);
260 tsleep(object, 0, waitid, 0);
262 crit_exit();
266 static __inline void
267 vm_object_pip_wait(vm_object_t object, char *waitid)
269 while (object->paging_in_progress)
270 vm_object_pip_sleep(object, waitid);
273 vm_object_t vm_object_allocate (objtype_t, vm_size_t);
274 void _vm_object_allocate (objtype_t, vm_size_t, vm_object_t);
275 boolean_t vm_object_coalesce (vm_object_t, vm_pindex_t, vm_size_t, vm_size_t);
276 void vm_object_collapse (vm_object_t);
277 void vm_object_deallocate (vm_object_t);
278 void vm_object_terminate (vm_object_t);
279 void vm_object_set_writeable_dirty (vm_object_t);
280 void vm_object_init (void);
281 void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
282 void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
283 void vm_object_pmap_copy (vm_object_t, vm_pindex_t, vm_pindex_t);
284 void vm_object_pmap_copy_1 (vm_object_t, vm_pindex_t, vm_pindex_t);
285 void vm_object_pmap_remove (vm_object_t, vm_pindex_t, vm_pindex_t);
286 void vm_object_reference (vm_object_t);
287 void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
288 void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
289 void vm_object_init2 (void);
290 vm_page_t vm_fault_object_page(vm_object_t, vm_ooffset_t, vm_prot_t, int, int *);
291 void vm_object_dead_sleep(vm_object_t, const char *);
292 void vm_object_dead_wakeup(vm_object_t);
294 #endif /* _KERNEL */
296 #endif /* _VM_VM_OBJECT_H_ */