/*
 * sys/vm/vm_object.h (DragonFly BSD repository)
 * blob 9da954bb8de419e08413f4a23c8e5e2bbbae92a0
 * Imported at commit: "Fix malloc->kmalloc leftover to fix kernel
 * without VGA_NO_MODE_CHANGE"
 */
1 /*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
36 * from: @(#)vm_object.h 8.3 (Berkeley) 1/12/94
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
54 * Carnegie Mellon requests users of this software to return to
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
64 * $FreeBSD: src/sys/vm/vm_object.h,v 1.63.2.3 2003/05/26 19:17:56 alc Exp $
65 * $DragonFly: src/sys/vm/vm_object.h,v 1.10 2006/05/20 02:42:15 dillon Exp $
69 * Virtual memory object module definitions.
72 #ifndef _VM_VM_OBJECT_H_
73 #define _VM_VM_OBJECT_H_
75 #ifndef _SYS_TYPES_H_
76 #include <sys/types.h>
77 #endif
78 #if defined(_KERNEL) && !defined(_SYS_SYSTM_H_)
79 #include <sys/systm.h>
80 #endif
81 #ifndef _SYS_QUEUE_H_
82 #include <sys/queue.h>
83 #endif
84 #ifndef _MACHINE_ATOMIC_H_
85 #include <machine/atomic.h>
86 #endif
87 #ifndef _VM_VM_H_
88 #include <vm/vm.h>
89 #endif
91 #ifdef _KERNEL
93 #ifndef _SYS_THREAD2_H_
94 #include <sys/thread2.h>
95 #endif
97 #endif
/*
 * obj_type / objtype_t
 *
 *	Identifies the pager type backing a VM object.  Stored in
 *	vm_object->type as a u_char (objtype_t).
 */
enum obj_type {
	OBJT_DEFAULT,
	OBJT_SWAP,	/* object backed by swap blocks */
	OBJT_VNODE,	/* object backed by file pages (vnode) */
	OBJT_DEVICE,	/* object backed by device pages */
	OBJT_PHYS,	/* object backed by physical pages */
	OBJT_DEAD	/* dead object */
};
typedef u_char objtype_t;
/*
 * vm_object_lock	A lock covering byte ranges within a VM object
 *
 *	Range locks are singly chained via 'next' (the object's chain
 *	head is vm_object->range_locks) and cover the byte extent
 *	[base, base + bytes).
 */
struct vm_object_lock {
	struct vm_object_lock *next;	/* next range lock in the chain */
	int	type;			/* type of lock (VMOBJ_LOCK_*) */
	int	waiting;		/* someone is waiting on the lock */
	off_t	base;			/* byte offset into object */
	off_t	bytes;			/* extent in bytes */
};

#define VMOBJ_LOCK_SHARED	1
#define VMOBJ_LOCK_EXCL		2
/*
 * vm_object	A VM object which represents an arbitrarily sized
 *		data store.
 */
struct vm_object {
	TAILQ_ENTRY(vm_object) object_list;	/* list of all objects */
	LIST_HEAD(, vm_object) shadow_head;	/* objects that this is a shadow for */
	LIST_ENTRY(vm_object) shadow_list;	/* chain of shadow objects */
	TAILQ_HEAD(, vm_page) memq;		/* list of resident pages */
	int generation;			/* generation ID */
	vm_size_t size;			/* Object size */
	int ref_count;			/* How many refs?? */
	int shadow_count;		/* how many objects that this is a shadow for */
	int hash_rand;			/* vm hash table randomizer */
	objtype_t type;			/* type of pager (see enum obj_type) */
	u_short flags;			/* see OBJ_* flags below */
	u_short pg_color;		/* color of first page in obj */
	u_short paging_in_progress;	/* Paging (in or out) so don't collapse or destroy */
	int resident_page_count;	/* number of resident pages */
	struct vm_object *backing_object; /* object that I'm a shadow of */
	vm_ooffset_t backing_object_offset; /* Offset in backing object */
	TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
	void *handle;			/* NOTE(review): opaque pager identity
					 * (e.g. vnode) — pager-specific, confirm */
	vm_object_lock_t range_locks;	/* chain of byte-range locks (see
					 * struct vm_object_lock above) */
	union {
		/*
		 * Device pager
		 *
		 *	devp_pglist - list of allocated pages
		 */
		struct {
			TAILQ_HEAD(, vm_page) devp_pglist;
		} devp;

		/*
		 * Swap pager
		 *
		 *	swp_bcount - number of swap 'swblock' metablocks, each
		 *	contains up to 16 swapblk assignments.
		 *	see vm/swap_pager.h
		 */
		struct {
			int swp_bcount;
		} swp;
	} un_pager;
};
/*
 * Flags (for vm_object->flags)
 */
#define OBJ_ACTIVE	0x0004	/* active objects */
#define OBJ_DEAD	0x0008	/* dead objects (during rundown) */
#define OBJ_NOSPLIT	0x0010	/* dont split this object */
#define OBJ_PIPWNT	0x0040	/* paging in progress wanted */
#define OBJ_WRITEABLE	0x0080	/* object has been made writable */
#define OBJ_MIGHTBEDIRTY 0x0100	/* object might be dirty */
#define OBJ_CLEANING	0x0200	/* NOTE(review): presumably set while a
				 * cleaning pass is running — confirm */
#define OBJ_UNUSED1000	0x1000
#define OBJ_ONEMAPPING	0x2000	/* One USE (a single, non-forked) mapping flag */

/*
 * Convert between page indices and byte offsets (shift by PAGE_SHIFT).
 */
#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
#ifdef _KERNEL

/*
 * Page-cleaning option flags.  NOTE(review): presumably consumed by
 * vm_object_page_clean() — confirm against vm_object.c.
 */
#define OBJPC_SYNC	0x1	/* sync I/O */
#define OBJPC_INVAL	0x2	/* invalidate */
#define OBJPC_NOSYNC	0x4	/* skip if PG_NOSYNC */

TAILQ_HEAD(object_q, vm_object);

extern struct object_q vm_object_list;	/* list of allocated objects */

/* lock for object list and count */

extern vm_object_t kernel_object;	/* the single kernel object */
extern vm_object_t kmem_object;

#endif				/* _KERNEL */
204 #ifdef _KERNEL
206 static __inline void
207 vm_object_set_flag(vm_object_t object, u_int bits)
209 atomic_set_short(&object->flags, bits);
212 static __inline void
213 vm_object_clear_flag(vm_object_t object, u_int bits)
215 atomic_clear_short(&object->flags, bits);
218 static __inline void
219 vm_object_pip_add(vm_object_t object, int i)
221 atomic_add_short(&object->paging_in_progress, i);
224 static __inline void
225 vm_object_pip_subtract(vm_object_t object, int i)
227 atomic_subtract_short(&object->paging_in_progress, i);
/*
 * vm_object_pip_wakeup:
 *
 *	Drop one paging-in-progress reference.  If the count reaches zero
 *	and a waiter has requested notification (OBJ_PIPWNT), clear the
 *	request and wake up sleepers on the object.  The decrement happens
 *	before the flag test so a concurrent waiter is not missed.
 */
static __inline void
vm_object_pip_wakeup(vm_object_t object)
{
	atomic_subtract_short(&object->paging_in_progress, 1);
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}
/*
 * vm_object_pip_wakeupn:
 *
 *	Drop 'i' paging-in-progress references (the atomic subtract is
 *	skipped when i == 0, but the wakeup check still runs), then wake
 *	up sleepers if the count hit zero and OBJ_PIPWNT was set.
 */
static __inline void
vm_object_pip_wakeupn(vm_object_t object, int i)
{
	if (i)
		atomic_subtract_short(&object->paging_in_progress, i);
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}
/*
 * vm_object_pip_sleep:
 *
 *	Sleep once waiting for the object's paging-in-progress count to
 *	drain.  The count is re-tested inside a critical section before
 *	setting OBJ_PIPWNT and sleeping, closing the race between the
 *	unlocked first test and a concurrent vm_object_pip_wakeup().
 *	Returns after a single wakeup; callers that must wait for zero
 *	loop (see vm_object_pip_wait()).
 */
static __inline void
vm_object_pip_sleep(vm_object_t object, char *waitid)
{
	if (object->paging_in_progress) {
		crit_enter();
		if (object->paging_in_progress) {
			vm_object_set_flag(object, OBJ_PIPWNT);
			tsleep(object, 0, waitid, 0);
		}
		crit_exit();
	}
}
264 static __inline void
265 vm_object_pip_wait(vm_object_t object, char *waitid)
267 while (object->paging_in_progress)
268 vm_object_pip_sleep(object, waitid);
/*
 * Function prototypes exported by the VM object module.
 */
vm_object_t vm_object_allocate (objtype_t, vm_size_t);
void _vm_object_allocate (objtype_t, vm_size_t, vm_object_t);
boolean_t vm_object_coalesce (vm_object_t, vm_pindex_t, vm_size_t, vm_size_t);
void vm_object_collapse (vm_object_t);
void vm_object_deallocate (vm_object_t);
void vm_object_terminate (vm_object_t);
void vm_object_set_writeable_dirty (vm_object_t);
void vm_object_init (void);
void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
void vm_object_pmap_copy (vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_pmap_copy_1 (vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_pmap_remove (vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_reference (vm_object_t);
void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
void vm_object_init2 (void);
#endif				/* _KERNEL */
290 #endif /* _VM_VM_OBJECT_H_ */