/*	$NetBSD: uvm_fault_i.h,v 1.23 2007/02/22 06:05:01 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_fault_i.h,v 1.1.6.1 1997/12/08 16:07:12 chuck Exp
 */

#ifndef _UVM_UVM_FAULT_I_H_
#define _UVM_UVM_FAULT_I_H_
/*
 * uvm_fault_i.h: fault inline functions
 */
/*
 * uvmfault_unlockmaps: unlock the maps
 */

static __inline void
uvmfault_unlockmaps(struct uvm_faultinfo *ufi, bool write_locked)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return;
	}

	if (write_locked) {
		vm_map_unlock(ufi->map);
	} else {
		vm_map_unlock_read(ufi->map);
	}
}
/*
 * uvmfault_unlockall: unlock everything passed in.
 *
 * => maps must be read-locked (not write-locked).
 */

static __inline void
uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
    struct uvm_object *uobj, struct vm_anon *anon)
{

	if (anon)
		mutex_exit(&anon->an_lock);
	if (uobj)
		mutex_exit(&uobj->vmobjlock);
	if (amap)
		amap_unlock(amap);
	uvmfault_unlockmaps(ufi, false);
}
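
/*
 * Usage sketch: fault-handling code typically calls uvmfault_unlockall()
 * on its error/retry paths, passing NULL for any object it never locked.
 * Illustrative only; "ufi", "amap" and "uobj" below are assumed locals
 * of the caller, not names defined in this file.
 *
 *	uvmfault_unlockall(&ufi, amap, uobj, NULL);
 */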
/*
 * uvmfault_lookup: lookup a virtual address in a map
 *
 * => caller must provide a uvm_faultinfo structure with the IN
 *	params properly filled in
 * => we will lookup the map entry (handling submaps) as we go
 * => if the lookup is a success we will return with the maps locked
 * => if "write_lock" is true, we write_lock the map, otherwise we only
 *	get a read lock.
 * => note that submaps can only appear in the kernel and they are
 *	required to use the same virtual addresses as the map they
 *	are referenced by (thus address translation between the main
 *	map and the submap is unnecessary).
 */

static __inline bool
uvmfault_lookup(struct uvm_faultinfo *ufi, bool write_lock)
{
	struct vm_map *tmpmap;

	/*
	 * init ufi values for lookup.
	 */

	ufi->map = ufi->orig_map;
	ufi->size = ufi->orig_size;

	/*
	 * keep going down levels until we are done.  note that there can
	 * only be two levels so we won't loop very long.
	 */

	/*CONSTCOND*/
	while (1) {
		/*
		 * Make sure this is not an "interrupt safe" map.
		 * Such maps are never supposed to be involved in
		 * a fault.
		 */
		if (ufi->map->flags & VM_MAP_INTRSAFE)
			return (false);

		/*
		 * lock map
		 */
		if (write_lock) {
			vm_map_lock(ufi->map);
		} else {
			vm_map_lock_read(ufi->map);
		}

		/*
		 * lookup
		 */
		if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
		    &ufi->entry)) {
			uvmfault_unlockmaps(ufi, write_lock);
			return (false);
		}

		/*
		 * reduce size if necessary
		 */
		if (ufi->entry->end - ufi->orig_rvaddr < ufi->size)
			ufi->size = ufi->entry->end - ufi->orig_rvaddr;

		/*
		 * submap?  replace map with the submap and lookup again.
		 * note: VAs in submaps must match VAs in main map.
		 */
		if (UVM_ET_ISSUBMAP(ufi->entry)) {
			tmpmap = ufi->entry->object.sub_map;
			if (write_lock) {
				vm_map_unlock(ufi->map);
			} else {
				vm_map_unlock_read(ufi->map);
			}
			ufi->map = tmpmap;
			continue;
		}

		/*
		 * got it!
		 */

		ufi->mapv = ufi->map->timestamp;
		return (true);

	}	/* while loop */

	/*NOTREACHED*/
}
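
/*
 * Usage sketch: a fault handler (e.g. uvm_fault()) fills in the "orig"
 * IN fields of a struct uvm_faultinfo and then calls uvmfault_lookup().
 * The snippet below is illustrative only; "map" and "vaddr" are assumed
 * locals of the caller holding the faulting map and virtual address.
 *
 *	struct uvm_faultinfo ufi;
 *
 *	ufi.orig_map = map;
 *	ufi.orig_rvaddr = trunc_page(vaddr);
 *	ufi.orig_size = PAGE_SIZE;
 *	if (uvmfault_lookup(&ufi, false) == false)
 *		return EFAULT;
 *	... use ufi.entry, ufi.size ...
 *	uvmfault_unlockmaps(&ufi, false);
 */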
/*
 * uvmfault_relock: attempt to relock the same version of the map
 *
 * => fault data structures should be unlocked before calling.
 * => if a success (true) maps will be locked after call.
 */

static __inline bool
uvmfault_relock(struct uvm_faultinfo *ufi)
{
	/*
	 * ufi can be NULL when this isn't really a fault,
	 * but merely paging in anon data.
	 */

	if (ufi == NULL) {
		return true;
	}

	uvmexp.fltrelck++;

	/*
	 * relock map.  fail if version mismatch (in which case nothing
	 * gets locked).
	 */

	vm_map_lock_read(ufi->map);
	if (ufi->mapv != ufi->map->timestamp) {
		vm_map_unlock_read(ufi->map);
		return (false);
	}

	uvmexp.fltrelckok++;
	return (true);
}
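
/*
 * Usage sketch: uvmfault_relock() supports the common "unlock, sleep for
 * page I/O, then revalidate" pattern.  Illustrative only; "locked" is an
 * assumed local of the caller.
 *
 *	uvmfault_unlockall(ufi, amap, NULL, anon);
 *	... wait for the page to come in ...
 *	locked = uvmfault_relock(ufi);
 *	if (!locked) {
 *		... map version changed underneath us; restart the fault ...
 *	}
 */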

#endif /* _UVM_UVM_FAULT_I_H_ */