/*	$NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.
 * Copyright (c) 1990 University of Utah.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/pool.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,
	.pgo_detach = uvn_detach,
	.pgo_get = uvn_get,
	.pgo_put = uvn_put,
};

/*
 * the ops!
 */
/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	VREF((struct vnode *)uobj);
}
/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}
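#if 0
/*
 * Illustrative sketch, not part of the original file: the reference
 * and detach hooks pair up like VREF()/vrele() on the underlying
 * vnode.  example_ref_cycle() is a hypothetical helper; per the
 * comments above, the caller must already hold one reference.
 */
static void
example_ref_cycle(struct uvm_object *uobj)
{

	uvn_reference(uobj);	/* take an extra reference */
	/* ... use the object ... */
	uvn_detach(uobj);	/* and drop it again */
}
#endif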
/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	KASSERT(mutex_owned(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);

	return error;
}
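#if 0
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller of uvn_put().  The vnode interlock must be held on entry and
 * VOP_PUTPAGES() releases it, so there is no matching mutex_exit()
 * here.  PGO_CLEANIT|PGO_SYNCIO requests a synchronous clean of the
 * page-aligned range [lo, hi).
 */
static int
example_flush_range(struct vnode *vp, voff_t lo, voff_t hi)
{

	mutex_enter(&vp->v_interlock);
	return uvn_put(&vp->v_uobj, lo, hi, PGO_CLEANIT | PGO_SYNCIO);
}
#endif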
/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
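#if 0
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * synchronous single-page read through uvn_get().  The object is
 * locked on entry and unlocked before I/O; "off" names pps[0] (here
 * also the center page); npages is an in/out parameter.
 */
static int
example_get_one(struct uvm_object *uobj, voff_t off, struct vm_page **pgp)
{
	struct vm_page *pgs[1] = { NULL };
	int npages = 1;
	int error;

	mutex_enter(&uobj->vmobjlock);
	error = uvn_get(uobj, off, pgs, &npages, 0 /* centeridx */,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
	if (error == 0)
		*pgp = pgs[0];	/* caller must still check the page flags */
	return error;
}
#endif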
/*
 * uvn_findpages:
 * return the page for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}
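#if 0
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller gathering a short run of pages with uvn_findpages().  The
 * pgs[] slots must be NULL on entry, the object lock must be held,
 * and the returned pages come back BUSY, so the caller eventually
 * has to unbusy them.
 */
static int
example_find_run(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pgs[4];
	int npages = __arraycount(pgs);
	int found;

	memset(pgs, 0, sizeof(pgs));
	mutex_enter(&uobj->vmobjlock);
	found = uvn_findpages(uobj, off, &npages, pgs, UFP_ALL);
	mutex_exit(&uobj->vmobjlock);
	return found;
}
#endif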
static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				mutex_exit(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			mutex_enter(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}
/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(&uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(&uobj->vmobjlock);
}
void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(&vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(&vp->v_interlock);
}
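#if 0
/*
 * Illustrative sketch, not part of the original file: the extending-
 * write pattern a filesystem might use.  The eventual size is
 * published first with uvm_vnp_setwritesize(), the data is copied in,
 * and the final size is committed with uvm_vnp_setsize() (which also
 * handles the shrink case by flushing pages past the new end).
 */
static void
example_extend_write(struct vnode *vp, voff_t newsize)
{

	uvm_vnp_setwritesize(vp, newsize);
	/* ... copy the new data in; an error would roll the size back ... */
	uvm_vnp_setsize(vp, newsize);
}
#endif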
/*
 * uvm_vnp_zerorange:  set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
		    UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}
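#if 0
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * truncation helper that zeroes the partial tail of the last block,
 * a typical use of uvm_vnp_zerorange().
 */
static void
example_zero_tail(struct vnode *vp, off_t newsize, size_t blksize)
{
	size_t tail = (size_t)(newsize % blksize);

	if (tail != 0)
		uvm_vnp_zerorange(vp, newsize, blksize - tail);
}
#endif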
bool
uvn_text_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_EXECMAP) != 0;
}

bool
uvn_clean_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return (vp->v_iflag & VI_ONWORKLST) == 0;
}

bool
uvn_needs_writefault_p(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	return uvn_clean_p(uobj) ||
	    (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
}
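#if 0
/*
 * Illustrative sketch, not part of the original file: how a pager-side
 * caller might use the predicates above to classify a vnode object
 * without poking at the vnode flags directly.
 */
static void
example_classify(struct uvm_object *uobj)
{

	if (uvn_text_p(uobj))
		printf("object maps executable text\n");
	if (uvn_clean_p(uobj))
		printf("object is not on the syncer worklist\n");
	if (uvn_needs_writefault_p(uobj))
		printf("first write access must take a fault\n");
}
#endif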