/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)mfs_vnops.c	8.11 (Berkeley) 5/22/95
 * $FreeBSD: src/sys/ufs/mfs/mfs_vnops.c,v 1.47.2.1 2001/05/22 02:06:43 bp Exp $
 * $DragonFly: src/sys/vfs/mfs/mfs_vnops.c,v 1.37 2007/08/13 17:31:56 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/mman.h>
#include <sys/conf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

#include "mfsnode.h"
#include "mfs_extern.h"
static int	mfs_badop (struct vop_generic_args *);
static int	mfs_bmap (struct vop_bmap_args *);
static int	mfs_close (struct vop_close_args *);
static int	mfs_fsync (struct vop_fsync_args *);
static int	mfs_freeblks (struct vop_freeblks_args *);
static int	mfs_inactive (struct vop_inactive_args *);	/* XXX */
static int	mfs_open (struct vop_open_args *);
static int	mfs_reclaim (struct vop_reclaim_args *);	/* XXX */
static int	mfs_print (struct vop_print_args *);		/* XXX */
static int	mfs_strategy (struct vop_strategy_args *);	/* XXX */
static int	mfs_getpages (struct vop_getpages_args *);	/* XXX */
/*
 * mfs vnode operations.  Note: the vops here are used for the MFS block
 * device, not for operations on files (MFS calls the ffs mount code for that).
 */
static struct vop_ops mfs_vnode_vops = {
	.vop_default =		mfs_badop,
	.vop_bmap =		mfs_bmap,
	.vop_close =		mfs_close,
	.vop_freeblks =		mfs_freeblks,
	.vop_fsync =		mfs_fsync,
	.vop_getpages =		mfs_getpages,
	.vop_inactive =		mfs_inactive,
	.vop_ioctl =		(void *)vop_enotty,
	.vop_open =		mfs_open,
	.vop_print =		mfs_print,
	.vop_reclaim =		mfs_reclaim,
	.vop_strategy =		mfs_strategy,
};
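/*
 * mfs_vnode_vops_p is referenced by the MFS mount code when it sets up
 * the block device vnode; VNODEOP_SET() registers the vop table with
 * the kernel's vop framework.
 */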
struct vop_ops *mfs_vnode_vops_p = &mfs_vnode_vops;

VNODEOP_SET(mfs_vnode_vops);
/*
 * Vnode Operations.
 *
 * Open is called to allow the memory filesystem to initialize and
 * validate before actual I/O.  Record our process identifier
 * so we can tell when we are doing I/O to ourselves.
 *
 * NOTE: new device sequencing.  Mounts check the device reference count
 * before calling open, so we must associate the device in open and
 * disassociate it in close rather than faking it when we created the vnode.
 *
 * mfs_open(struct vnode *a_vp, int a_mode, struct ucred *a_cred,
 *	    struct file *a_fp)
 */
/* ARGSUSED */
static int
mfs_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VCHR)
		panic("mfs_open not VCHR");
	v_associate_rdev(vp, get_dev(vp->v_umajor, vp->v_uminor));
	return (vop_stdopen(ap));
}
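/*
 * mfs_fsync() - fsync on the MFS block device vnode.
 *
 * Simply forwarded to the spec (device) vnode ops, which flush any
 * dirty buffers hanging off the device vnode.
 */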
static int
mfs_fsync(struct vop_fsync_args *ap)
{
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}
/*
 * mfs_freeblks() - hook to allow us to free physical memory.
 *
 *	We implement the BUF_CMD_FREEBLKS strategy.  We can't just madvise()
 *	here because we have to do it in the correct order vs other bio
 *	requests, so we queue it.
 *
 *	Note: geteblk() sets B_INVAL.  We leave it set to guarantee buffer
 *	throw-away on brelse()? XXX
 *
 * mfs_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
mfs_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;

	bp = geteblk(ap->a_length);
	bp->b_flags |= B_ASYNC;
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	BUF_KERNPROC(bp);
	vn_strategy(vp, &bp->b_bio1);
	return (0);
}
/*
 * Pass I/O requests to the memory filesystem process.
 *
 * mfs_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
mfs_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct mfsnode *mfsp;
	struct thread *td = curthread;		/* XXX */

	mfsp = ap->a_vp->v_rdev->si_drv1;
	if (mfsp == NULL) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		return (0);
	}

	/*
	 * splbio required for queueing/dequeueing, in case of forwarded
	 * BPs from bio interrupts (?).  It may not be necessary.
	 */
	crit_enter();

	if (mfsp->mfs_td == NULL) {
		/*
		 * mini-root.  Note: BUF_CMD_FREEBLKS not supported at the
		 * moment, since we do not know what kind of dataspace
		 * b_data is in.
		 */
		caddr_t base;

		base = mfsp->mfs_baseoff + bio->bio_offset;
		switch (bp->b_cmd) {
		case BUF_CMD_FREEBLKS:
			break;
		case BUF_CMD_READ:
			bcopy(base, bp->b_data, bp->b_bcount);
			break;
		case BUF_CMD_WRITE:
			bcopy(bp->b_data, base, bp->b_bcount);
			break;
		default:
			panic("mfs: bad b_cmd %d\n", bp->b_cmd);
		}
		biodone(bio);
	} else if (mfsp->mfs_td == td) {
		/*
		 * VOP to self
		 */
		crit_exit();
		mfs_doio(bio, mfsp);
		crit_enter();
	} else {
		/*
		 * VOP from some other process, queue to MFS process and
		 * wake it up.
		 */
		bioq_insert_tail(&mfsp->bio_queue, bio);
		wakeup((caddr_t)mfsp);
	}
	crit_exit();
	return (0);
}
/*
 * Memory file system I/O.
 *
 * Trivial on the HP since the buffer has already been mapped into KVA space.
 *
 * Read and Write are handled with a simple copyin and copyout.
 *
 * We also partially support VOP_FREEBLKS().  We can't implement it
 * completely -- for example, on fragments or inode metadata -- but we can
 * implement it for page-aligned requests.
 */
void
mfs_doio(struct bio *bio, struct mfsnode *mfsp)
{
	struct buf *bp = bio->bio_buf;
	caddr_t base = mfsp->mfs_baseoff + bio->bio_offset;
	int bytes;

	switch (bp->b_cmd) {
	case BUF_CMD_FREEBLKS:
		/*
		 * Implement FREEBLKS, which allows the filesystem to tell
		 * a block device when blocks are no longer needed (like when
		 * a file is deleted).  We use the hook to MADV_FREE the VM.
		 * This makes an MFS filesystem work as well or better than
		 * a sun-style swap-mounted filesystem.
		 */
		bytes = bp->b_bcount;

		if ((vm_offset_t)base & PAGE_MASK) {
			int n = PAGE_SIZE - ((vm_offset_t)base & PAGE_MASK);
			bytes -= n;
			base += n;
		}
		if (bytes > 0) {
			struct madvise_args uap;

			bytes &= ~PAGE_MASK;
			if (bytes != 0) {
				bzero(&uap, sizeof(uap));
				uap.addr = base;
				uap.len = bytes;
				uap.behav = MADV_FREE;
				sys_madvise(&uap);
			}
		}
		bp->b_error = 0;
		break;
	case BUF_CMD_READ:
		/*
		 * Read data from our 'memory' disk
		 */
		bp->b_error = copyin(base, bp->b_data, bp->b_bcount);
		break;
	case BUF_CMD_WRITE:
		/*
		 * Write data to our 'memory' disk
		 */
		bp->b_error = copyout(bp->b_data, base, bp->b_bcount);
		break;
	default:
		panic("mfs: bad b_cmd %d\n", bp->b_cmd);
	}
	if (bp->b_error)
		bp->b_flags |= B_ERROR;
	biodone(bio);
}
/*
 * This is a noop, simply returning what one has been given.
 *
 * mfs_bmap(struct vnode *a_vp, off_t a_loffset,
 *	    off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
mfs_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
/*
 * Memory filesystem close routine
 *
 * mfs_close(struct vnode *a_vp, int a_fflag)
 */
/* ARGSUSED */
static int
mfs_close(struct vop_close_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct bio *bio;
	int error = 0;

	/*
	 * Finish any pending I/O requests.
	 */
	while ((bio = bioq_first(&mfsp->bio_queue)) != NULL) {
		bioq_remove(&mfsp->bio_queue, bio);
		mfs_doio(bio, mfsp);
		wakeup((caddr_t)bio->bio_buf);
	}

	/*
	 * We really only care about the last close
	 */
	if (vp->v_opencount > 1)
		goto done;

	/*
	 * Synchronize any remaining buffers and then destroy them.
	 */
	if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0)
		goto done;

	/*
	 * Get rid of the pseudo-backing object.  Since the object is
	 * not directly memory mapped, we don't have to worry about
	 * synchronizing it.
	 */
	if (vp->v_object)
		vm_pager_deallocate(vp->v_object);

	/*
	 * There should be no way to have any more uses of this
	 * vnode, so if we find any other uses, it is a panic.
	 */
	if (vp->v_sysref.refcnt > 1)
		kprintf("mfs_close: ref count %d > 1\n", vp->v_sysref.refcnt);
	if (vp->v_sysref.refcnt > 1 || (bioq_first(&mfsp->bio_queue) != NULL))
		panic("mfs_close");

	/*
	 * Send a request to the filesystem server to exit.
	 */
	mfsp->mfs_active = 0;
	v_release_rdev(vp);
	if (mfsp->mfs_dev) {
		destroy_dev(mfsp->mfs_dev);
		mfsp->mfs_dev = NULL;
	}
	wakeup((caddr_t)mfsp);
done:
	vop_stdclose(ap);
	return (error);
}
/*
 * Memory filesystem inactive routine
 *
 * mfs_inactive(struct vnode *a_vp)
 */
/* ARGSUSED */
static int
mfs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct mfsnode *mfsp = VTOMFS(vp);

	if (bioq_first(&mfsp->bio_queue) != NULL)
		panic("mfs_inactive: not inactive (next buffer %p)",
			bioq_first(&mfsp->bio_queue));
	return (0);
}
/*
 * Reclaim a memory filesystem devvp so that it can be reused.
 *
 * mfs_reclaim(struct vnode *a_vp)
 */
static int
mfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;

	FREE(vp->v_data, M_MFSNODE);
	vp->v_data = NULL;
	return (0);
}
/*
 * Print out the contents of an mfsnode.
 *
 * mfs_print(struct vnode *a_vp)
 */
static int
mfs_print(struct vop_print_args *ap)
{
	struct mfsnode *mfsp = VTOMFS(ap->a_vp);

	kprintf("tag VT_MFS, td %p, base %p, size %ld\n",
	    mfsp->mfs_td, (void *)mfsp->mfs_baseoff, mfsp->mfs_size);
	return (0);
}
/*
 * Block device bad operation
 */
static int
mfs_badop(struct vop_generic_args *ap)
{
	int i;

	kprintf("mfs_badop[%s]\n", ap->a_desc->sd_name);
	i = vop_defaultop(ap);
	kprintf("mfs_badop[%s] = %d\n", ap->a_desc->sd_name, i);
	return (i);
}
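/*
 * mfs_getpages() - page-in on the MFS block device vnode.
 *
 * Forwarded to the spec (device) vnode ops so that paging I/O on the
 * device vnode goes through the normal device strategy path.
 */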
static int
mfs_getpages(struct vop_getpages_args *ap)
{
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}