/* sys/kern/vfs_default.c */
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */
struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lease =		VOP_NULL,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptofh =		vop_stdvptofh,
};
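
/*
 * Illustrative sketch (not part of the original file): a filesystem
 * normally fills in only the operations it implements and chains to the
 * table above for everything else via vop_default.  The examplefs_*
 * names below are hypothetical:
 *
 *	static struct vop_vector examplefs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	examplefs_lookup,
 *		.vop_read =	examplefs_read,
 *	};
 */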
/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}
/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
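
/*
 * Illustrative sketch (hypothetical caller, not from this file): per the
 * comment above, a read path clears BIO_ERROR before handing the buffer
 * to the strategy routine, so a stale error is never reported back:
 *
 *	bp->b_iocmd = BIO_READ;
 *	bp->b_ioflags &= ~BIO_ERROR;
 *	bstrategy(bp);
 */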
/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}
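
/*
 * The VOP_GETATTR() calls above fetch va_size because the lf_advlock()
 * routines need the current file size to resolve lock ranges specified
 * relative to end-of-file (SEEK_END); note that the vnode lock is held
 * only around the attribute read, not around the lock operation itself.
 */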
/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_NAME_MAX:
			*ap->a_retval = NAME_MAX;
			return (0);
		case _PC_PATH_MAX:
			*ap->a_retval = PATH_MAX;
			return (0);
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
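
/*
 * Illustrative sketch (hypothetical filesystem, not from this file): per
 * the comment above, a filesystem with tighter limits overrides only the
 * names it narrows, for instance 12-character names, and falls back to
 * this default for everything else:
 *
 *	static int
 *	shortfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		if (ap->a_name == _PC_NAME_MAX) {
 *			*ap->a_retval = 12;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */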
/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}
/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}
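
/*
 * The unlocked check-ref-recheck above is the usual pattern for taking a
 * mount reference without holding the vnode lock: if v_mount changed
 * while the reference was being acquired, the vnode was reclaimed in the
 * meantime and the stale reference is dropped again.  Consumers such as
 * vn_start_write() rely on this to charge writes to the right mount.
 */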
/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
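
/*
 * Worked example: with mnt_stat.f_iosize of 16384 bytes, btodb(16384)
 * is 32 DEV_BSIZE (512-byte) units per filesystem block, so logical
 * block 5 maps to *a_bnp = 5 * 32 = 160.  The zeroed a_runp/a_runb
 * report that no read-ahead or read-behind clustering is available.
 */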
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;     /* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}
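
/*
 * The BV_SCANNED mark used above is what guarantees forward progress:
 * because the buf-object lock is dropped around each write, loop2
 * restarts from the head of the dirty list after every buffer, and the
 * mark keeps a pass from revisiting buffers it has already queued.
 */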
/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}
/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp, td)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}
int
vfs_stdsync(mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{
	struct vnode *vp, *mvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		/* bv_cnt is an acceptable race here. */
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
			continue;
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		/* Do not turn this into vput.  td is not always curthread. */
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}
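
/*
 * vget() returns ENOENT above when the vnode was reclaimed while the
 * mount interlock was dropped; the iterator's marker can no longer be
 * trusted at that point, so the scan conservatively aborts and restarts
 * from the top of the mount's vnode list.
 */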
int
vfs_stdnosync (mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */