/*
 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
 * $FreeBSD: src/sys/miscfs/union/union_vnops.c,v 1.72 1999/12/15 23:02:14 eivind Exp $
 * $DragonFly: src/sys/vfs/union/union_vnops.c,v 1.39 2007/11/20 21:03:51 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include "union.h"

#include <vm/vm.h>
#include <vm/vnode_pager.h>

#include <vm/vm_page.h>
#include <vm/vm_object.h>

int uniondebug = 0;

#if UDEBUG_ENABLED
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RW, &uniondebug, 0, "");
#else
SYSCTL_INT(_vfs, OID_AUTO, uniondebug, CTLFLAG_RD, &uniondebug, 0, "");
#endif
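
/*
 * Note: the UDEBUG() macro used throughout this file (see union.h) emits
 * debugging output when the vfs.uniondebug sysctl above is non-zero.  The
 * knob is writable at run time (e.g. 'sysctl vfs.uniondebug=1') only when
 * the code is compiled with UDEBUG_ENABLED; otherwise it is exported
 * read-only.
 */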

static int	union_access (struct vop_access_args *ap);
static int	union_advlock (struct vop_advlock_args *ap);
static int	union_bmap (struct vop_bmap_args *ap);
static int	union_close (struct vop_close_args *ap);
static int	union_create (struct vop_old_create_args *ap);
static int	union_fsync (struct vop_fsync_args *ap);
static int	union_getattr (struct vop_getattr_args *ap);
static int	union_inactive (struct vop_inactive_args *ap);
static int	union_ioctl (struct vop_ioctl_args *ap);
static int	union_link (struct vop_old_link_args *ap);
static int	union_lookup (struct vop_old_lookup_args *ap);
static int	union_lookup1 (struct vnode *udvp, struct vnode **dvp,
				   struct vnode **vpp,
				   struct componentname *cnp);
static int	union_mkdir (struct vop_old_mkdir_args *ap);
static int	union_mknod (struct vop_old_mknod_args *ap);
static int	union_mmap (struct vop_mmap_args *ap);
static int	union_open (struct vop_open_args *ap);
static int	union_pathconf (struct vop_pathconf_args *ap);
static int	union_print (struct vop_print_args *ap);
static int	union_read (struct vop_read_args *ap);
static int	union_readdir (struct vop_readdir_args *ap);
static int	union_readlink (struct vop_readlink_args *ap);
static int	union_reclaim (struct vop_reclaim_args *ap);
static int	union_remove (struct vop_old_remove_args *ap);
static int	union_rename (struct vop_old_rename_args *ap);
static int	union_rmdir (struct vop_old_rmdir_args *ap);
static int	union_poll (struct vop_poll_args *ap);
static int	union_setattr (struct vop_setattr_args *ap);
static int	union_strategy (struct vop_strategy_args *ap);
static int	union_getpages (struct vop_getpages_args *ap);
static int	union_putpages (struct vop_putpages_args *ap);
static int	union_symlink (struct vop_old_symlink_args *ap);
static int	union_whiteout (struct vop_old_whiteout_args *ap);
static int	union_write (struct vop_read_args *ap);
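
/*
 * union_lock_upper:
 *
 *	Return a referenced and exclusively locked copy of the upper vnode
 *	of a union node, or NULL if there is no upper vnode.
 */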
static __inline
struct vnode *
union_lock_upper(struct union_node *un, struct thread *td)
{
	struct vnode *uppervp;

	if ((uppervp = un->un_uppervp) != NULL) {
		vref(uppervp);
		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
	}
	KASSERT((uppervp == NULL || uppervp->v_sysref.refcnt > 0), ("uppervp usecount is 0"));
	return(uppervp);
}
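
/*
 * union_ref_upper:
 *
 *	Return a referenced but unlocked upper vnode.  If the vnode turns
 *	out to have been reclaimed, the reference is dropped and NULLVP is
 *	returned instead.
 */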
static __inline
struct vnode *
union_ref_upper(struct union_node *un)
{
	struct vnode *uppervp;

	if ((uppervp = un->un_uppervp) != NULL) {
		vref(uppervp);
		if (uppervp->v_flag & VRECLAIMED) {
			vrele(uppervp);
			return (NULLVP);
		}
	}
	return (uppervp);
}
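
/*
 * union_unlock_upper:
 *
 *	Dispose of the lock and reference acquired by union_lock_upper().
 */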
static __inline
void
union_unlock_upper(struct vnode *uppervp, struct thread *td)
{
	vput(uppervp);
}
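
/*
 * union_lock_other:
 *
 *	Lock and reference the backing vnode of a union node: the upper
 *	vnode when one exists, otherwise the lower vnode.
 */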
static __inline
struct vnode *
union_lock_other(struct union_node *un, struct thread *td)
{
	struct vnode *vp;

	if (un->un_uppervp != NULL) {
		vp = union_lock_upper(un, td);
	} else if ((vp = un->un_lowervp) != NULL) {
		vref(vp);
		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
	}
	return(vp);
}
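
/*
 * union_unlock_other:
 *
 *	Dispose of the lock and reference acquired by union_lock_other().
 */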
static __inline
void
union_unlock_other(struct vnode *vp, struct thread *td)
{
	vput(vp);
}

/*
 *	union_lookup:
 *
 *	udvp	must be exclusively locked on call and will remain
 *		exclusively locked on return.  This is the mount point
 *		for our filesystem.
 *
 *	dvp	Our base directory, locked and referenced.
 *		The passed dvp will be dereferenced and unlocked on return
 *		and a new dvp will be returned which is locked and
 *		referenced in the same variable.
 *
 *	vpp	is filled in with the result if no error occurred,
 *		locked and ref'd.
 *
 *		If an error is returned, *vpp is set to NULLVP.  If no
 *		error occurs, *vpp is returned with a reference and an
 *		exclusive lock.
 */
static int
union_lookup1(struct vnode *udvp, struct vnode **pdvp, struct vnode **vpp,
	      struct componentname *cnp)
{
	int error;
	struct thread *td = cnp->cn_td;
	struct vnode *dvp = *pdvp;
	struct vnode *tdvp;
	struct mount *mp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & CNP_ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			dvp = dvp->v_mount->mnt_vnodecovered;
			vref(dvp);
			vput(tdvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
	}

	/*
	 * Set return dvp to be the upperdvp 'parent directory'.
	 */
	*pdvp = dvp;

	/*
	 * If the VOP_LOOKUP() call generates an error, tdvp is invalid and
	 * no changes will have been made to dvp, so we are set to return.
	 */
	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error) {
		UDEBUG(("dvp %p error %d flags %lx\n", dvp, error, cnp->cn_flags));
		*vpp = NULL;
		return (error);
	}

	/*
	 * The parent directory will have been unlocked, unless lookup
	 * found the last component or if dvp == tdvp (tdvp must be locked).
	 *
	 * We want our dvp to remain locked and ref'd.  We also want tdvp
	 * to remain locked and ref'd.
	 */
	UDEBUG(("parentdir %p result %p flag %lx\n", dvp, tdvp, cnp->cn_flags));

#if 0
	if (dvp != tdvp && (cnp->cn_flags & CNP_XXXISLASTCN) == 0)
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
#endif

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 *
	 * We use dvp as a temporary variable here, it is no longer related
	 * to the dvp above.  However, we have to ensure that both *pdvp and
	 * tdvp are locked on return.
	 */
	dvp = tdvp;
	while (
	    dvp != udvp &&
	    (dvp->v_type == VDIR) &&
	    (mp = dvp->v_mountedhere)
	) {
		int relock_pdvp = 0;

		if (vfs_busy(mp, 0))
			continue;

		if (dvp == *pdvp)
			relock_pdvp = 1;
		vput(dvp);
		dvp = NULL;
		error = VFS_ROOT(mp, &dvp);

		vfs_unbusy(mp);

		if (relock_pdvp)
			vn_lock(*pdvp, LK_EXCLUSIVE | LK_RETRY);

		if (error) {
			*vpp = NULL;
			return (error);
		}
	}
	*vpp = dvp;
	return (0);
}

/*
 * union_lookup(struct vnode *a_dvp, struct vnode **a_vpp,
 *		struct componentname *a_cnp)
 */
static int
union_lookup(struct vop_old_lookup_args *ap)
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;		/* starting dir */
	struct union_node *dun = VTOUNION(dvp);	/* associated union node */
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	int lockparent = cnp->cn_flags & CNP_LOCKPARENT;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

	*ap->a_vpp = NULLVP;

	/*
	 * Disallow write attempts to the filesystem mounted read-only.
	 */
	if ((dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == NAMEI_DELETE || cnp->cn_nameiop == NAMEI_RENAME)) {
		return (EROFS);
	}

	/*
	 * For any lookups we do, always return with the parent locked.
	 */
	cnp->cn_flags |= CNP_LOCKPARENT;

	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	uerror = ENOENT;
	lerror = ENOENT;

	/*
	 * Get a private lock on uppervp and a reference, effectively
	 * taking it out of the union_node's control.
	 *
	 * We must lock upperdvp while holding our lock on dvp
	 * to avoid a deadlock.
	 */
	upperdvp = union_lock_upper(dun, td);

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		/*
		 * We do not have to worry about the DOTDOT case, we've
		 * already unlocked dvp.
		 */
		UDEBUG(("A %p\n", upperdvp));

		/*
		 * Do the lookup.  We must supply a locked and referenced
		 * upperdvp to the function and will get a new locked and
		 * referenced upperdvp back, with the old one having been
		 * dereferenced.
		 *
		 * If an error is returned, uppervp will be NULLVP.  If no
		 * error occurs, uppervp will be the locked and referenced
		 * return vnode or possibly NULL, depending on what is being
		 * requested.  It is possible that the returned uppervp
		 * will be the same as upperdvp.
		 */
		uerror = union_lookup1(um->um_uppervp, &upperdvp, &uppervp, cnp);
		UDEBUG((
		    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
		    uerror,
		    upperdvp,
		    upperdvp->v_sysref.refcnt,
		    vn_islocked(upperdvp),
		    uppervp,
		    (uppervp ? uppervp->v_sysref.refcnt : -99),
		    (uppervp ? vn_islocked(uppervp) : -99)
		));

		/*
		 * Disallow write attempts to the filesystem mounted read-only.
		 */
		if (uerror == EJUSTRETURN &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == NAMEI_CREATE || cnp->cn_nameiop == NAMEI_RENAME)) {
			error = EROFS;
			goto out;
		}

		/*
		 * Special case.  If cn_consume != 0 skip out.  The result
		 * of the lookup is transferred to our return variable.  If
		 * an error occurred we have to throw away the results.
		 */
		if (cnp->cn_consume != 0) {
			if ((error = uerror) == 0) {
				*ap->a_vpp = uppervp;
				uppervp = NULL;
			}
			goto out;
		}

		/*
		 * Calculate whiteout, fall through.
		 */
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & CNP_ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				int terror;

				terror = VOP_GETATTR(upperdvp, &va);
				if (terror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		UDEBUG(("B %p\n", lowerdvp));

		/*
		 * Force only LOOKUPs on the lower node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = NAMEI_LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}

		/*
		 * We shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 *
		 * union_lookup1() requires lowervp to be locked on entry,
		 * and it will be unlocked on return.  The ref count will
		 * not change.  On return lowervp doesn't represent anything
		 * to us so we NULL it out.
		 */
		vref(lowerdvp);
		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);
		lerror = union_lookup1(um->um_lowervp, &lowerdvp, &lowervp, cnp);
		if (lowerdvp == lowervp)
			vrele(lowerdvp);
		else
			vput(lowerdvp);
		lowerdvp = NULL;	/* lowerdvp invalid after vput */

		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if ((error = lerror) == 0) {
				*ap->a_vpp = lowervp;
				lowervp = NULL;
			}
			goto out;
		}
	} else {
		UDEBUG(("C %p\n", lowerdvp));
		if ((cnp->cn_flags & CNP_ISDOTDOT) && dun->un_pvp != NULLVP) {
			if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
				vref(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
				lerror = 0;
			}
		}
	}

	/*
	 * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
	 *
	 * 1. If both layers returned an error, select the upper layer.
	 *
	 * 2. If the upper layer failed and the bottom layer succeeded,
	 *    two subcases occur:
	 *
	 *	a.  The bottom vnode is not a directory, in which case
	 *	    just return a new union vnode referencing an
	 *	    empty top layer and the existing bottom layer.
	 *
	 *	b.  The bottom vnode is a directory, in which case
	 *	    create a new directory in the top layer and
	 *	    fall through to case 3.
	 *
	 * 3. If the top layer succeeded, then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		error = uerror;
		goto out;
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			KASSERT(uppervp == NULL, ("uppervp unexpectedly non-NULL"));
			/*
			 * oops, uppervp has a problem, we may have to shadow.
			 */
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			if (uerror) {
				error = uerror;
				goto out;
			}
		}
	}

	/*
	 * Must call union_allocvp() with both the upper and lower vnodes
	 * referenced and the upper vnode locked.  ap->a_vpp is returned
	 * referenced and locked.  lowervp, uppervp, and upperdvp are
	 * absorbed by union_allocvp() whether it succeeds or fails.
	 *
	 * upperdvp is the parent directory of uppervp which may be
	 * different, depending on the path, from dvp->un_uppervp.  That's
	 * why it is a separate argument.  Note that it must be unlocked.
	 *
	 * dvp must be locked on entry to the call and will be locked on
	 * return.
	 */
	if (uppervp && uppervp != upperdvp)
		vn_unlock(uppervp);
	if (lowervp)
		vn_unlock(lowervp);
	if (upperdvp)
		vn_unlock(upperdvp);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp,
		(*ap->a_vpp) ? ((*ap->a_vpp)->v_sysref.refcnt) : -99));

	uppervp = NULL;
	upperdvp = NULL;
	lowervp = NULL;

	/*
	 * Termination Code
	 *
	 * - put away any extra junk lying around.  Note that lowervp
	 *   (if not NULL) will never be the same as *ap->a_vpp and
	 *   neither will uppervp, because when we set that state we
	 *   NULL-out lowervp or uppervp.  On the other hand, upperdvp
	 *   may match uppervp or *ap->a_vpp.
	 *
	 * - relock/unlock dvp if appropriate.
	 */
out:
	if (upperdvp) {
		if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
			vrele(upperdvp);
		else
			vput(upperdvp);
	}

	if (uppervp)
		vput(uppervp);

	if (lowervp)
		vput(lowervp);

	/*
	 * Restore LOCKPARENT state
	 */
	if (!lockparent)
		cnp->cn_flags &= ~CNP_LOCKPARENT;

	UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
		((*ap->a_vpp) ? (*ap->a_vpp)->v_sysref.refcnt : -99),
		lowervp, uppervp));

	/*
	 * dvp lock state, determine whether to relock dvp.  dvp is expected
	 * to be locked on return if:
	 *
	 *	- there was an error (other than EJUSTRETURN), or
	 *	- we hit the last component and lockparent is true
	 *
	 * dvp_is_locked is the current state of the dvp lock, not counting
	 * the possibility that *ap->a_vpp == dvp (in which case it is locked
	 * anyway).  Note that *ap->a_vpp == dvp only if no error occurred.
	 */
	if (*ap->a_vpp != dvp) {
		if ((error == 0 || error == EJUSTRETURN) && !lockparent) {
			vn_unlock(dvp);
		}
	}

	/*
	 * Diagnostics
	 */
#ifdef DIAGNOSTIC
	if (cnp->cn_namelen == 1 &&
	    cnp->cn_nameptr[0] == '.' &&
	    *ap->a_vpp != dvp) {
		panic("union_lookup returning . (%p) not same as startdir (%p)",
		    ap->a_vpp, dvp);
	}
#endif

	return (error);
}

/*
 * union_create:
 *
 * a_dvp is locked on entry and remains locked on return.  a_vpp is returned
 * locked if no error occurs, otherwise it is garbage.
 *
 * union_create(struct vnode *a_dvp, struct vnode **a_vpp,
 *		struct componentname *a_cnp, struct vattr *a_vap)
 */
static int
union_create(struct vop_old_create_args *ap)
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *dvp;
	int error = EROFS;

	if ((dvp = union_lock_upper(dun, td)) != NULL) {
		struct vnode *vp;
		struct mount *mp;

		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error == 0) {
			mp = ap->a_dvp->v_mount;
			vn_unlock(vp);
			UDEBUG(("ALLOCVP-1 FROM %p REFS %d\n", vp, vp->v_sysref.refcnt));
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
				cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_sysref.refcnt));
		}
		union_unlock_upper(dvp, td);
	}
	return (error);
}
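
/*
 * union_whiteout:
 *
 *	Create a whiteout in the upper layer so that a name carried by the
 *	lower layer appears to be deleted.  Returns EOPNOTSUPP when there
 *	is no upper vnode to hold the whiteout.
 */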

/*
 * union_whiteout(struct vnode *a_dvp, struct componentname *a_cnp,
 *		  int a_flags)
 */
static int
union_whiteout(struct vop_old_whiteout_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *uppervp;
	int error = EOPNOTSUPP;

	if ((uppervp = union_lock_upper(un, cnp->cn_td)) != NULLVP) {
		error = VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags);
		union_unlock_upper(uppervp, cnp->cn_td);
	}
	return(error);
}

/*
 * union_mknod:
 *
 * a_dvp is locked on entry and should remain locked on return.
 * a_vpp is garbage whether an error occurs or not.
 *
 * union_mknod(struct vnode *a_dvp, struct vnode **a_vpp,
 *	       struct componentname *a_cnp, struct vattr *a_vap)
 */
static int
union_mknod(struct vop_old_mknod_args *ap)
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp;
	int error = EROFS;

	if ((dvp = union_lock_upper(dun, cnp->cn_td)) != NULL) {
		error = VOP_MKNOD(dvp, ap->a_vpp, cnp, ap->a_vap);
		union_unlock_upper(dvp, cnp->cn_td);
	}
	return (error);
}

/*
 * union_open:
 *
 *	run open VOP.  When opening the underlying vnode we have to mimic
 *	vn_open().  What we *really* need to do to avoid screwups if the
 *	open semantics change is to call vn_open().  For example, ufs blows
 *	up if you open a file but do not vmio it prior to writing.
 *
 * union_open(struct vnode *a_vp, int a_mode,
 *	      struct ucred *a_cred, struct thread *a_td)
 */
static int
union_open(struct vop_open_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct thread *td = ap->a_td;
	int error = 0;
	int tvpisupper = 1;

	/*
	 * If there is an existing upper vp then simply open that.
	 * The upper vp takes precedence over the lower vp.  When opening
	 * a lower vp for writing copy it to the uppervp and then open the
	 * uppervp.
	 *
	 * At the end of this section tvp will be left locked.
	 */
	if ((tvp = union_lock_upper(un, td)) == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise we can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			int docopy = !(mode & O_TRUNC);
			error = union_copyup(un, docopy, cred, td);
			tvp = union_lock_upper(un, td);
		} else {
			un->un_openl++;
			vref(tvp);
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY);
			tvpisupper = 0;
		}
	}

	/*
	 * We are holding the correct vnode, open it.  Note
	 * that in DragonFly, VOP_OPEN is responsible for associating
	 * a VM object with the vnode if the vnode is mappable or the
	 * underlying filesystem uses buffer cache calls on it.
	 */
	if (error == 0)
		error = VOP_OPEN(tvp, mode, cred, NULL);

	/*
	 * Release any locks held.
	 */
	if (tvpisupper) {
		if (tvp)
			union_unlock_upper(tvp, td);
	} else {
		vput(tvp);
	}
	return (error);
}

/*
 * union_close:
 *
 *	It is unclear whether a_vp is passed locked or unlocked.  Whatever
 *	the case we do not change it.
 *
 * union_close(struct vnode *a_vp, int a_fflag, struct ucred *a_cred,
 *		struct thread *a_td)
 */
static int
union_close(struct vop_close_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}
	ap->a_head.a_ops = *vp->v_ops;
	ap->a_vp = vp;
	return(vop_close_ap(ap));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 *
 * union_access(struct vnode *a_vp, int a_mode,
 *		struct ucred *a_cred, struct thread *a_td)
 */
static int
union_access(struct vop_access_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	int error = EACCES;
	struct vnode *vp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_mode & VWRITE) &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}

	if ((vp = union_lock_upper(un, td)) != NULLVP) {
		ap->a_head.a_ops = *vp->v_ops;
		ap->a_vp = vp;
		error = vop_access_ap(ap);
		union_unlock_upper(vp, td);
		return(error);
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		ap->a_head.a_ops = *vp->v_ops;
		ap->a_vp = vp;

		/*
		 * Remove VWRITE from a_mode if our mount point is RW, because
		 * we want to allow writes and lowervp may be read-only.
		 */
		if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ap->a_mode &= ~VWRITE;

		error = vop_access_ap(ap);
		if (error == 0) {
			struct union_mount *um;

			um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = vop_access_ap(ap);
			}
		}
		vn_unlock(vp);
	}
	return(error);
}

/*
 * We handle getattr only to change the fsid and
 * track object sizes.
 *
 * It's not clear whether VOP_GETATTR is to be
 * called with the vnode locked or not.  stat() calls
 * it with (vp) locked, and fstat() calls it with
 * (vp) unlocked.
 *
 * Because of this we cannot use our normal locking functions
 * if we do not intend to lock the main a_vp node.  At the moment
 * we are running without any specific locking at all, but any
 * programmer should beware that care must be taken if locking is
 * added to this function.
 *
 * union_getattr(struct vnode *a_vp, struct vattr *a_vap,
 *		 struct ucred *a_cred, struct thread *a_td)
 */
static int
union_getattr(struct vop_getattr_args *ap)
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;
	struct vattr *vap;
	struct vattr va;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */
	vap = ap->a_vap;

	if ((vp = un->un_uppervp) != NULLVP) {
		error = VOP_GETATTR(vp, vap);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap);
		if (error)
			return (error);
		/* XXX isn't this dangerous without a lock? */
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;
	return (0);
}

/*
 * union_setattr(struct vnode *a_vp, struct vattr *a_vap,
 *		 struct ucred *a_cred, struct thread *a_td)
 */
static int
union_setattr(struct vop_setattr_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_td;
	struct vattr *vap = ap->a_vap;
	struct vnode *uppervp;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL ||
	     vap->va_mode != (mode_t)VNOVAL)) {
		return (EROFS);
	}

	/*
	 * Handle the case of truncating the lower object to zero size
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if (un->un_uppervp == NULLVP && (un->un_lowervp->v_type == VREG)) {
		error = union_copyup(un, (ap->a_vap->va_size != 0),
			    ap->a_cred, ap->a_td);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	error = EROFS;
	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap, ap->a_cred);
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
		union_unlock_upper(uppervp, td);
	}
	return (error);
}
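
/*
 * Paging I/O is performed against the union vnode itself rather than
 * being passed through to a particular layer, using the generic vnode
 * pager.  This is presumably why union_write() below special-cases
 * UIO_NOCOPY: VM-initiated writes issued by the pager come back down
 * through this layer.
 */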

/*
 * union_getpages:
 */
static int
union_getpages(struct vop_getpages_args *ap)
{
	int r;

	r = vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
					 ap->a_count, ap->a_reqpage);
	return(r);
}

/*
 * union_putpages:
 */
static int
union_putpages(struct vop_putpages_args *ap)
{
	int r;

	r = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
		ap->a_sync, ap->a_rtvals);
	return(r);
}

/*
 * union_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
union_read(struct vop_read_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uvp;
	int error;

	uvp = union_lock_other(un, td);
	KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));

	if (ap->a_vp->v_flag & VOBJBUF)
		union_vm_coherency(ap->a_vp, ap->a_uio, 0);

	error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	union_unlock_other(uvp, td);

	/*
	 * XXX
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (uvp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}
	return (error);
}

/*
 * union_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	       struct ucred *a_cred)
 */
static int
union_write(struct vop_read_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uppervp;
	int error;

	if ((uppervp = union_lock_upper(un, td)) == NULLVP)
		panic("union: missing upper layer in write");

	/*
	 * Since our VM pages are associated with our vnode rather than
	 * the real vnode, and since we do not run our reads and writes
	 * through our own VM cache, we have a VM/VFS coherency problem.
	 * We solve it by invalidating or flushing the associated VM
	 * pages prior to allowing a normal read or write to occur.
	 *
	 * VM-backed writes (UIO_NOCOPY) have to be converted to normal
	 * writes because we are not cache-coherent.  Normal writes need
	 * to be made coherent with our VM-backing store, which we do by
	 * first flushing any dirty VM pages associated with the write
	 * range, and then destroying any clean VM pages associated with
	 * the write range.
	 */
	if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
		ap->a_uio->uio_segflg = UIO_SYSSPACE;
	} else if (ap->a_vp->v_flag & VOBJBUF) {
		union_vm_coherency(ap->a_vp, ap->a_uio, 1);
	}

	error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * the size of the underlying object may be changed by the
	 * write.
	 */
	if (error == 0) {
		off_t cur = ap->a_uio->uio_offset;

		if (cur > un->un_uppersz)
			union_newsize(ap->a_vp, cur, VNOVAL);
	}
	union_unlock_upper(uppervp, td);
	return (error);
}

/*
 * union_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data, int a_fflag,
 *	       struct ucred *a_cred, struct thread *a_td)
 */
static int
union_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_head.a_ops = *ovp->v_ops;
	ap->a_vp = ovp;
	return(vop_ioctl_ap(ap));
}

/*
 * union_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred,
 *	      struct thread *a_td)
 */
static int
union_poll(struct vop_poll_args *ap)
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_head.a_ops = *ovp->v_ops;
	ap->a_vp = ovp;
	return(vop_poll_ap(ap));
}

/*
 * union_mmap(struct vnode *a_vp, int a_fflags, struct ucred *a_cred,
 *	      struct thread *a_td)
 */
static int
union_mmap(struct vop_mmap_args *ap)
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_head.a_ops = *ovp->v_ops;
	ap->a_vp = ovp;
	return (vop_mmap_ap(ap));
}

/*
 * union_fsync(struct vnode *a_vp, struct ucred *a_cred, int a_waitfor,
 *	       struct thread *a_td)
 */
static int
union_fsync(struct vop_fsync_args *ap)
{
	int error = 0;
	struct thread *td = ap->a_td;
	struct vnode *targetvp;
	struct union_node *un = VTOUNION(ap->a_vp);

	if ((targetvp = union_lock_other(un, td)) != NULLVP) {
		error = VOP_FSYNC(targetvp, ap->a_waitfor);
		union_unlock_other(targetvp, td);
	}

	return (error);
}

/*
 * union_remove:
 *
 *	Remove the specified cnp.  The dvp and vp are passed to us locked
 *	and must remain locked on return.
 *
 * union_remove(struct vnode *a_dvp, struct vnode *a_vp,
 *		struct componentname *a_cnp)
 */
static int
union_remove(struct vop_old_remove_args *ap)
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *uppervp;
	struct vnode *upperdvp;
	int error;

	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union remove: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= CNP_DOWHITEOUT;
		error = VOP_REMOVE(upperdvp, uppervp, cnp);
#if 0
		/* XXX */
		if (!error)
			union_removed_upper(un);
#endif
		union_unlock_upper(uppervp, td);
	} else {
		error = union_mkwhiteout(
			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			    upperdvp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}

/*
 * union_link:
 *
 *	tdvp will be locked on entry, vp will not be locked on entry.
 *	tdvp should remain locked on return and vp should remain unlocked
 *	on return.
 *
 * union_link(struct vnode *a_tdvp, struct vnode *a_vp,
 *	      struct componentname *a_cnp)
 */
static int
union_link(struct vop_old_link_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
	struct vnode *vp;
	struct vnode *tdvp;
	int error = 0;

	if (ap->a_tdvp->v_ops != ap->a_vp->v_ops) {
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);

		if (tun->un_uppervp == NULLVP) {
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				if (dun->un_flags & UN_ULOCK) {
					dun->un_flags &= ~UN_ULOCK;
					vn_unlock(dun->un_uppervp);
				}
			}
#endif
			error = union_copyup(tun, 1, cnp->cn_cred, td);
#if 0
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
					LK_EXCLUSIVE | LK_RETRY);
				dun->un_flags |= UN_ULOCK;
			}
#endif
			vn_unlock(ap->a_vp);
		}
		vp = tun->un_uppervp;
	}

	if (error)
		return (error);

	/*
	 * Make sure upper is locked, then unlock the union directory we were
	 * called with to avoid a deadlock while we are calling VOP_LINK() on
	 * the upper (with tdvp locked and vp not locked).  Our ap->a_tdvp
	 * is expected to be locked on return.
	 */
	if ((tdvp = union_lock_upper(dun, td)) == NULLVP)
		return (EROFS);

	vn_unlock(ap->a_tdvp);			/* unlock calling node */
	error = VOP_LINK(tdvp, vp, cnp);	/* call link on upper */

	/*
	 * We have to unlock tdvp prior to relocking our calling node in
	 * order to avoid a deadlock.
	 */
	union_unlock_upper(tdvp, td);
	vn_lock(ap->a_tdvp, LK_EXCLUSIVE | LK_RETRY);
	return (error);
}

/*
 * union_rename(struct vnode *a_fdvp, struct vnode *a_fvp,
 *		struct componentname *a_fcnp, struct vnode *a_tdvp,
 *		struct vnode *a_tvp, struct componentname *a_tcnp)
 */
static int
union_rename(struct vop_old_rename_args *ap)
{
	int error;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	/*
	 * Figure out what fdvp to pass to our upper or lower vnode.  If we
	 * replace the fdvp, release the original one and ref the new one.
	 */
	if (fdvp->v_tag == VT_UNION) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}
		fdvp = un->un_uppervp;
		vref(fdvp);
		vrele(ap->a_fdvp);
	}

	/*
	 * Figure out what fvp to pass to our upper or lower vnode.  If we
	 * replace the fvp, release the original one and ref the new one.
	 */
	if (fvp->v_tag == VT_UNION) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
#if 0
		struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

		if (un->un_uppervp == NULLVP) {
			switch(fvp->v_type) {
			case VREG:
				vn_lock(un->un_vnode, LK_EXCLUSIVE | LK_RETRY);
				error = union_copyup(un, 1, ap->a_fcnp->cn_cred, ap->a_fcnp->cn_td);
				vn_unlock(un->un_vnode);
				if (error)
					goto bad;
				break;
			case VDIR:
				/*
				 * XXX not yet.
				 *
				 * There is only one way to rename a directory
				 * based in the lowervp, and that is to copy
				 * the entire directory hierarchy.  Otherwise
				 * it would not last across a reboot.
				 */
#if 0
				vrele(fvp);
				fvp = NULL;
				vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);
				error = union_mkshadow(um, fdvp,
					    ap->a_fcnp, &un->un_uppervp);
				vn_unlock(fdvp);
				if (un->un_uppervp)
					vn_unlock(un->un_uppervp);
				if (error)
					goto bad;
				break;
#endif
			default:
				error = EXDEV;
				goto bad;
			}
		}

		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= CNP_DOWHITEOUT;
		fvp = un->un_uppervp;
		vref(fvp);
		vrele(ap->a_fvp);
	}

	/*
	 * Figure out what tdvp (destination directory) to pass to the
	 * lower level.  If we replace it with uppervp, we need to vput the
	 * old one.  The exclusive lock is transferred to what we will pass
	 * down in the VOP_RENAME() and we replace uppervp with a simple
	 * reference.
	 */
	if (tdvp->v_tag == VT_UNION) {
		struct union_node *un = VTOUNION(tdvp);

		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		/*
		 * new tdvp is a lock and reference on uppervp, put away
		 * the old tdvp.
		 */
		tdvp = union_lock_upper(un, ap->a_tcnp->cn_td);
		vput(ap->a_tdvp);
	}

	/*
	 * Figure out what tvp (destination file) to pass to the
	 * lower level.
	 *
	 * If the uppervp file does not exist, put away the (wrong)
	 * file and change tvp to NULL.
	 */
	if (tvp != NULLVP && tvp->v_tag == VT_UNION) {
		struct union_node *un = VTOUNION(tvp);

		tvp = union_lock_upper(un, ap->a_tcnp->cn_td);
		vput(ap->a_tvp);
		/* note: tvp may be NULL */
	}

	/*
	 * VOP_RENAME() releases/vputs prior to returning, so we have no
	 * cleanup to do.
	 */
	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

	/*
	 * Error.  We still have to release / vput the various elements.
	 */
bad:
	vrele(fdvp);
	if (fvp)
		vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP) {
		if (tvp != tdvp)
			vput(tvp);
		else
			vrele(tvp);
	}
	return (error);
}

/*
 * union_mkdir(struct vnode *a_dvp, struct vnode **a_vpp,
 *	       struct componentname *a_cnp, struct vattr *a_vap)
 */
static int
union_mkdir(struct vop_old_mkdir_args *ap)
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *upperdvp;
	int error = EROFS;

	if ((upperdvp = union_lock_upper(dun, td)) != NULLVP) {
		struct vnode *vp;

		error = VOP_MKDIR(upperdvp, &vp, cnp, ap->a_vap);
		union_unlock_upper(upperdvp, td);

		if (error == 0) {
			vn_unlock(vp);
			UDEBUG(("ALLOCVP-2 FROM %p REFS %d\n", vp, vp->v_sysref.refcnt));
			error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount,
				ap->a_dvp, NULLVP, cnp, vp, NULLVP, 1);
			UDEBUG(("ALLOCVP-2B FROM %p REFS %d\n", *ap->a_vpp, vp->v_sysref.refcnt));
		}
	}
	return (error);
}
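
/*
 * union_rmdir:
 *
 *	If the directory exists in the upper layer it is removed there,
 *	with a whiteout when needed to keep a lower layer copy hidden;
 *	otherwise only a whiteout is created to mask the lower layer
 *	directory.
 */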

/*
 * union_rmdir(struct vnode *a_dvp, struct vnode *a_vp,
 *	       struct componentname *a_cnp)
 */
static int
union_rmdir(struct vop_old_rmdir_args *ap)
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *upperdvp;
	struct vnode *uppervp;
	int error;

	if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
		panic("union rmdir: null upper vnode");

	if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
		if (union_dowhiteout(un, cnp->cn_cred, td))
			cnp->cn_flags |= CNP_DOWHITEOUT;
		error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
		union_unlock_upper(uppervp, td);
	} else {
		error = union_mkwhiteout(
			    MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
			    dun->un_uppervp, ap->a_cnp, un->un_path);
	}
	union_unlock_upper(upperdvp, td);
	return (error);
}

/*
 * union_symlink:
 *
 *	dvp is locked on entry and remains locked on return.  a_vpp is garbage
 *	(unused).
 *
 * union_symlink(struct vnode *a_dvp, struct vnode **a_vpp,
 *		 struct componentname *a_cnp, struct vattr *a_vap,
 *		 char *a_target)
 */
static int
union_symlink(struct vop_old_symlink_args *ap)
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_td;
	struct vnode *dvp;
	int error = EROFS;

	if ((dvp = union_lock_upper(dun, td)) != NULLVP) {
		error = VOP_SYMLINK(dvp, ap->a_vpp, cnp, ap->a_vap,
			    ap->a_target);
		union_unlock_upper(dvp, td);
	}
	return (error);
}

/*
 * union_readdir works in concert with getdirentries() and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries() is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 *
 * union_readdir(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred,
 *		 int *a_eofflag, off_t *a_cookies, int a_ncookies)
 */
static int
union_readdir(struct vop_readdir_args *ap)
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct thread *td = ap->a_uio->uio_td;
	struct vnode *uvp;
	int error = 0;

	if ((uvp = union_ref_upper(un)) != NULLVP) {
		ap->a_head.a_ops = *uvp->v_ops;
		ap->a_vp = uvp;
		error = vop_readdir_ap(ap);
		vrele(uvp);
	}
	return(error);
}
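
/*
 * union_readlink:
 *
 *	Pass the readlink through to the backing vnode (the upper layer
 *	when present, otherwise the lower layer), which is held locked
 *	for the duration of the call.
 */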

/*
 * union_readlink(struct vnode *a_vp, struct uio *a_uio, struct ucred *a_cred)
 */
static int
union_readlink(struct vop_readlink_args *ap)
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_readlink: backing vnode missing!"));

	ap->a_head.a_ops = *vp->v_ops;
	ap->a_vp = vp;
	error = vop_readlink_ap(ap);
	union_unlock_other(vp, td);

	return (error);
}

/*
 * union_inactive:
 *
 *	Called with the vnode locked.  We are expected to unlock the vnode.
 *
 * union_inactive(struct vnode *a_vp, struct thread *a_td)
 */
static int
union_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	/*struct thread *td = ap->a_td;*/
	struct union_node *un = VTOUNION(vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		kfree(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

#if 0
	if ((un->un_flags & UN_ULOCK) && un->un_uppervp) {
		un->un_flags &= ~UN_ULOCK;
		vn_unlock(un->un_uppervp);
	}
#endif

	if ((un->un_flags & UN_CACHED) == 0)
		vgone_vxlocked(vp);

	return (0);
}

/*
 * union_reclaim(struct vnode *a_vp)
 */
static int
union_reclaim(struct vop_reclaim_args *ap)
{
	union_freevp(ap->a_vp);

	return (0);
}

/*
 * union_bmap:
 *
 *	There isn't much we can do.  We cannot push through to the real vnode
 *	to get to the underlying device because this will bypass data
 *	cached by the real vnode.
 *
 *	For some reason we cannot return the 'real' vnode either, it seems
 *	to blow up memory maps.
 *
 * union_bmap(struct vnode *a_vp, off_t a_loffset,
 *	      off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
union_bmap(struct vop_bmap_args *ap)
{
	return(EOPNOTSUPP);
}

/*
 * union_print(struct vnode *a_vp)
 */
static int
union_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
	       vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));

	return (0);
}

/*
 * union_pathconf(struct vnode *a_vp, int a_name, int *a_retval)
 */
static int
union_pathconf(struct vop_pathconf_args *ap)
{
	int error;
	struct thread *td = curthread;		/* XXX */
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	vp = union_lock_other(un, td);
	KASSERT(vp != NULL, ("union_pathconf: backing vnode missing!"));

	ap->a_head.a_ops = *vp->v_ops;
	ap->a_vp = vp;
	error = vop_pathconf_ap(ap);
	union_unlock_other(vp, td);

	return (error);
}

/*
 * union_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		 struct flock *a_fl, int a_flags)
 */
static int
union_advlock(struct vop_advlock_args *ap)
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_head.a_ops = *ovp->v_ops;
	ap->a_vp = ovp;
	return (vop_advlock_ap(ap));
}

/*
 * XXX - vop_strategy must be hand coded because it has no
 *	 vnode in its arguments.
 * YYY - and it is not coherent with anything.
 *	 This goes away with a merged VM/buffer cache.
 *
 * union_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
union_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct vnode *othervp = OTHERVP(ap->a_vp);

#ifdef DIAGNOSTIC
	if (othervp == NULLVP)
		panic("union_strategy: nil vp");
	if (bp->b_cmd != BUF_CMD_READ && (othervp == LOWERVP(ap->a_vp)))
		panic("union_strategy: writing to lowervp");
#endif
	return (vn_strategy(othervp, bio));
}

/*
 * Global vfs data structures
 */
struct vop_ops union_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		union_access,
	.vop_advlock =		union_advlock,
	.vop_bmap =		union_bmap,
	.vop_close =		union_close,
	.vop_old_create =	union_create,
	.vop_fsync =		union_fsync,
	.vop_getpages =		union_getpages,
	.vop_putpages =		union_putpages,
	.vop_getattr =		union_getattr,
	.vop_inactive =		union_inactive,
	.vop_ioctl =		union_ioctl,
	.vop_old_link =		union_link,
	.vop_old_lookup =	union_lookup,
	.vop_old_mkdir =	union_mkdir,
	.vop_old_mknod =	union_mknod,
	.vop_mmap =		union_mmap,
	.vop_open =		union_open,
	.vop_pathconf =		union_pathconf,
	.vop_poll =		union_poll,
	.vop_print =		union_print,
	.vop_read =		union_read,
	.vop_readdir =		union_readdir,
	.vop_readlink =		union_readlink,
	.vop_reclaim =		union_reclaim,
	.vop_old_remove =	union_remove,
	.vop_old_rename =	union_rename,
	.vop_old_rmdir =	union_rmdir,
	.vop_setattr =		union_setattr,
	.vop_strategy =		union_strategy,
	.vop_old_symlink =	union_symlink,
	.vop_old_whiteout =	union_whiteout,
	.vop_write =		union_write
};