/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2017 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 */

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/fem.h>
#include <sys/vfs.h>
#include <sys/vfs_dispatch.h>
#include <sys/vnode.h>
#include <sys/vnode_dispatch.h>

#define NNODES_DEFAULT	8	/* Default number of nodes in a fem_list */

/*
 * fl_ntob(n) - Fem_list: number of nodes to bytes
 * Given the number of nodes in a fem_list, return the size, in bytes,
 * of the fem_list structure.
 */
#define fl_ntob(n)	(sizeof (struct fem_list) + \
			(n) * sizeof (struct fem_node))
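
/*
 * For example, with the default of NNODES_DEFAULT (8) nodes,
 * fl_ntob(NNODES_DEFAULT) evaluates to
 *	sizeof (struct fem_list) + 8 * sizeof (struct fem_node),
 * i.e. the list header plus the space for the embedded node array.
 */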

enum femtype {
	FEMTYPE_NULL,		/* Uninitialized */
	FEMTYPE_VNODE,
	FEMTYPE_VFS,
	FEMTYPE_NTYPES
};

#define FEM_GUARD(_t)	femtype[(_t)].guard

static struct fem_type_info {
	struct fem_node	guard;
} femtype[FEMTYPE_NTYPES];

static struct fem fem_guard_ops = {
	.name = "fem-guard",
	.femop_open = fem_err,
	.femop_close = fem_err,
	.femop_read = fem_err,
	.femop_write = fem_err,
	.femop_ioctl = fem_err,
	.femop_setfl = fem_err,
	.femop_getattr = fem_err,
	.femop_setattr = fem_err,
	.femop_access = fem_err,
	.femop_lookup = fem_err,
	.femop_create = fem_err,
	.femop_remove = fem_err,
	.femop_link = fem_err,
	.femop_rename = fem_err,
	.femop_mkdir = fem_err,
	.femop_rmdir = fem_err,
	.femop_readdir = fem_err,
	.femop_symlink = fem_err,
	.femop_readlink = fem_err,
	.femop_fsync = fem_err,
	.femop_inactive = (void (*)()) fem_err,
	.femop_fid = fem_err,
	.femop_rwlock = fem_err,
	.femop_rwunlock = (void (*)()) fem_err,
	.femop_seek = fem_err,
	.femop_cmp = fem_err,
	.femop_frlock = fem_err,
	.femop_space = fem_err,
	.femop_realvp = fem_err,
	.femop_getpage = fem_err,
	.femop_putpage = fem_err,
	.femop_map = (void *) fem_err,
	.femop_addmap = (void *) fem_err,
	.femop_delmap = fem_err,
	.femop_poll = (void *) fem_err,
	.femop_dump = fem_err,
	.femop_pathconf = fem_err,
	.femop_pageio = fem_err,
	.femop_dumpctl = fem_err,
	.femop_dispose = (void *) fem_err,
	.femop_setsecattr = fem_err,
	.femop_getsecattr = fem_err,
	.femop_shrlock = fem_err,
	.femop_vnevent = fem_err,
	.femop_reqzcbuf = fem_err,
	.femop_retzcbuf = fem_err,
};

static struct fsem fsem_guard_ops = {
	.name = "fsem-guard",
	.fsemop_mount = fsem_err,
	.fsemop_unmount = fsem_err,
	.fsemop_root = fsem_err,
	.fsemop_statvfs = fsem_err,
	.fsemop_sync = (void *) fsem_err,
	.fsemop_vget = fsem_err,
	.fsemop_mountroot = fsem_err,
	.fsemop_freevfs = (void *) fsem_err,
	.fsemop_vnstate = fsem_err,
};

/*
 * vsop_find, vfsop_find -
 *
 * These macros descend the stack until they find either a basic
 * vnode/vfs operation [ indicated by a null fn_available ] or a
 * stacked item where this method is non-null [_vsop].
 */

#define vsop_find(ap, _vsop) \
	_op_find((ap), offsetof(struct fem, _vsop))

#define vfsop_find(ap, _fsop) \
	_op_find((ap), offsetof(struct fsem, _fsop))

static void *
_op_find(femarg_t *ap, size_t offs1)
{
	for (;;) {
		struct fem_node *fnod = ap->fa_fnode;
		void *fp;

		/* a null fn_available marks the base of the stack */
		if (fnod->fn_available == NULL)
			return (NULL);

		fp = *(void **)((char *)fnod->fn_op.anon + offs1);
		if (fp != NULL)
			return (fp);

		/* this monitor does not supply the method; keep descending */
		ap->fa_fnode--;
	}
}

/*
 * fem_get, fem_release - manage reference counts on the stack.
 *
 * The list of monitors can be updated while operations are in
 * progress on the object.
 *
 * The reference count facilitates this by counting the number of
 * current accessors, and deconstructing the list when it is exhausted.
 *
 * fem_lock() is required to:
 *	update what femh_list points to
 *	increase femh_list->feml_refc.
 *
 * The feml_refc can decrement without holding the lock;
 * when feml_refc becomes zero, the list is destroyed.
 */
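
/*
 * A minimal usage sketch (schematic only; the interposition routines
 * further down are the real callers of this pair):
 *
 *	struct fem_list *sp;
 *
 *	if ((sp = fem_get(head)) != NULL) {
 *		... invoke the top-of-stack monitor ...
 *		fem_release(sp);
 *	}
 */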

static struct fem_list *
fem_lock(struct fem_head *fp)
{
	struct fem_list *sp = NULL;

	ASSERT(fp != NULL);
	mutex_enter(&fp->femh_lock);
	sp = fp->femh_list;
	return (sp);
}

static void
fem_unlock(struct fem_head *fp)
{
	ASSERT(fp != NULL);
	mutex_exit(&fp->femh_lock);
}

/*
 * Addref can only be called while its head->lock is held.
 */
static void
fem_addref(struct fem_list *sp)
{
	atomic_inc_32(&sp->feml_refc);
}

static uint32_t
fem_delref(struct fem_list *sp)
{
	return (atomic_dec_32_nv(&sp->feml_refc));
}

static struct fem_list *
fem_get(struct fem_head *fp)
{
	struct fem_list *sp = NULL;

	if (fp != NULL) {
		if ((sp = fem_lock(fp)) != NULL)
			fem_addref(sp);
		fem_unlock(fp);
	}
	return (sp);
}

static void
fem_release(struct fem_list *sp)
{
	int i;

	ASSERT(sp->feml_refc != 0);
	if (fem_delref(sp) == 0) {
		/*
		 * Before freeing the list, we need to release the
		 * caller-provided data.
		 */
		for (i = sp->feml_tos; i > 0; i--) {
			struct fem_node *fnp = &sp->feml_nodes[i];

			if (fnp->fn_av_rele != NULL)
				(*(fnp->fn_av_rele))(fnp->fn_available);
		}
		kmem_free(sp, fl_ntob(sp->feml_ssize));
	}
}

/*
 * These are the 'head' operations which perform the interposition.
 *
 * This set must be 1:1, onto with the (vnodeops, vfsops).
 *
 * If there is a desire to globally disable interposition for a particular
 * method, the corresponding 'head' routine should unearth the base method
 * and invoke it directly rather than redirecting through the monitor stack.
 *
 * All the functions are virtually the same, save for names, types & args.
 *  1. get a reference to the monitor stack for this object.
 *  2. store the top of stack into the femarg structure.
 *  3. store the basic object (vnode *, vnode **, vfs *) in the femarg struct.
 *  4. invoke the "top" method for this object.
 *  5. release the reference to the monitor stack.
 */
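
/*
 * Schematic sketch of the common skeleton (the real routines below also
 * handle the case where no monitor stack is installed on the object):
 *
 *	femsp = fem_get(obj->femhead);				(step 1)
 *	farg.fa_vnode.<obj> = obj;				(step 3)
 *	farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;	(step 2)
 *	func = vsop_find(&farg, femop_<method>);
 *	ret = func(&farg, ...);					(step 4)
 *	fem_release(femsp);					(step 5)
 */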

static int
vhead_open(vnode_t **vpp, int mode, cred_t *cr, caller_context_t *ct)
{
	int (*func)(femarg_t *, int, cred_t *, caller_context_t *);
	struct fem_list *femsp;
	femarg_t farg;
	int ret;

	if ((femsp = fem_get((*vpp)->v_femhead)) == NULL) {
		func = NULL;
	} else {
		farg.fa_vnode.vpp = vpp;
		farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
		func = vsop_find(&farg, femop_open);
	}

	if (func != NULL)
		ret = func(&farg, mode, cr, ct);
	else
		ret = fop_open_dispatch(vpp, mode, cr, ct, false);

	if (femsp != NULL)
		fem_release(femsp);

	return ret;
}

static int
vhead_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	int (*func)(femarg_t *, int, int, offset_t, cred_t *,
	    caller_context_t *);
	struct fem_list *femsp;
	femarg_t farg;
	int ret;

	if ((femsp = fem_get(vp->v_femhead)) == NULL) {
		func = NULL;
	} else {
		farg.fa_vnode.vp = vp;
		farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
		func = vsop_find(&farg, femop_close);
	}

	if (func != NULL)
		ret = func(&farg, flag, count, offset, cr, ct);
	else
		ret = fop_close_dispatch(vp, flag, count, offset, cr, ct,
		    false);

	if (femsp != NULL)
		fem_release(femsp);

	return ret;
}

static int
vhead_read(vnode_t *vp, uio_t *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	int (*func)(femarg_t *, uio_t *, int, cred_t *, caller_context_t *);
	struct fem_list *femsp;
	femarg_t farg;
	int ret;

	if ((femsp = fem_get(vp->v_femhead)) == NULL) {
		func = NULL;
	} else {
		farg.fa_vnode.vp = vp;
		farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
		func = vsop_find(&farg, femop_read);
	}

	if (func != NULL)
		ret = func(&farg, uiop, ioflag, cr, ct);
	else
		ret = fop_read_dispatch(vp, uiop, ioflag, cr, ct, false);

	if (femsp != NULL)
		fem_release(femsp);

	return ret;
}

static int
vhead_write(vnode_t *vp, uio_t *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	int (*func)(femarg_t *, uio_t *, int, cred_t *, caller_context_t *);
	struct fem_list *femsp;
	femarg_t farg;
	int ret;

	if ((femsp = fem_get(vp->v_femhead)) == NULL) {
		func = NULL;
	} else {
		farg.fa_vnode.vp = vp;
		farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
		func = vsop_find(&farg, femop_write);
	}

	if (func != NULL)
		ret = func(&farg, uiop, ioflag, cr, ct);
	else
		ret = fop_write_dispatch(vp, uiop, ioflag, cr, ct, false);

	if (femsp != NULL)
		fem_release(femsp);

	return ret;
}
389 vhead_ioctl(vnode_t
*vp
, int cmd
, intptr_t arg
, int flag
, cred_t
*cr
,
390 int *rvalp
, caller_context_t
*ct
)
392 int (*func
)(femarg_t
*, int, intptr_t, int, cred_t
*, int *,
394 struct fem_list
*femsp
;
398 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
401 farg
.fa_vnode
.vp
= vp
;
402 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
403 func
= vsop_find(&farg
, femop_ioctl
);
407 ret
= func(&farg
, cmd
, arg
, flag
, cr
, rvalp
, ct
);
409 ret
= fop_ioctl_dispatch(vp
, cmd
, arg
, flag
, cr
, rvalp
, ct
,
418 vhead_setfl(vnode_t
*vp
, int oflags
, int nflags
, cred_t
*cr
,
419 caller_context_t
*ct
)
421 int (*func
)(femarg_t
*, int, int, cred_t
*, caller_context_t
*);
422 struct fem_list
*femsp
;
426 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
429 farg
.fa_vnode
.vp
= vp
;
430 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
431 func
= vsop_find(&farg
, femop_setfl
);
435 ret
= func(&farg
, oflags
, nflags
, cr
, ct
);
437 ret
= fop_setfl_dispatch(vp
, oflags
, nflags
, cr
, ct
, false);
445 vhead_getattr(vnode_t
*vp
, vattr_t
*vap
, int flags
, cred_t
*cr
,
446 caller_context_t
*ct
)
448 int (*func
)(femarg_t
*, vattr_t
*, int, cred_t
*, caller_context_t
*);
449 struct fem_list
*femsp
;
453 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
456 farg
.fa_vnode
.vp
= vp
;
457 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
458 func
= vsop_find(&farg
, femop_getattr
);
462 ret
= func(&farg
, vap
, flags
, cr
, ct
);
464 ret
= fop_getattr_dispatch(vp
, vap
, flags
, cr
, ct
, false);
472 vhead_setattr(vnode_t
*vp
, vattr_t
*vap
, int flags
, cred_t
*cr
,
473 caller_context_t
*ct
)
475 int (*func
)(femarg_t
*, vattr_t
*, int, cred_t
*, caller_context_t
*);
476 struct fem_list
*femsp
;
480 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
483 farg
.fa_vnode
.vp
= vp
;
484 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
485 func
= vsop_find(&farg
, femop_setattr
);
489 ret
= func(&farg
, vap
, flags
, cr
, ct
);
491 ret
= fop_setattr_dispatch(vp
, vap
, flags
, cr
, ct
, false);
499 vhead_access(vnode_t
*vp
, int mode
, int flags
, cred_t
*cr
,
500 caller_context_t
*ct
)
502 int (*func
)(femarg_t
*, int, int, cred_t
*, caller_context_t
*);
503 struct fem_list
*femsp
;
507 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
510 farg
.fa_vnode
.vp
= vp
;
511 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
512 func
= vsop_find(&farg
, femop_access
);
516 ret
= func(&farg
, mode
, flags
, cr
, ct
);
518 ret
= fop_access_dispatch(vp
, mode
, flags
, cr
, ct
, false);
526 vhead_lookup(vnode_t
*dvp
, char *nm
, vnode_t
**vpp
, pathname_t
*pnp
,
527 int flags
, vnode_t
*rdir
, cred_t
*cr
, caller_context_t
*ct
,
528 int *direntflags
, pathname_t
*realpnp
)
530 int (*func
)(femarg_t
*, char *, vnode_t
**, pathname_t
*, int,
531 vnode_t
*, cred_t
*, caller_context_t
*, int *,
533 struct fem_list
*femsp
;
537 if ((femsp
= fem_get(dvp
->v_femhead
)) == NULL
) {
540 farg
.fa_vnode
.vp
= dvp
;
541 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
542 func
= vsop_find(&farg
, femop_lookup
);
546 ret
= func(&farg
, nm
, vpp
, pnp
, flags
, rdir
, cr
, ct
,
547 direntflags
, realpnp
);
549 ret
= fop_lookup_dispatch(dvp
, nm
, vpp
, pnp
, flags
, rdir
,
550 cr
, ct
, direntflags
, realpnp
, false);
558 vhead_create(vnode_t
*dvp
, char *name
, vattr_t
*vap
, vcexcl_t excl
,
559 int mode
, vnode_t
**vpp
, cred_t
*cr
, int flag
, caller_context_t
*ct
,
562 int (*func
)(femarg_t
*, char *, vattr_t
*, vcexcl_t
, int, vnode_t
**,
563 cred_t
*, int, caller_context_t
*, vsecattr_t
*);
564 struct fem_list
*femsp
;
568 if ((femsp
= fem_get(dvp
->v_femhead
)) == NULL
) {
571 farg
.fa_vnode
.vp
= dvp
;
572 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
573 func
= vsop_find(&farg
, femop_create
);
577 ret
= func(&farg
, name
, vap
, excl
, mode
, vpp
, cr
, flag
, ct
,
580 ret
= fop_create_dispatch(dvp
, name
, vap
, excl
, mode
, vpp
,
581 cr
, flag
, ct
, vsecp
, false);
589 vhead_remove(vnode_t
*dvp
, char *nm
, cred_t
*cr
, caller_context_t
*ct
,
592 int (*func
)(femarg_t
*, char *, cred_t
*, caller_context_t
*, int);
593 struct fem_list
*femsp
;
597 if ((femsp
= fem_get(dvp
->v_femhead
)) == NULL
) {
600 farg
.fa_vnode
.vp
= dvp
;
601 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
602 func
= vsop_find(&farg
, femop_remove
);
606 ret
= func(&farg
, nm
, cr
, ct
, flags
);
608 ret
= fop_remove_dispatch(dvp
, nm
, cr
, ct
, flags
, false);
616 vhead_link(vnode_t
*tdvp
, vnode_t
*svp
, char *tnm
, cred_t
*cr
,
617 caller_context_t
*ct
, int flags
)
619 int (*func
)(femarg_t
*, vnode_t
*, char *, cred_t
*,
620 caller_context_t
*, int);
621 struct fem_list
*femsp
;
625 if ((femsp
= fem_get(tdvp
->v_femhead
)) == NULL
) {
628 farg
.fa_vnode
.vp
= tdvp
;
629 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
630 func
= vsop_find(&farg
, femop_link
);
634 ret
= func(&farg
, svp
, tnm
, cr
, ct
, flags
);
636 ret
= fop_link_dispatch(tdvp
, svp
, tnm
, cr
, ct
, flags
, false);
644 vhead_rename(vnode_t
*sdvp
, char *snm
, vnode_t
*tdvp
, char *tnm
,
645 cred_t
*cr
, caller_context_t
*ct
, int flags
)
647 int (*func
)(femarg_t
*, char *, vnode_t
*, char *, cred_t
*,
648 caller_context_t
*,int);
649 struct fem_list
*femsp
;
653 if ((femsp
= fem_get(sdvp
->v_femhead
)) == NULL
) {
656 farg
.fa_vnode
.vp
= sdvp
;
657 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
658 func
= vsop_find(&farg
, femop_rename
);
662 ret
= func(&farg
, snm
, tdvp
, tnm
, cr
, ct
, flags
);
664 ret
= fop_rename_dispatch(sdvp
, snm
, tdvp
, tnm
, cr
, ct
,
673 vhead_mkdir(vnode_t
*dvp
, char *dirname
, vattr_t
*vap
, vnode_t
**vpp
,
674 cred_t
*cr
, caller_context_t
*ct
, int flags
, vsecattr_t
*vsecp
)
676 int (*func
)(femarg_t
*, char *, vattr_t
*, vnode_t
**, cred_t
*,
677 caller_context_t
*, int, vsecattr_t
*);
678 struct fem_list
*femsp
;
682 if ((femsp
= fem_get(dvp
->v_femhead
)) == NULL
) {
685 farg
.fa_vnode
.vp
= dvp
;
686 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
687 func
= vsop_find(&farg
, femop_mkdir
);
691 ret
= func(&farg
, dirname
, vap
, vpp
, cr
, ct
, flags
, vsecp
);
693 ret
= fop_mkdir_dispatch(dvp
, dirname
, vap
, vpp
, cr
, ct
, flags
,
702 vhead_rmdir(vnode_t
*dvp
, char *nm
, vnode_t
*cdir
, cred_t
*cr
,
703 caller_context_t
*ct
, int flags
)
705 int (*func
)(femarg_t
*, char *, vnode_t
*, cred_t
*, caller_context_t
*,
707 struct fem_list
*femsp
;
711 if ((femsp
= fem_get(dvp
->v_femhead
)) == NULL
) {
714 farg
.fa_vnode
.vp
= dvp
;
715 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
716 func
= vsop_find(&farg
, femop_rmdir
);
720 ret
= func(&farg
, nm
, cdir
, cr
, ct
, flags
);
722 ret
= fop_rmdir_dispatch(dvp
, nm
, cdir
, cr
, ct
, flags
, false);
730 vhead_readdir(vnode_t
*vp
, uio_t
*uiop
, cred_t
*cr
, int *eofp
,
731 caller_context_t
*ct
, int flags
)
733 int (*func
)(femarg_t
*, uio_t
*, cred_t
*, int *, caller_context_t
*,
735 struct fem_list
*femsp
;
739 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
742 farg
.fa_vnode
.vp
= vp
;
743 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
744 func
= vsop_find(&farg
, femop_readdir
);
748 ret
= func(&farg
, uiop
, cr
, eofp
, ct
, flags
);
750 ret
= fop_readdir_dispatch(vp
, uiop
, cr
, eofp
, ct
, flags
,
759 vhead_symlink(vnode_t
*dvp
, char *linkname
, vattr_t
*vap
, char *target
,
760 cred_t
*cr
, caller_context_t
*ct
, int flags
)
762 int (*func
)(femarg_t
*, char *, vattr_t
*, char *, cred_t
*,
763 caller_context_t
*, int);
764 struct fem_list
*femsp
;
768 if ((femsp
= fem_get(dvp
->v_femhead
)) == NULL
) {
771 farg
.fa_vnode
.vp
= dvp
;
772 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
773 func
= vsop_find(&farg
, femop_symlink
);
777 ret
= func(&farg
, linkname
, vap
, target
, cr
, ct
, flags
);
779 ret
= fop_symlink_dispatch(dvp
, linkname
, vap
, target
, cr
, ct
,
788 vhead_readlink(vnode_t
*vp
, uio_t
*uiop
, cred_t
*cr
, caller_context_t
*ct
)
790 int (*func
)(femarg_t
*, uio_t
*, cred_t
*, caller_context_t
*);
791 struct fem_list
*femsp
;
795 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
798 farg
.fa_vnode
.vp
= vp
;
799 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
800 func
= vsop_find(&farg
, femop_readlink
);
804 ret
= func(&farg
, uiop
, cr
, ct
);
806 ret
= fop_readlink_dispatch(vp
, uiop
, cr
, ct
, false);
814 vhead_fsync(vnode_t
*vp
, int syncflag
, cred_t
*cr
, caller_context_t
*ct
)
816 int (*func
)(femarg_t
*, int, cred_t
*, caller_context_t
*);
817 struct fem_list
*femsp
;
821 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
824 farg
.fa_vnode
.vp
= vp
;
825 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
826 func
= vsop_find(&farg
, femop_fsync
);
830 ret
= func(&farg
, syncflag
, cr
, ct
);
832 ret
= fop_fsync_dispatch(vp
, syncflag
, cr
, ct
, false);
840 vhead_inactive(vnode_t
*vp
, cred_t
*cr
, caller_context_t
*ct
)
842 void (*func
)(femarg_t
*, cred_t
*, caller_context_t
*);
843 struct fem_list
*femsp
;
846 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
849 farg
.fa_vnode
.vp
= vp
;
850 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
851 func
= vsop_find(&farg
, femop_inactive
);
857 fop_inactive_dispatch(vp
, cr
, ct
, false);
863 vhead_fid(vnode_t
*vp
, fid_t
*fidp
, caller_context_t
*ct
)
865 int (*func
)(femarg_t
*, fid_t
*, caller_context_t
*);
866 struct fem_list
*femsp
;
870 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
873 farg
.fa_vnode
.vp
= vp
;
874 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
875 func
= vsop_find(&farg
, femop_fid
);
879 ret
= func(&farg
, fidp
, ct
);
881 ret
= fop_fid_dispatch(vp
, fidp
, ct
, false);
889 vhead_rwlock(vnode_t
*vp
, int write_lock
, caller_context_t
*ct
)
891 int (*func
)(femarg_t
*, int, caller_context_t
*);
892 struct fem_list
*femsp
;
896 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
899 farg
.fa_vnode
.vp
= vp
;
900 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
901 func
= vsop_find(&farg
, femop_rwlock
);
905 ret
= func(&farg
, write_lock
, ct
);
907 ret
= fop_rwlock_dispatch(vp
, write_lock
, ct
, false);
915 vhead_rwunlock(vnode_t
*vp
, int write_lock
, caller_context_t
*ct
)
917 void (*func
)(femarg_t
*, int, caller_context_t
*);
918 struct fem_list
*femsp
;
921 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
924 farg
.fa_vnode
.vp
= vp
;
925 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
926 func
= vsop_find(&farg
, femop_rwunlock
);
930 func(&farg
, write_lock
, ct
);
932 fop_rwunlock_dispatch(vp
, write_lock
, ct
, false);
938 vhead_seek(vnode_t
*vp
, offset_t ooff
, offset_t
*noffp
, caller_context_t
*ct
)
940 int (*func
)(femarg_t
*, offset_t
, offset_t
*, caller_context_t
*);
941 struct fem_list
*femsp
;
945 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
948 farg
.fa_vnode
.vp
= vp
;
949 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
950 func
= vsop_find(&farg
, femop_seek
);
954 ret
= func(&farg
, ooff
, noffp
, ct
);
956 ret
= fop_seek_dispatch(vp
, ooff
, noffp
, ct
, false);
964 vhead_cmp(vnode_t
*vp1
, vnode_t
*vp2
, caller_context_t
*ct
)
966 int (*func
)(femarg_t
*, vnode_t
*, caller_context_t
*);
967 struct fem_list
*femsp
;
971 if ((femsp
= fem_get(vp1
->v_femhead
)) == NULL
) {
974 farg
.fa_vnode
.vp
= vp1
;
975 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
976 func
= vsop_find(&farg
, femop_cmp
);
980 ret
= func(&farg
, vp2
, ct
);
982 ret
= fop_cmp_dispatch(vp1
, vp2
, ct
, false);
990 vhead_frlock(vnode_t
*vp
, int cmd
, struct flock64
*bfp
, int flag
,
991 offset_t offset
, struct flk_callback
*flk_cbp
, cred_t
*cr
,
992 caller_context_t
*ct
)
994 int (*func
)(femarg_t
*, int, struct flock64
*, int, offset_t
,
995 struct flk_callback
*, cred_t
*, caller_context_t
*);
996 struct fem_list
*femsp
;
1000 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1003 farg
.fa_vnode
.vp
= vp
;
1004 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1005 func
= vsop_find(&farg
, femop_frlock
);
1009 ret
= func(&farg
, cmd
, bfp
, flag
, offset
, flk_cbp
, cr
, ct
);
1011 ret
= fop_frlock_dispatch(vp
, cmd
, bfp
, flag
, offset
,
1012 flk_cbp
, cr
, ct
, false);
1020 vhead_space(vnode_t
*vp
, int cmd
, struct flock64
*bfp
, int flag
,
1021 offset_t offset
, cred_t
*cr
, caller_context_t
*ct
)
1023 int (*func
)(femarg_t
*, int, struct flock64
*, int, offset_t
,
1024 cred_t
*, caller_context_t
*);
1025 struct fem_list
*femsp
;
1029 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1032 farg
.fa_vnode
.vp
= vp
;
1033 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1034 func
= vsop_find(&farg
, femop_space
);
1038 ret
= func(&farg
, cmd
, bfp
, flag
, offset
, cr
, ct
);
1040 ret
= fop_space_dispatch(vp
, cmd
, bfp
, flag
, offset
, cr
, ct
,
1049 vhead_realvp(vnode_t
*vp
, vnode_t
**vpp
, caller_context_t
*ct
)
1051 int (*func
)(femarg_t
*, vnode_t
**, caller_context_t
*);
1052 struct fem_list
*femsp
;
1056 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1059 farg
.fa_vnode
.vp
= vp
;
1060 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1061 func
= vsop_find(&farg
, femop_realvp
);
1065 ret
= func(&farg
, vpp
, ct
);
1067 ret
= fop_realvp_dispatch(vp
, vpp
, ct
, false);
1075 vhead_getpage(vnode_t
*vp
, offset_t off
, size_t len
, uint_t
*protp
,
1076 struct page
**plarr
, size_t plsz
, struct seg
*seg
, caddr_t addr
,
1077 enum seg_rw rw
, cred_t
*cr
, caller_context_t
*ct
)
1079 int (*func
)(femarg_t
*, offset_t
, size_t, uint_t
*, struct page
**,
1080 size_t, struct seg
*, caddr_t
, enum seg_rw
, cred_t
*,
1081 caller_context_t
*);
1082 struct fem_list
*femsp
;
1086 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1089 farg
.fa_vnode
.vp
= vp
;
1090 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1091 func
= vsop_find(&farg
, femop_getpage
);
1095 ret
= func(&farg
, off
, len
, protp
, plarr
, plsz
, seg
, addr
, rw
,
1098 ret
= fop_getpage_dispatch(vp
, off
, len
, protp
, plarr
, plsz
,
1099 seg
, addr
, rw
, cr
, ct
, false);
1107 vhead_putpage(vnode_t
*vp
, offset_t off
, size_t len
, int flags
, cred_t
*cr
,
1108 caller_context_t
*ct
)
1110 int (*func
)(femarg_t
*, offset_t
, size_t, int, cred_t
*,
1111 caller_context_t
*);
1112 struct fem_list
*femsp
;
1116 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1119 farg
.fa_vnode
.vp
= vp
;
1120 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1121 func
= vsop_find(&farg
, femop_putpage
);
1125 ret
= func(&farg
, off
, len
, flags
, cr
, ct
);
1127 ret
= fop_putpage_dispatch(vp
, off
, len
, flags
, cr
, ct
, false);
1135 vhead_map(vnode_t
*vp
, offset_t off
, struct as
*as
, caddr_t
*addrp
,
1136 size_t len
, uchar_t prot
, uchar_t maxprot
, uint_t flags
,
1137 cred_t
*cr
, caller_context_t
*ct
)
1139 int (*func
)(femarg_t
*, offset_t
, struct as
*, caddr_t
*, size_t,
1140 uchar_t
, uchar_t
, uint_t
, cred_t
*, caller_context_t
*);
1141 struct fem_list
*femsp
;
1145 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1148 farg
.fa_vnode
.vp
= vp
;
1149 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1150 func
= vsop_find(&farg
, femop_map
);
1154 ret
= func(&farg
, off
, as
, addrp
, len
, prot
, maxprot
, flags
,
1157 ret
= fop_map_dispatch(vp
, off
, as
, addrp
, len
, prot
,
1158 maxprot
, flags
, cr
, ct
, false);
1166 vhead_addmap(vnode_t
*vp
, offset_t off
, struct as
*as
, caddr_t addr
,
1167 size_t len
, uchar_t prot
, uchar_t maxprot
, uint_t flags
,
1168 cred_t
*cr
, caller_context_t
*ct
)
1170 int (*func
)(femarg_t
*, offset_t
, struct as
*, caddr_t
, size_t, uchar_t
,
1171 uchar_t
, uint_t
, cred_t
*, caller_context_t
*);
1172 struct fem_list
*femsp
;
1176 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1179 farg
.fa_vnode
.vp
= vp
;
1180 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1181 func
= vsop_find(&farg
, femop_addmap
);
1185 ret
= func(&farg
, off
, as
, addr
, len
, prot
, maxprot
, flags
,
1188 ret
= fop_addmap_dispatch(vp
, off
, as
, addr
, len
, prot
,
1189 maxprot
, flags
, cr
, ct
, false);
1197 vhead_delmap(vnode_t
*vp
, offset_t off
, struct as
*as
, caddr_t addr
,
1198 size_t len
, uint_t prot
, uint_t maxprot
, uint_t flags
, cred_t
*cr
,
1199 caller_context_t
*ct
)
1201 int (*func
)(femarg_t
*, offset_t
, struct as
*, caddr_t
, size_t, uint_t
,
1202 uint_t
, uint_t
, cred_t
*, caller_context_t
*);
1203 struct fem_list
*femsp
;
1207 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1210 farg
.fa_vnode
.vp
= vp
;
1211 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1212 func
= vsop_find(&farg
, femop_delmap
);
1216 ret
= func(&farg
, off
, as
, addr
, len
, prot
, maxprot
, flags
,
1219 ret
= fop_delmap_dispatch(vp
, off
, as
, addr
, len
, prot
,
1220 maxprot
, flags
, cr
, ct
, false);
1228 vhead_poll(vnode_t
*vp
, short events
, int anyyet
, short *reventsp
,
1229 struct pollhead
**phpp
, caller_context_t
*ct
)
1231 int (*func
)(femarg_t
*, short, int, short *, struct pollhead
**,
1232 caller_context_t
*);
1233 struct fem_list
*femsp
;
1237 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1240 farg
.fa_vnode
.vp
= vp
;
1241 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1242 func
= vsop_find(&farg
, femop_poll
);
1246 ret
= func(&farg
, events
, anyyet
, reventsp
, phpp
, ct
);
1248 ret
= fop_poll_dispatch(vp
, events
, anyyet
, reventsp
, phpp
,
1257 vhead_dump(vnode_t
*vp
, caddr_t addr
, offset_t lbdn
, offset_t dblks
,
1258 caller_context_t
*ct
)
1260 int (*func
)(femarg_t
*, caddr_t
, offset_t
, offset_t
,
1261 caller_context_t
*);
1262 struct fem_list
*femsp
;
1266 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1269 farg
.fa_vnode
.vp
= vp
;
1270 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1271 func
= vsop_find(&farg
, femop_dump
);
1275 ret
= func(&farg
, addr
, lbdn
, dblks
, ct
);
1277 ret
= fop_dump_dispatch(vp
, addr
, lbdn
, dblks
, ct
, false);
1285 vhead_pathconf(vnode_t
*vp
, int cmd
, ulong_t
*valp
, cred_t
*cr
,
1286 caller_context_t
*ct
)
1288 int (*func
)(femarg_t
*, int, ulong_t
*, cred_t
*, caller_context_t
*);
1289 struct fem_list
*femsp
;
1293 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1296 farg
.fa_vnode
.vp
= vp
;
1297 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1298 func
= vsop_find(&farg
, femop_pathconf
);
1302 ret
= func(&farg
, cmd
, valp
, cr
, ct
);
1304 ret
= fop_pathconf_dispatch(vp
, cmd
, valp
, cr
, ct
, false);
1312 vhead_pageio(vnode_t
*vp
, struct page
*pp
, uoff_t io_off
,
1313 size_t io_len
, int flags
, cred_t
*cr
, caller_context_t
*ct
)
1315 int (*func
)(femarg_t
*, struct page
*, uoff_t
, size_t, int, cred_t
*,
1316 caller_context_t
*);
1317 struct fem_list
*femsp
;
1321 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1324 farg
.fa_vnode
.vp
= vp
;
1325 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1326 func
= vsop_find(&farg
, femop_pageio
);
1330 ret
= func(&farg
, pp
, io_off
, io_len
, flags
, cr
, ct
);
1332 ret
= fop_pageio_dispatch(vp
, pp
, io_off
, io_len
, flags
, cr
,
1341 vhead_dumpctl(vnode_t
*vp
, int action
, offset_t
*blkp
, caller_context_t
*ct
)
1343 int (*func
)(femarg_t
*, int, offset_t
*, caller_context_t
*);
1344 struct fem_list
*femsp
;
1348 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1351 farg
.fa_vnode
.vp
= vp
;
1352 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1353 func
= vsop_find(&farg
, femop_dumpctl
);
1357 ret
= func(&farg
, action
, blkp
, ct
);
1359 ret
= fop_dumpctl_dispatch(vp
, action
, blkp
, ct
, false);
1367 vhead_dispose(vnode_t
*vp
, struct page
*pp
, int flag
, int dn
, cred_t
*cr
,
1368 caller_context_t
*ct
)
1370 void (*func
)(femarg_t
*, struct page
*, int, int, cred_t
*,
1371 caller_context_t
*);
1372 struct fem_list
*femsp
;
1375 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1378 farg
.fa_vnode
.vp
= vp
;
1379 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1380 func
= vsop_find(&farg
, femop_dispose
);
1384 func(&farg
, pp
, flag
, dn
, cr
, ct
);
1386 fop_dispose_dispatch(vp
, pp
, flag
, dn
, cr
, ct
, false);
1392 vhead_setsecattr(vnode_t
*vp
, vsecattr_t
*vsap
, int flag
, cred_t
*cr
,
1393 caller_context_t
*ct
)
1395 int (*func
)(femarg_t
*, vsecattr_t
*, int, cred_t
*,
1396 caller_context_t
*);
1397 struct fem_list
*femsp
;
1401 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1404 farg
.fa_vnode
.vp
= vp
;
1405 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1406 func
= vsop_find(&farg
, femop_setsecattr
);
1410 ret
= func(&farg
, vsap
, flag
, cr
, ct
);
1412 ret
= fop_setsecattr_dispatch(vp
, vsap
, flag
, cr
, ct
, false);
1420 vhead_getsecattr(vnode_t
*vp
, vsecattr_t
*vsap
, int flag
, cred_t
*cr
,
1421 caller_context_t
*ct
)
1423 int (*func
)(femarg_t
*, vsecattr_t
*, int, cred_t
*,
1424 caller_context_t
*);
1425 struct fem_list
*femsp
;
1429 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1432 farg
.fa_vnode
.vp
= vp
;
1433 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1434 func
= vsop_find(&farg
, femop_getsecattr
);
1438 ret
= func(&farg
, vsap
, flag
, cr
, ct
);
1440 ret
= fop_getsecattr_dispatch(vp
, vsap
, flag
, cr
, ct
, false);
1448 vhead_shrlock(vnode_t
*vp
, int cmd
, struct shrlock
*shr
, int flag
,
1449 cred_t
*cr
, caller_context_t
*ct
)
1451 int (*func
)(femarg_t
*, int, struct shrlock
*, int, cred_t
*,
1452 caller_context_t
*);
1453 struct fem_list
*femsp
;
1457 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1460 farg
.fa_vnode
.vp
= vp
;
1461 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1462 func
= vsop_find(&farg
, femop_shrlock
);
1466 ret
= func(&farg
, cmd
, shr
, flag
, cr
, ct
);
1468 ret
= fop_shrlock_dispatch(vp
, cmd
, shr
, flag
, cr
, ct
, false);
1476 vhead_vnevent(vnode_t
*vp
, vnevent_t vnevent
, vnode_t
*dvp
, char *cname
,
1477 caller_context_t
*ct
)
1479 int (*func
)(femarg_t
*, vnevent_t
, vnode_t
*, char *,
1480 caller_context_t
*);
1481 struct fem_list
*femsp
;
1485 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1488 farg
.fa_vnode
.vp
= vp
;
1489 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1490 func
= vsop_find(&farg
, femop_vnevent
);
1494 ret
= func(&farg
, vnevent
, dvp
, cname
, ct
);
1496 ret
= fop_vnevent_dispatch(vp
, vnevent
, dvp
, cname
, ct
, false);
1504 vhead_reqzcbuf(vnode_t
*vp
, enum uio_rw ioflag
, xuio_t
*xuiop
, cred_t
*cr
,
1505 caller_context_t
*ct
)
1507 int (*func
)(femarg_t
*, enum uio_rw
, xuio_t
*, cred_t
*,
1508 caller_context_t
*);
1509 struct fem_list
*femsp
;
1513 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1516 farg
.fa_vnode
.vp
= vp
;
1517 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1518 func
= vsop_find(&farg
, femop_reqzcbuf
);
1522 ret
= func(&farg
, ioflag
, xuiop
, cr
, ct
);
1524 ret
= fop_reqzcbuf_dispatch(vp
, ioflag
, xuiop
, cr
, ct
, false);
1532 vhead_retzcbuf(vnode_t
*vp
, xuio_t
*xuiop
, cred_t
*cr
, caller_context_t
*ct
)
1534 int (*func
)(femarg_t
*, xuio_t
*, cred_t
*, caller_context_t
*);
1535 struct fem_list
*femsp
;
1539 if ((femsp
= fem_get(vp
->v_femhead
)) == NULL
) {
1542 farg
.fa_vnode
.vp
= vp
;
1543 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1544 func
= vsop_find(&farg
, femop_retzcbuf
);
1548 ret
= func(&farg
, xuiop
, cr
, ct
);
1550 ret
= fop_retzcbuf_dispatch(vp
, xuiop
, cr
, ct
, false);

static int
fshead_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	int (*func)(fsemarg_t *, vnode_t *, struct mounta *, cred_t *);
	struct fem_list *femsp;
	fsemarg_t farg;
	int ret;

	ASSERT(vfsp->vfs_implp);

	if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
		func = NULL;
	} else {
		farg.fa_vnode.vfsp = vfsp;
		farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
		func = vfsop_find(&farg, fsemop_mount);
	}

	if (func != NULL)
		ret = func(&farg, mvp, uap, cr);
	else
		ret = fsop_mount_dispatch(vfsp, mvp, uap, cr, false);

	if (femsp != NULL)
		fem_release(femsp);

	return ret;
}
1586 fshead_unmount(vfs_t
*vfsp
, int flag
, cred_t
*cr
)
1588 int (*func
)(fsemarg_t
*, int, cred_t
*);
1589 struct fem_list
*femsp
;
1593 ASSERT(vfsp
->vfs_implp
);
1595 if ((femsp
= fem_get(vfsp
->vfs_femhead
)) == NULL
) {
1598 farg
.fa_vnode
.vfsp
= vfsp
;
1599 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1600 func
= vfsop_find(&farg
, fsemop_unmount
);
1604 ret
= func(&farg
, flag
, cr
);
1606 ret
= fsop_unmount_dispatch(vfsp
, flag
, cr
, false);
1614 fshead_root(vfs_t
*vfsp
, vnode_t
**vpp
)
1616 int (*func
)(fsemarg_t
*, vnode_t
**);
1617 struct fem_list
*femsp
;
1621 ASSERT(vfsp
->vfs_implp
);
1623 if ((femsp
= fem_get(vfsp
->vfs_femhead
)) == NULL
) {
1626 farg
.fa_vnode
.vfsp
= vfsp
;
1627 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1628 func
= vfsop_find(&farg
, fsemop_root
);
1632 ret
= func(&farg
, vpp
);
1634 ret
= fsop_root_dispatch(vfsp
, vpp
, false);
1642 fshead_statvfs(vfs_t
*vfsp
, statvfs64_t
*sp
)
1644 int (*func
)(fsemarg_t
*, statvfs64_t
*);
1645 struct fem_list
*femsp
;
1649 ASSERT(vfsp
->vfs_implp
);
1651 if ((femsp
= fem_get(vfsp
->vfs_femhead
)) == NULL
) {
1654 farg
.fa_vnode
.vfsp
= vfsp
;
1655 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1656 func
= vfsop_find(&farg
, fsemop_statvfs
);
1660 ret
= func(&farg
, sp
);
1662 ret
= fsop_statfs_dispatch(vfsp
, sp
, false);
1670 fshead_sync(vfs_t
*vfsp
, short flag
, cred_t
*cr
)
1672 int (*func
)(fsemarg_t
*, short, cred_t
*);
1673 struct fem_list
*femsp
;
1677 ASSERT(vfsp
->vfs_implp
);
1679 if ((femsp
= fem_get(vfsp
->vfs_femhead
)) == NULL
) {
1682 farg
.fa_vnode
.vfsp
= vfsp
;
1683 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1684 func
= vfsop_find(&farg
, fsemop_sync
);
1688 ret
= func(&farg
, flag
, cr
);
1690 ret
= fsop_sync_dispatch(vfsp
, flag
, cr
, false);
1698 fshead_vget(vfs_t
*vfsp
, vnode_t
**vpp
, fid_t
*fidp
)
1700 int (*func
)(fsemarg_t
*, vnode_t
**, fid_t
*);
1701 struct fem_list
*femsp
;
1705 ASSERT(vfsp
->vfs_implp
);
1707 if ((femsp
= fem_get(vfsp
->vfs_femhead
)) == NULL
) {
1710 farg
.fa_vnode
.vfsp
= vfsp
;
1711 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1712 func
= vfsop_find(&farg
, fsemop_vget
);
1716 ret
= func(&farg
, vpp
, fidp
);
1718 ret
= fsop_vget_dispatch(vfsp
, vpp
, fidp
, false);
1726 fshead_mountroot(vfs_t
*vfsp
, enum whymountroot reason
)
1728 int (*func
)(fsemarg_t
*, enum whymountroot
);
1729 struct fem_list
*femsp
;
1733 ASSERT(vfsp
->vfs_implp
);
1735 if ((femsp
= fem_get(vfsp
->vfs_femhead
)) == NULL
) {
1738 farg
.fa_vnode
.vfsp
= vfsp
;
1739 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1740 func
= vfsop_find(&farg
, fsemop_mountroot
);
1744 ret
= func(&farg
, reason
);
1746 ret
= fsop_mountroot_dispatch(vfsp
, reason
, false);
1754 fshead_freevfs(vfs_t
*vfsp
)
1756 void (*func
)(fsemarg_t
*);
1757 struct fem_list
*femsp
;
1760 ASSERT(vfsp
->vfs_implp
);
1762 if ((femsp
= fem_get(vfsp
->vfs_femhead
)) == NULL
) {
1765 farg
.fa_vnode
.vfsp
= vfsp
;
1766 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1767 func
= vfsop_find(&farg
, fsemop_freevfs
);
1773 fsop_freefs_dispatch(vfsp
, false);
1779 fshead_vnstate(vfs_t
*vfsp
, vnode_t
*vp
, vntrans_t nstate
)
1781 int (*func
)(fsemarg_t
*, vnode_t
*, vntrans_t
);
1782 struct fem_list
*femsp
;
1786 ASSERT(vfsp
->vfs_implp
);
1788 if ((femsp
= fem_get(vfsp
->vfs_femhead
)) == NULL
) {
1791 farg
.fa_vnode
.vfsp
= vfsp
;
1792 farg
.fa_fnode
= femsp
->feml_nodes
+ femsp
->feml_tos
;
1793 func
= vfsop_find(&farg
, fsemop_vnstate
);
1797 ret
= func(&farg
, vp
, nstate
);
1799 ret
= fsop_vnstate_dispatch(vfsp
, vp
, nstate
, false);

/*
 * This set of routines transfers control to the next stacked monitor.
 *
 * Each routine is identical except for naming, types and arguments.
 *
 * The basic steps are:
 *  1. Decrease the stack pointer by one.
 *  2. If the current item is a base operation (vnode, vfs), goto 5.
 *  3. If the current item does not have a corresponding operation, goto 1.
 *  4. Return by invoking the current item with the argument handle.
 *  5. Return by invoking the base operation with the base object.
 *
 * For each classification, there needs to be at least one "next" operation
 * for each "head" operation.
 */
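
/*
 * Schematic sketch of the delegation pattern (the real routines below
 * fall back to the appropriate fop_*_dispatch()/fsop_*_dispatch() base
 * operation when no further monitor supplies the method):
 *
 *	vf->fa_fnode--;					(step 1)
 *	func = vsop_find(vf, femop_<method>);		(steps 2 and 3)
 *	if (func != NULL)
 *		return func(vf, ...);			(step 4)
 *	return fop_<method>_dispatch(object, ..., false);	(step 5)
 */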

int
vnext_open(femarg_t *vf, int mode, cred_t *cr, caller_context_t *ct)
{
	int (*func)(femarg_t *, int, cred_t *, caller_context_t *);
	struct vnode **vnode = vf->fa_vnode.vpp;

	vf->fa_fnode--;

	func = vsop_find(vf, femop_open);
	if (func != NULL)
		return func(vf, mode, cr, ct);

	return fop_open_dispatch(vnode, mode, cr, ct, false);
}

int
vnext_close(femarg_t *vf, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	int (*func)(femarg_t *, int, int, offset_t, cred_t *,
	    caller_context_t *);
	struct vnode *vnode = vf->fa_vnode.vp;

	vf->fa_fnode--;

	func = vsop_find(vf, femop_close);
	if (func != NULL)
		return func(vf, flag, count, offset, cr, ct);

	return fop_close_dispatch(vnode, flag, count, offset, cr, ct, false);
}
1859 vnext_read(femarg_t
*vf
, uio_t
*uiop
, int ioflag
, cred_t
*cr
,
1860 caller_context_t
*ct
)
1862 int (*func
)(femarg_t
*, uio_t
*, int, cred_t
*, caller_context_t
*);
1863 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
1867 func
= vsop_find(vf
, femop_read
);
1870 return func(vf
, uiop
, ioflag
, cr
, ct
);
1872 return fop_read_dispatch(vnode
, uiop
, ioflag
, cr
, ct
, false);
1876 vnext_write(femarg_t
*vf
, uio_t
*uiop
, int ioflag
, cred_t
*cr
,
1877 caller_context_t
*ct
)
1879 int (*func
)(femarg_t
*, uio_t
*, int, cred_t
*, caller_context_t
*);
1880 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
1884 func
= vsop_find(vf
, femop_write
);
1887 return func(vf
, uiop
, ioflag
, cr
, ct
);
1889 return fop_write_dispatch(vnode
, uiop
, ioflag
, cr
, ct
, false);
1893 vnext_ioctl(femarg_t
*vf
, int cmd
, intptr_t arg
, int flag
, cred_t
*cr
,
1894 int *rvalp
, caller_context_t
*ct
)
1896 int (*func
)(femarg_t
*, int, intptr_t, int, cred_t
*, int *,
1897 caller_context_t
*);
1898 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
1902 func
= vsop_find(vf
, femop_ioctl
);
1905 return func(vf
, cmd
, arg
, flag
, cr
, rvalp
, ct
);
1907 return fop_ioctl_dispatch(vnode
, cmd
, arg
, flag
, cr
, rvalp
, ct
, false);
1911 vnext_setfl(femarg_t
*vf
, int oflags
, int nflags
, cred_t
*cr
,
1912 caller_context_t
*ct
)
1914 int (*func
)(femarg_t
*, int, int, cred_t
*, caller_context_t
*);
1915 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
1919 func
= vsop_find(vf
, femop_setfl
);
1922 return func(vf
, oflags
, nflags
, cr
, ct
);
1924 return fop_setfl_dispatch(vnode
, oflags
, nflags
, cr
, ct
, false);
1928 vnext_getattr(femarg_t
*vf
, vattr_t
*vap
, int flags
, cred_t
*cr
,
1929 caller_context_t
*ct
)
1931 int (*func
)(femarg_t
*, vattr_t
*, int, cred_t
*, caller_context_t
*);
1932 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
1936 func
= vsop_find(vf
, femop_getattr
);
1939 return func(vf
, vap
, flags
, cr
, ct
);
1941 return fop_getattr_dispatch(vnode
, vap
, flags
, cr
, ct
, false);
1945 vnext_setattr(femarg_t
*vf
, vattr_t
*vap
, int flags
, cred_t
*cr
,
1946 caller_context_t
*ct
)
1948 int (*func
)(femarg_t
*, vattr_t
*, int, cred_t
*, caller_context_t
*);
1949 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
1953 func
= vsop_find(vf
, femop_setattr
);
1956 return func(vf
, vap
, flags
, cr
, ct
);
1958 return fop_setattr_dispatch(vnode
, vap
, flags
, cr
, ct
, false);
1962 vnext_access(femarg_t
*vf
, int mode
, int flags
, cred_t
*cr
,
1963 caller_context_t
*ct
)
1965 int (*func
)(femarg_t
*, int, int, cred_t
*, caller_context_t
*);
1966 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
1970 func
= vsop_find(vf
, femop_access
);
1973 return func(vf
, mode
, flags
, cr
, ct
);
1975 return fop_access_dispatch(vnode
, mode
, flags
, cr
, ct
, false);
1979 vnext_lookup(femarg_t
*vf
, char *nm
, vnode_t
**vpp
, pathname_t
*pnp
,
1980 int flags
, vnode_t
*rdir
, cred_t
*cr
, caller_context_t
*ct
,
1981 int *direntflags
, pathname_t
*realpnp
)
1983 int (*func
)(femarg_t
*, char *, vnode_t
**, pathname_t
*, int,
1984 vnode_t
*, cred_t
*, caller_context_t
*, int *,
1986 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
1990 func
= vsop_find(vf
, femop_lookup
);
1993 return func(vf
, nm
, vpp
, pnp
, flags
, rdir
, cr
, ct
,
1994 direntflags
, realpnp
);
1996 return fop_lookup_dispatch(vnode
, nm
, vpp
, pnp
, flags
, rdir
, cr
, ct
,
1997 direntflags
, realpnp
, false);
2001 vnext_create(femarg_t
*vf
, char *name
, vattr_t
*vap
, vcexcl_t excl
,
2002 int mode
, vnode_t
**vpp
, cred_t
*cr
, int flag
, caller_context_t
*ct
,
2005 int (*func
)(femarg_t
*, char *, vattr_t
*, vcexcl_t
, int, vnode_t
**,
2006 cred_t
*, int, caller_context_t
*, vsecattr_t
*);
2007 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2011 func
= vsop_find(vf
, femop_create
);
2014 return func(vf
, name
, vap
, excl
, mode
, vpp
, cr
, flag
, ct
,
2017 return fop_create_dispatch(vnode
, name
, vap
, excl
, mode
, vpp
, cr
, flag
,
2022 vnext_remove(femarg_t
*vf
, char *nm
, cred_t
*cr
, caller_context_t
*ct
,
2025 int (*func
)(femarg_t
*, char *, cred_t
*, caller_context_t
*, int);
2026 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2030 func
= vsop_find(vf
, femop_remove
);
2033 return func(vf
, nm
, cr
, ct
, flags
);
2035 return fop_remove_dispatch(vnode
, nm
, cr
, ct
, flags
, false);
2039 vnext_link(femarg_t
*vf
, vnode_t
*svp
, char *tnm
, cred_t
*cr
,
2040 caller_context_t
*ct
, int flags
)
2042 int (*func
)(femarg_t
*, vnode_t
*, char *, cred_t
*,
2043 caller_context_t
*, int);
2044 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2048 func
= vsop_find(vf
, femop_link
);
2051 return func(vf
, svp
, tnm
, cr
, ct
, flags
);
2053 return fop_link_dispatch(vnode
, svp
, tnm
, cr
, ct
, flags
, false);
2057 vnext_rename(femarg_t
*vf
, char *snm
, vnode_t
*tdvp
, char *tnm
, cred_t
*cr
,
2058 caller_context_t
*ct
, int flags
)
2060 int (*func
)(femarg_t
*, char *, vnode_t
*, char *, cred_t
*,
2061 caller_context_t
*,int);
2062 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2066 func
= vsop_find(vf
, femop_rename
);
2069 return func(vf
, snm
, tdvp
, tnm
, cr
, ct
, flags
);
2071 return fop_rename_dispatch(vnode
, snm
, tdvp
, tnm
, cr
, ct
, flags
, false);
2075 vnext_mkdir(femarg_t
*vf
, char *dirname
, vattr_t
*vap
, vnode_t
**vpp
,
2076 cred_t
*cr
, caller_context_t
*ct
, int flags
, vsecattr_t
*vsecp
)
2078 int (*func
)(femarg_t
*, char *, vattr_t
*, vnode_t
**, cred_t
*,
2079 caller_context_t
*, int, vsecattr_t
*);
2080 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2084 func
= vsop_find(vf
, femop_mkdir
);
2087 return func(vf
, dirname
, vap
, vpp
, cr
, ct
, flags
, vsecp
);
2089 return fop_mkdir_dispatch(vnode
, dirname
, vap
, vpp
, cr
, ct
, flags
,
2094 vnext_rmdir(femarg_t
*vf
, char *nm
, vnode_t
*cdir
, cred_t
*cr
,
2095 caller_context_t
*ct
, int flags
)
2097 int (*func
)(femarg_t
*, char *, vnode_t
*, cred_t
*, caller_context_t
*,
2099 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2103 func
= vsop_find(vf
, femop_rmdir
);
2106 return func(vf
, nm
, cdir
, cr
, ct
, flags
);
2108 return fop_rmdir_dispatch(vnode
, nm
, cdir
, cr
, ct
, flags
, false);
2112 vnext_readdir(femarg_t
*vf
, uio_t
*uiop
, cred_t
*cr
, int *eofp
,
2113 caller_context_t
*ct
, int flags
)
2115 int (*func
)(femarg_t
*, uio_t
*, cred_t
*, int *, caller_context_t
*,
2117 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2121 func
= vsop_find(vf
, femop_readdir
);
2124 return func(vf
, uiop
, cr
, eofp
, ct
, flags
);
2126 return fop_readdir_dispatch(vnode
, uiop
, cr
, eofp
, ct
, flags
, false);
2130 vnext_symlink(femarg_t
*vf
, char *linkname
, vattr_t
*vap
, char *target
,
2131 cred_t
*cr
, caller_context_t
*ct
, int flags
)
2133 int (*func
)(femarg_t
*, char *, vattr_t
*, char *, cred_t
*,
2134 caller_context_t
*, int);
2135 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2139 func
= vsop_find(vf
, femop_symlink
);
2142 return func(vf
, linkname
, vap
, target
, cr
, ct
, flags
);
2144 return fop_symlink_dispatch(vnode
, linkname
, vap
, target
, cr
, ct
,
2149 vnext_readlink(femarg_t
*vf
, uio_t
*uiop
, cred_t
*cr
, caller_context_t
*ct
)
2151 int (*func
)(femarg_t
*, uio_t
*, cred_t
*, caller_context_t
*);
2152 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2156 func
= vsop_find(vf
, femop_readlink
);
2159 return func(vf
, uiop
, cr
, ct
);
2161 return fop_readlink_dispatch(vnode
, uiop
, cr
, ct
, false);
2165 vnext_fsync(femarg_t
*vf
, int syncflag
, cred_t
*cr
, caller_context_t
*ct
)
2167 int (*func
)(femarg_t
*, int, cred_t
*, caller_context_t
*);
2168 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2172 func
= vsop_find(vf
, femop_fsync
);
2175 return func(vf
, syncflag
, cr
, ct
);
2177 return fop_fsync_dispatch(vnode
, syncflag
, cr
, ct
, false);
2181 vnext_inactive(femarg_t
*vf
, cred_t
*cr
, caller_context_t
*ct
)
2183 void (*func
)(femarg_t
*, cred_t
*, caller_context_t
*);
2184 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2188 func
= vsop_find(vf
, femop_inactive
);
2193 fop_inactive_dispatch(vnode
, cr
, ct
, false);
2197 vnext_fid(femarg_t
*vf
, fid_t
*fidp
, caller_context_t
*ct
)
2199 int (*func
)(femarg_t
*, fid_t
*, caller_context_t
*);
2200 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2204 func
= vsop_find(vf
, femop_fid
);
2207 return func(vf
, fidp
, ct
);
2209 return fop_fid_dispatch(vnode
, fidp
, ct
, false);
2213 vnext_rwlock(femarg_t
*vf
, int write_lock
, caller_context_t
*ct
)
2215 int (*func
)(femarg_t
*, int, caller_context_t
*);
2216 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2220 func
= vsop_find(vf
, femop_rwlock
);
2223 return func(vf
, write_lock
, ct
);
2225 return fop_rwlock_dispatch(vnode
, write_lock
, ct
, false);
2229 vnext_rwunlock(femarg_t
*vf
, int write_lock
, caller_context_t
*ct
)
2231 void (*func
)(femarg_t
*, int, caller_context_t
*);
2232 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2236 func
= vsop_find(vf
, femop_rwunlock
);
2239 func(vf
, write_lock
, ct
);
2241 fop_rwunlock_dispatch(vnode
, write_lock
, ct
, false);
2245 vnext_seek(femarg_t
*vf
, offset_t ooff
, offset_t
*noffp
, caller_context_t
*ct
)
2247 int (*func
)(femarg_t
*, offset_t
, offset_t
*, caller_context_t
*);
2248 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2252 func
= vsop_find(vf
, femop_seek
);
2255 return func(vf
, ooff
, noffp
, ct
);
2257 return fop_seek_dispatch(vnode
, ooff
, noffp
, ct
, false);
2261 vnext_cmp(femarg_t
*vf
, vnode_t
*vp2
, caller_context_t
*ct
)
2263 int (*func
)(femarg_t
*, vnode_t
*, caller_context_t
*);
2264 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2268 func
= vsop_find(vf
, femop_cmp
);
2271 return func(vf
, vp2
, ct
);
2273 return fop_cmp_dispatch(vnode
, vp2
, ct
, false);
2277 vnext_frlock(femarg_t
*vf
, int cmd
, struct flock64
*bfp
, int flag
,
2278 offset_t offset
, struct flk_callback
*flk_cbp
, cred_t
*cr
,
2279 caller_context_t
*ct
)
2281 int (*func
)(femarg_t
*, int, struct flock64
*, int, offset_t
,
2282 struct flk_callback
*, cred_t
*, caller_context_t
*);
2283 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2287 func
= vsop_find(vf
, femop_frlock
);
2290 return func(vf
, cmd
, bfp
, flag
, offset
, flk_cbp
, cr
, ct
);
2292 return fop_frlock_dispatch(vnode
, cmd
, bfp
, flag
, offset
, flk_cbp
, cr
,
2297 vnext_space(femarg_t
*vf
, int cmd
, struct flock64
*bfp
, int flag
,
2298 offset_t offset
, cred_t
*cr
, caller_context_t
*ct
)
2300 int (*func
)(femarg_t
*, int, struct flock64
*, int, offset_t
,
2301 cred_t
*, caller_context_t
*);
2302 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2306 func
= vsop_find(vf
, femop_space
);
2309 return func(vf
, cmd
, bfp
, flag
, offset
, cr
, ct
);
2311 return fop_space_dispatch(vnode
, cmd
, bfp
, flag
, offset
, cr
, ct
, false);
2315 vnext_realvp(femarg_t
*vf
, vnode_t
**vpp
, caller_context_t
*ct
)
2317 int (*func
)(femarg_t
*, vnode_t
**, caller_context_t
*);
2318 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2322 func
= vsop_find(vf
, femop_realvp
);
2325 return func(vf
, vpp
, ct
);
2327 return fop_realvp_dispatch(vnode
, vpp
, ct
, false);
2331 vnext_getpage(femarg_t
*vf
, offset_t off
, size_t len
, uint_t
*protp
,
2332 struct page
**plarr
, size_t plsz
, struct seg
*seg
, caddr_t addr
,
2333 enum seg_rw rw
, cred_t
*cr
, caller_context_t
*ct
)
2335 int (*func
)(femarg_t
*, offset_t
, size_t, uint_t
*, struct page
**,
2336 size_t, struct seg
*, caddr_t
, enum seg_rw
, cred_t
*,
2337 caller_context_t
*);
2338 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2342 func
= vsop_find(vf
, femop_getpage
);
2345 return func(vf
, off
, len
, protp
, plarr
, plsz
, seg
, addr
, rw
,
2348 return fop_getpage_dispatch(vnode
, off
, len
, protp
, plarr
, plsz
, seg
, addr
,
2353 vnext_putpage(femarg_t
*vf
, offset_t off
, size_t len
, int flags
,
2354 cred_t
*cr
, caller_context_t
*ct
)
2356 int (*func
)(femarg_t
*, offset_t
, size_t, int, cred_t
*,
2357 caller_context_t
*);
2358 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2362 func
= vsop_find(vf
, femop_putpage
);
2365 return func(vf
, off
, len
, flags
, cr
, ct
);
2367 return fop_putpage_dispatch(vnode
, off
, len
, flags
, cr
, ct
, false);
2371 vnext_map(femarg_t
*vf
, offset_t off
, struct as
*as
, caddr_t
*addrp
,
2372 size_t len
, uchar_t prot
, uchar_t maxprot
, uint_t flags
,
2373 cred_t
*cr
, caller_context_t
*ct
)
2375 int (*func
)(femarg_t
*, offset_t
, struct as
*, caddr_t
*, size_t,
2376 uchar_t
, uchar_t
, uint_t
, cred_t
*, caller_context_t
*);
2377 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2381 func
= vsop_find(vf
, femop_map
);
2384 return func(vf
, off
, as
, addrp
, len
, prot
, maxprot
, flags
,
2387 return fop_map_dispatch(vnode
, off
, as
, addrp
, len
, prot
, maxprot
, flags
,
2392 vnext_addmap(femarg_t
*vf
, offset_t off
, struct as
*as
, caddr_t addr
,
2393 size_t len
, uchar_t prot
, uchar_t maxprot
, uint_t flags
,
2394 cred_t
*cr
, caller_context_t
*ct
)
2396 int (*func
)(femarg_t
*, offset_t
, struct as
*, caddr_t
, size_t, uchar_t
,
2397 uchar_t
, uint_t
, cred_t
*, caller_context_t
*);
2398 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2402 func
= vsop_find(vf
, femop_addmap
);
2405 return func(vf
, off
, as
, addr
, len
, prot
, maxprot
, flags
,
2408 return fop_addmap_dispatch(vnode
, off
, as
, addr
, len
, prot
, maxprot
, flags
,
2413 vnext_delmap(femarg_t
*vf
, offset_t off
, struct as
*as
, caddr_t addr
,
2414 size_t len
, uint_t prot
, uint_t maxprot
, uint_t flags
,
2415 cred_t
*cr
, caller_context_t
*ct
)
2417 int (*func
)(femarg_t
*, offset_t
, struct as
*, caddr_t
, size_t, uint_t
,
2418 uint_t
, uint_t
, cred_t
*, caller_context_t
*);
2419 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2423 func
= vsop_find(vf
, femop_delmap
);
2426 return func(vf
, off
, as
, addr
, len
, prot
, maxprot
, flags
,
2429 return fop_delmap_dispatch(vnode
, off
, as
, addr
, len
, prot
, maxprot
, flags
,
2434 vnext_poll(femarg_t
*vf
, short events
, int anyyet
, short *reventsp
,
2435 struct pollhead
**phpp
, caller_context_t
*ct
)
2437 int (*func
)(femarg_t
*, short, int, short *, struct pollhead
**,
2438 caller_context_t
*);
2439 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2443 func
= vsop_find(vf
, femop_poll
);
2446 return func(vf
, events
, anyyet
, reventsp
, phpp
, ct
);
2448 return fop_poll_dispatch(vnode
, events
, anyyet
, reventsp
, phpp
, ct
, false);
2452 vnext_dump(femarg_t
*vf
, caddr_t addr
, offset_t lbdn
, offset_t dblks
,
2453 caller_context_t
*ct
)
2455 int (*func
)(femarg_t
*, caddr_t
, offset_t
, offset_t
,
2456 caller_context_t
*);
2457 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2461 func
= vsop_find(vf
, femop_dump
);
2464 return func(vf
, addr
, lbdn
, dblks
, ct
);
2466 return fop_dump_dispatch(vnode
, addr
, lbdn
, dblks
, ct
, false);
2470 vnext_pathconf(femarg_t
*vf
, int cmd
, ulong_t
*valp
, cred_t
*cr
,
2471 caller_context_t
*ct
)
2473 int (*func
)(femarg_t
*, int, ulong_t
*, cred_t
*, caller_context_t
*);
2474 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2478 func
= vsop_find(vf
, femop_pathconf
);
2481 return func(vf
, cmd
, valp
, cr
, ct
);
2483 return fop_pathconf_dispatch(vnode
, cmd
, valp
, cr
, ct
, false);
2487 vnext_pageio(femarg_t
*vf
, struct page
*pp
, uoff_t io_off
,
2488 size_t io_len
, int flags
, cred_t
*cr
, caller_context_t
*ct
)
2490 int (*func
)(femarg_t
*, struct page
*, uoff_t
, size_t, int, cred_t
*,
2491 caller_context_t
*);
2492 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2496 func
= vsop_find(vf
, femop_pageio
);
2499 return func(vf
, pp
, io_off
, io_len
, flags
, cr
, ct
);
2501 return fop_pageio_dispatch(vnode
, pp
, io_off
, io_len
, flags
, cr
, ct
,
2506 vnext_dumpctl(femarg_t
*vf
, int action
, offset_t
*blkp
, caller_context_t
*ct
)
2508 int (*func
)(femarg_t
*, int, offset_t
*, caller_context_t
*);
2509 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2513 func
= vsop_find(vf
, femop_dumpctl
);
2516 return func(vf
, action
, blkp
, ct
);
2518 return fop_dumpctl_dispatch(vnode
, action
, blkp
, ct
, false);
2522 vnext_dispose(femarg_t
*vf
, struct page
*pp
, int flag
, int dn
, cred_t
*cr
,
2523 caller_context_t
*ct
)
2525 void (*func
)(femarg_t
*, struct page
*, int, int, cred_t
*,
2526 caller_context_t
*);
2527 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2531 func
= vsop_find(vf
, femop_dispose
);
2534 func(vf
, pp
, flag
, dn
, cr
, ct
);
2536 fop_dispose_dispatch(vnode
, pp
, flag
, dn
, cr
, ct
, false);
2540 vnext_setsecattr(femarg_t
*vf
, vsecattr_t
*vsap
, int flag
, cred_t
*cr
,
2541 caller_context_t
*ct
)
2543 int (*func
)(femarg_t
*, vsecattr_t
*, int, cred_t
*,
2544 caller_context_t
*);
2545 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2549 func
= vsop_find(vf
, femop_setsecattr
);
2552 return func(vf
, vsap
, flag
, cr
, ct
);
2554 return fop_setsecattr_dispatch(vnode
, vsap
, flag
, cr
, ct
, false);
2558 vnext_getsecattr(femarg_t
*vf
, vsecattr_t
*vsap
, int flag
, cred_t
*cr
,
2559 caller_context_t
*ct
)
2561 int (*func
)(femarg_t
*, vsecattr_t
*, int, cred_t
*,
2562 caller_context_t
*);
2563 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2567 func
= vsop_find(vf
, femop_getsecattr
);
2570 return func(vf
, vsap
, flag
, cr
, ct
);
2572 return fop_getsecattr_dispatch(vnode
, vsap
, flag
, cr
, ct
, false);
2576 vnext_shrlock(femarg_t
*vf
, int cmd
, struct shrlock
*shr
, int flag
,
2577 cred_t
*cr
, caller_context_t
*ct
)
2579 int (*func
)(femarg_t
*, int, struct shrlock
*, int, cred_t
*,
2580 caller_context_t
*);
2581 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2585 func
= vsop_find(vf
, femop_shrlock
);
2588 return func(vf
, cmd
, shr
, flag
, cr
, ct
);
2590 return fop_shrlock_dispatch(vnode
, cmd
, shr
, flag
, cr
, ct
, false);
2594 vnext_vnevent(femarg_t
*vf
, vnevent_t vnevent
, vnode_t
*dvp
, char *cname
,
2595 caller_context_t
*ct
)
2597 int (*func
)(femarg_t
*, vnevent_t
, vnode_t
*, char *,
2598 caller_context_t
*);
2599 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2603 func
= vsop_find(vf
, femop_vnevent
);
2606 return func(vf
, vnevent
, dvp
, cname
, ct
);
2608 return fop_vnevent_dispatch(vnode
, vnevent
, dvp
, cname
, ct
, false);
2612 vnext_reqzcbuf(femarg_t
*vf
, enum uio_rw ioflag
, xuio_t
*xuiop
, cred_t
*cr
,
2613 caller_context_t
*ct
)
2615 int (*func
)(femarg_t
*, enum uio_rw
, xuio_t
*, cred_t
*,
2616 caller_context_t
*);
2617 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2621 func
= vsop_find(vf
, femop_reqzcbuf
);
2624 return func(vf
, ioflag
, xuiop
, cr
, ct
);
2626 return fop_reqzcbuf_dispatch(vnode
, ioflag
, xuiop
, cr
, ct
, false);
2630 vnext_retzcbuf(femarg_t
*vf
, xuio_t
*xuiop
, cred_t
*cr
, caller_context_t
*ct
)
2632 int (*func
)(femarg_t
*, xuio_t
*, cred_t
*, caller_context_t
*);
2633 struct vnode
*vnode
= vf
->fa_vnode
.vp
;
2637 func
= vsop_find(vf
, femop_retzcbuf
);
2640 return func(vf
, xuiop
, cr
, ct
);
2642 return fop_retzcbuf_dispatch(vnode
, xuiop
, cr
, ct
, false);

int
vfsnext_mount(fsemarg_t *vf, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	int (*func)(fsemarg_t *, vnode_t *, struct mounta *, cred_t *);
	struct vfs *vfs = vf->fa_vnode.vfsp;

	vf->fa_fnode--;

	func = vfsop_find(vf, fsemop_mount);
	if (func != NULL)
		return func(vf, mvp, uap, cr);

	return fsop_mount_dispatch(vfs, mvp, uap, cr, false);
}
2662 vfsnext_unmount(fsemarg_t
*vf
, int flag
, cred_t
*cr
)
2664 int (*func
)(fsemarg_t
*, int, cred_t
*);
2665 struct vfs
*vfs
= vf
->fa_vnode
.vfsp
;
2669 func
= vfsop_find(vf
, fsemop_unmount
);
2672 return func(vf
, flag
, cr
);
2674 return fsop_unmount_dispatch(vfs
, flag
, cr
, false);
2678 vfsnext_root(fsemarg_t
*vf
, vnode_t
**vpp
)
2680 int (*func
)(fsemarg_t
*, vnode_t
**);
2681 struct vfs
*vfs
= vf
->fa_vnode
.vfsp
;
2685 func
= vfsop_find(vf
, fsemop_root
);
2688 return func(vf
, vpp
);
2690 return fsop_root_dispatch(vfs
, vpp
, false);

int
vfsnext_statvfs(fsemarg_t *vf, statvfs64_t *sp)
{
	int (*func)(fsemarg_t *, statvfs64_t *);
	struct vfs *vfs = vf->fa_vnode.vfsp;

	ASSERT(vf != NULL);
	vf->fa_fnode--;
	func = vfsop_find(vf, fsemop_statvfs);

	if (func != NULL)
		return func(vf, sp);

	return fsop_statfs_dispatch(vfs, sp, false);
}

int
vfsnext_sync(fsemarg_t *vf, short flag, cred_t *cr)
{
	int (*func)(fsemarg_t *, short, cred_t *);
	struct vfs *vfs = vf->fa_vnode.vfsp;

	ASSERT(vf != NULL);
	vf->fa_fnode--;
	func = vfsop_find(vf, fsemop_sync);

	if (func != NULL)
		return func(vf, flag, cr);

	return fsop_sync_dispatch(vfs, flag, cr, false);
}

int
vfsnext_vget(fsemarg_t *vf, vnode_t **vpp, fid_t *fidp)
{
	int (*func)(fsemarg_t *, vnode_t **, fid_t *);
	struct vfs *vfs = vf->fa_vnode.vfsp;

	ASSERT(vf != NULL);
	vf->fa_fnode--;
	func = vfsop_find(vf, fsemop_vget);

	if (func != NULL)
		return func(vf, vpp, fidp);

	return fsop_vget_dispatch(vfs, vpp, fidp, false);
}

int
vfsnext_mountroot(fsemarg_t *vf, enum whymountroot reason)
{
	int (*func)(fsemarg_t *, enum whymountroot);
	struct vfs *vfs = vf->fa_vnode.vfsp;

	ASSERT(vf != NULL);
	vf->fa_fnode--;
	func = vfsop_find(vf, fsemop_mountroot);

	if (func != NULL)
		return func(vf, reason);

	return fsop_mountroot_dispatch(vfs, reason, false);
}

void
vfsnext_freevfs(fsemarg_t *vf)
{
	void (*func)(fsemarg_t *);
	struct vfs *vfs = vf->fa_vnode.vfsp;

	ASSERT(vf != NULL);
	vf->fa_fnode--;
	func = vfsop_find(vf, fsemop_freevfs);

	if (func != NULL)
		func(vf);
	else
		fsop_freefs_dispatch(vfs, false);
}

int
vfsnext_vnstate(fsemarg_t *vf, vnode_t *vp, vntrans_t nstate)
{
	int (*func)(fsemarg_t *, vnode_t *, vntrans_t);
	struct vfs *vfs = vf->fa_vnode.vfsp;

	ASSERT(vf != NULL);
	vf->fa_fnode--;
	func = vfsop_find(vf, fsemop_vnstate);

	if (func != NULL)
		return func(vf, vp, nstate);

	return fsop_vnstate_dispatch(vfs, vp, nstate, false);
}
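
/*
 * Illustrative sketch (hypothetical name): a VFS-level monitor callback
 * follows the same pattern, forwarding with the matching vfsnext_*()
 * routine once its own work is done.
 */
static int
example_monitor_sync(fsemarg_t *vf, short flag, cred_t *cr)
{
	/* monitor-specific work would go here */
	return vfsnext_sync(vf, flag, cr);
}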

/*
 * Create a new fem_head and associate it with the vnode.
 * To keep the unaugmented vnode access path lock free, we update this
 * optimistically - create a new one, then try to install it.  If
 * we fail to install, release the one we just created and pretend
 * we succeeded.
 */
static struct fem_head *
new_femhead(struct fem_head **hp)
{
	struct fem_head *head;

	head = kmem_alloc(sizeof (*head), KM_SLEEP);
	mutex_init(&head->femh_lock, NULL, MUTEX_DEFAULT, NULL);
	head->femh_list = NULL;
	if (atomic_cas_ptr(hp, NULL, head) != NULL) {
		kmem_free(head, sizeof (*head));
		head = *hp;
	}

	return head;
}

/*
 * Create a fem_list.  The fem_list that gets returned is in a
 * very rudimentary state and MUST NOT be used until it's initialized
 * (usually by femlist_construct() or fem_dup_list()).  The refcount
 * and size are set properly and the top-of-stack is set to the "guard"
 * node just to be consistent.
 *
 * If anyone were to accidentally try to run on this fem_list before
 * it's initialized, the system would likely panic trying to dereference
 * the (NULL) fn_op pointer.
 */
static struct fem_list *
femlist_create(int numnodes)
{
	struct fem_list *sp;

	sp = kmem_alloc(fl_ntob(numnodes), KM_SLEEP);
	sp->feml_refc = 1;
	sp->feml_ssize = numnodes;
	sp->feml_nodes[0] = FEM_GUARD(FEMTYPE_NULL);
	sp->feml_tos = 0;

	return sp;
}

/*
 * Construct a new femlist.
 * The list is constructed with the appropriate type of guard to
 * anchor it, and the original ops are inserted.
 */
static struct fem_list *
femlist_construct(int type, int numnodes)
{
	struct fem_list *sp;

	sp = femlist_create(numnodes);
	sp->feml_nodes[0] = FEM_GUARD(type);
	sp->feml_nodes[1].fn_op.anon = NULL;
	sp->feml_nodes[1].fn_available = NULL;
	sp->feml_nodes[1].fn_av_hold = NULL;
	sp->feml_nodes[1].fn_av_rele = NULL;
	sp->feml_tos = 1;

	return sp;
}

/*
 * Duplicate a list.  Copy the original list to the clone.
 *
 * NOTE: The caller must have the fem_head for the lists locked.
 * Assuming the appropriate lock is held and the caller has done the
 * math right, the clone list should be big enough to hold the original.
 */
static void
fem_dup_list(struct fem_list *orig, struct fem_list *clone)
{
	int i;

	ASSERT(clone->feml_ssize >= orig->feml_ssize);

	bcopy(orig->feml_nodes, clone->feml_nodes,
	    sizeof (orig->feml_nodes[0]) * orig->feml_ssize);
	clone->feml_tos = orig->feml_tos;

	/*
	 * Now that we've copied the old list (orig) to the new list (clone),
	 * we need to walk the new list and put another hold on fn_available.
	 */
	for (i = clone->feml_tos; i > 0; i--) {
		struct fem_node *fnp = &clone->feml_nodes[i];

		if (fnp->fn_av_hold)
			(*(fnp->fn_av_hold))(fnp->fn_available);
	}
}

static int
fem_push_node(
	struct fem_head **hp,
	int type,
	struct fem_node *nnode,
	femhow_t how)
{
	struct fem_head *hd;
	struct fem_list *list;
	int error = 0;
	int retry;
	int i;

	/* Validate the node */
	if ((nnode->fn_op.anon == NULL) || (nnode->fn_available == NULL)) {
		return EINVAL;
	}

	if ((hd = *hp) == NULL) {	/* construct a proto-list */
		hd = new_femhead(hp);
	}

	/*
	 * RULE: once a femhead has been pushed onto an object, it cannot be
	 * removed until the object is destroyed.  It can be deactivated by
	 * placing the original 'object operations' onto the object, which
	 * will ignore the femhead.
	 * The loop will exit when the femh_list has space to push a monitor
	 * onto it.
	 */
	do {
		retry = 1;
		list = fem_lock(hd);
		if (list != NULL) {
			if (list->feml_tos+1 < list->feml_ssize) {
				retry = 0;
			} else {
				struct fem_list *olist = list;

				fem_addref(olist);
				fem_unlock(hd);
				list = femlist_create(olist->feml_ssize * 2);
				(void) fem_lock(hd);
				if (hd->femh_list == olist) {
					if (list->feml_ssize <=
					    olist->feml_ssize) {
						/*
						 * We have a new list, but it
						 * is too small to hold the
						 * original contents plus the
						 * one to push.  Release the
						 * new list and start over.
						 */
						fem_release(list);
						fem_unlock(hd);
					} else {
						/*
						 * Life is good:  Our new list
						 * is big enough to hold the
						 * original list (olist) + 1.
						 */
						fem_dup_list(olist, list);
						/* orphan this list */
						hd->femh_list = list;
						(void) fem_delref(olist);
						retry = 0;
					}
				} else {
					/* concurrent update, retry */
					fem_release(list);
					fem_unlock(hd);
				}
				/* remove the reference we added above */
				fem_release(olist);
			}
		} else {
			list = femlist_construct(type, NNODES_DEFAULT);
			(void) fem_lock(hd);
			if (hd->femh_list != NULL) {
				/* concurrent update, retry */
				fem_release(list);
				fem_unlock(hd);
			} else {
				hd->femh_list = list;
				retry = 0;
			}
		}
	} while (retry);

	ASSERT(mutex_owner(&hd->femh_lock) == curthread);
	ASSERT(list->feml_tos+1 < list->feml_ssize);

	/*
	 * The presence of "how" will modify the behavior of how/if
	 * nodes are pushed.  If it's FORCE, then we can skip
	 * all the checks and push it on.
	 */
	if (how != FORCE) {
		/* Start at the top and work our way down */
		for (i = list->feml_tos; i > 0; i--) {
			void *fn_av = list->feml_nodes[i].fn_available;
			void *fn_op = list->feml_nodes[i].fn_op.anon;

			/*
			 * OPARGUNIQ means that this node should not
			 * be pushed on if a node with the same op/avail
			 * combination exists.  This situation returns
			 * EBUSY.
			 *
			 * OPUNIQ means that this node should not be
			 * pushed on if a node with the same op exists.
			 * This situation also returns EBUSY.
			 */
			switch (how) {
			case OPUNIQ:
				if (fn_op == nnode->fn_op.anon) {
					error = EBUSY;
				}
				break;

			case OPARGUNIQ:
				if ((fn_op == nnode->fn_op.anon) &&
				    (fn_av == nnode->fn_available)) {
					error = EBUSY;
				}
				break;

			default:
				error = EINVAL;	/* Unexpected value */
				break;
			}

			if (error)
				break;
		}
	}

	if (error == 0) {
		/*
		 * If no errors, slap the node on the list.
		 * Note: The following is a structure copy.
		 */
		list->feml_nodes[++(list->feml_tos)] = *nnode;
	}

	fem_unlock(hd);

	return error;
}

/*
 * Remove a node by copying the list above it down a notch.
 * If the list is busy, replace it with an idle one and work
 * on it.
 * A node matches if the opset matches and the datap matches or is
 * null.
 */
static int
remove_node(struct fem_list *sp, void *opset, void *datap)
{
	int i;
	struct fem_node *fn;

	for (i = sp->feml_tos; i > 0; i--) {
		fn = sp->feml_nodes+i;
		if (fn->fn_op.anon == opset &&
		    (fn->fn_available == datap || datap == NULL)) {
			break;
		}
	}
	if (i == 0) {
		return EINVAL;
	}

	/*
	 * At this point we have a node in-hand (*fn) that we are about
	 * to remove by overwriting it and adjusting the stack.  This is
	 * our last chance to do anything with this node so we do the
	 * release on the arg.
	 */
	if (fn->fn_av_rele)
		(*(fn->fn_av_rele))(fn->fn_available);

	while (i++ < sp->feml_tos) {
		sp->feml_nodes[i-1] = sp->feml_nodes[i];
	}
	sp->feml_tos--;

	return 0;
}

static int
fem_remove_node(struct fem_head *fh, void *opset, void *datap)
{
	struct fem_list *sp;
	int error = 0;
	int retry;

	if (fh == NULL) {
		return EINVAL;
	}

	do {
		retry = 0;
		if ((sp = fem_lock(fh)) == NULL) {
			fem_unlock(fh);
			error = EINVAL;
		} else if (sp->feml_refc == 1) {
			error = remove_node(sp, opset, datap);
			if (sp->feml_tos == 1) {
				/*
				 * The top-of-stack was decremented by
				 * remove_node().  If it got down to 1,
				 * then the base ops were replaced and we
				 * call fem_release() which will free the
				 * fem_list.
				 */
				fem_release(sp);
				fh->femh_list = NULL;
				/* XXX - Do we need a membar_producer() call? */
			}
			fem_unlock(fh);
		} else {
			/* busy - install a new one without this monitor */
			struct fem_list *nsp;	/* New fem_list being cloned */

			fem_addref(sp);
			fem_unlock(fh);
			nsp = femlist_create(sp->feml_ssize);
			if (fem_lock(fh) == sp) {
				/*
				 * We popped out of the lock, created a
				 * list, then relocked.  If we're in here
				 * then the fem_head points to the same list
				 * it started with.
				 */
				fem_dup_list(sp, nsp);
				error = remove_node(nsp, opset, datap);
				if (error != 0) {
					fem_release(nsp);
				} else if (nsp->feml_tos == 1) {
					/* New list now empty, tear it down */
					fem_release(nsp);
					fh->femh_list = NULL;
				} else {
					fh->femh_list = nsp;
				}
				(void) fem_delref(sp);
			} else {
				/* List changed while locked, try again... */
				fem_release(nsp);
				retry = 1;
			}
			/*
			 * If error is set, then we tried to remove a node
			 * from the list, but failed.  This means that we
			 * will still be using this list so don't release it.
			 */
			if (error == 0) {
				fem_release(sp);
			}
			fem_unlock(fh);
		}
	} while (retry);

	return error;
}

/*
 * Perform the operation on each element until one returns non-zero.
 */
static int
fem_walk_list(
	struct fem_list *sp,
	int (*f)(struct fem_node *, void *, void *),
	void *mon,
	void *arg)
{
	int i;

	ASSERT(sp != NULL);
	for (i = sp->feml_tos; i > 0; i--) {
		if ((*f)(sp->feml_nodes+i, mon, arg) != 0) {
			break;
		}
	}

	return i;
}

/*
 * companion comparison functions.
 */
static int
fem_compare_mon(struct fem_node *n, void *mon, void *arg)
{
	return ((n->fn_op.anon == mon) && (n->fn_available == arg));
}

/*
 * VNODE interposition.
 */

int
fem_install(
	vnode_t *vp,		/* Vnode on which monitor is being installed */
	fem_t *mon,		/* Monitor operations being installed */
	void *arg,		/* Opaque data used by monitor */
	femhow_t how,		/* Installation control */
	void (*arg_hold)(void *),	/* Hold routine for "arg" */
	void (*arg_rele)(void *))	/* Release routine for "arg" */
{
	int error;
	struct fem_node nnode;

	nnode.fn_available = arg;
	nnode.fn_op.fem = mon;
	nnode.fn_av_hold = arg_hold;
	nnode.fn_av_rele = arg_rele;

	/*
	 * If we have a non-NULL hold function, do the hold right away.
	 * The release is done in remove_node().
	 */
	if (arg_hold)
		(*arg_hold)(arg);

	error = fem_push_node(&vp->v_femhead, FEMTYPE_VNODE, &nnode, how);

	/* If there was an error then the monitor wasn't pushed */
	if (error && arg_rele)
		(*arg_rele)(arg);

	return error;
}

int
fem_is_installed(vnode_t *v, fem_t *mon, void *arg)
{
	int e;
	struct fem_list *fl;

	fl = fem_get(v->v_femhead);
	if (fl != NULL) {
		e = fem_walk_list(fl, fem_compare_mon, mon, arg);
		fem_release(fl);
		return e;
	}

	return 0;
}

int
fem_uninstall(vnode_t *v, fem_t *mon, void *arg)
{
	int e;

	e = fem_remove_node(v->v_femhead, mon, arg);

	return e;
}

/*
 * These need to be re-written, but there should be more common bits.
 */

int
fsem_is_installed(struct vfs *v, fsem_t *mon, void *arg)
{
	int e;
	struct fem_list *fl;

	if (v->vfs_implp == NULL)
		return 0;

	fl = fem_get(v->vfs_femhead);
	if (fl != NULL) {
		e = fem_walk_list(fl, fem_compare_mon, mon, arg);
		fem_release(fl);
		return e;
	}

	return 0;
}

int
fsem_install(
	struct vfs *vfsp,	/* VFS on which monitor is being installed */
	fsem_t *mon,		/* Monitor operations being installed */
	void *arg,		/* Opaque data used by monitor */
	femhow_t how,		/* Installation control */
	void (*arg_hold)(void *),	/* Hold routine for "arg" */
	void (*arg_rele)(void *))	/* Release routine for "arg" */
{
	int error;
	struct fem_node nnode;

	/* If this vfs hasn't been properly initialized, fail the install */
	if (vfsp->vfs_implp == NULL)
		return EINVAL;

	nnode.fn_available = arg;
	nnode.fn_op.fsem = mon;
	nnode.fn_av_hold = arg_hold;
	nnode.fn_av_rele = arg_rele;

	/*
	 * If we have a non-NULL hold function, do the hold right away.
	 * The release is done in remove_node().
	 */
	if (arg_hold)
		(*arg_hold)(arg);

	error = fem_push_node(&vfsp->vfs_femhead, FEMTYPE_VFS, &nnode, how);

	/* If there was an error then the monitor wasn't pushed */
	if (error && arg_rele)
		(*arg_rele)(arg);

	return error;
}

int
fsem_uninstall(struct vfs *v, fsem_t *mon, void *arg)
{
	int e;

	if (v->vfs_implp == NULL)
		return EINVAL;

	e = fem_remove_node(v->vfs_femhead, mon, arg);

	return e;
}
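
/*
 * Illustrative sketch at the VFS level (hypothetical name): the same
 * install/uninstall pairing applies, but both calls fail if the vfs has
 * not been fully initialized (vfs_implp == NULL).
 */
static int
example_attach_vfs_monitor(struct vfs *vfsp, fsem_t *ops, void *arg)
{
	int error;

	error = fsem_install(vfsp, ops, arg, OPARGUNIQ, NULL, NULL);
	if (error != 0)
		return error;

	return fsem_uninstall(vfsp, ops, arg);
}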

void
fem_init()
{
	struct fem_type_info *fi;

	/*
	 * This femtype is only used for fem_list creation so we only
	 * need the "guard" to be initialized so that feml_tos has
	 * some rudimentary meaning.  A fem_list must not be used until
	 * it has been initialized (either via femlist_construct() or
	 * fem_dup_list()).  Anything that tries to use this fem_list
	 * before it's actually initialized would panic the system as
	 * soon as "fn_op" (NULL) is dereferenced.
	 */
	fi = &femtype[FEMTYPE_NULL];
	fi->errf = fem_err;
	fi->guard.fn_available = &fi->guard;
	fi->guard.fn_av_hold = NULL;
	fi->guard.fn_av_rele = NULL;
	fi->guard.fn_op.anon = NULL;

	fi = &femtype[FEMTYPE_VNODE];
	fi->errf = fem_err;
	fi->head.fn_available = NULL;
	fi->head.fn_av_hold = NULL;
	fi->head.fn_av_rele = NULL;
	fi->head.fn_op.fem = NULL;
	fi->guard.fn_available = &fi->guard;
	fi->guard.fn_av_hold = NULL;
	fi->guard.fn_av_rele = NULL;
	fi->guard.fn_op.fem = &fem_guard_ops;

	fi = &femtype[FEMTYPE_VFS];
	fi->errf = fsem_err;
	fi->head.fn_available = NULL;
	fi->head.fn_av_hold = NULL;
	fi->head.fn_av_rele = NULL;
	fi->head.fn_op.fsem = NULL;
	fi->guard.fn_available = &fi->guard;
	fi->guard.fn_av_hold = NULL;
	fi->guard.fn_av_rele = NULL;
	fi->guard.fn_op.fsem = &fsem_guard_ops;
}

static int
fem_err()
{
	cmn_err(CE_PANIC, "fem/vnode operations corrupt");
	return 0;
}

static int
fsem_err()
{
	cmn_err(CE_PANIC, "fem/vfs operations corrupt");
	return 0;
}