/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright 2017 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 */
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>

#include <sys/fem.h>
#include <sys/vfs.h>
#include <sys/vfs_dispatch.h>
#include <sys/vnode.h>
#include <sys/vnode_dispatch.h>
#define	NNODES_DEFAULT	8	/* Default number of nodes in a fem_list */
/*
 * fl_ntob(n) - Fem_list: number of nodes to bytes
 * Given the number of nodes in a fem_list, return the size, in bytes,
 * of the fem_list structure.
 */
#define	fl_ntob(n)	(sizeof (struct fem_list) + \
			(n) * sizeof (struct fem_node))
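
/*
 * For example (illustrative only; the actual allocation sites are not
 * shown here), a list sized for the default number of node slots would
 * be obtained roughly as:
 *
 *	struct fem_list *sp = kmem_alloc(fl_ntob(NNODES_DEFAULT), KM_SLEEP);
 *	sp->feml_ssize = NNODES_DEFAULT;
 *
 * so that the matching kmem_free() in fem_release() below can recompute
 * the allocation size from feml_ssize.
 */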

typedef enum {
	FEMTYPE_NULL,	/* Uninitialized */
	FEMTYPE_VNODE,
	FEMTYPE_VFS,
	FEMTYPE_NTYPES
} femtype_t;
#define	FEM_GUARD(_t)	femtype[(_t)].guard

static struct fem_type_info {
	struct fem_node		head;
	struct fem_node		guard;
	femop_t			*errf;
} femtype[FEMTYPE_NTYPES];

int fem_err();
int fsem_err();
static struct fem fem_guard_ops = {
	.name = "fem-guard",
	.femop_open = fem_err,
	.femop_close = fem_err,
	.femop_read = fem_err,
	.femop_write = fem_err,
	.femop_ioctl = fem_err,
	.femop_setfl = fem_err,
	.femop_getattr = fem_err,
	.femop_setattr = fem_err,
	.femop_access = fem_err,
	.femop_lookup = fem_err,
	.femop_create = fem_err,
	.femop_remove = fem_err,
	.femop_link = fem_err,
	.femop_rename = fem_err,
	.femop_mkdir = fem_err,
	.femop_rmdir = fem_err,
	.femop_readdir = fem_err,
	.femop_symlink = fem_err,
	.femop_readlink = fem_err,
	.femop_fsync = fem_err,
	.femop_inactive = (void (*)()) fem_err,
	.femop_fid = fem_err,
	.femop_rwlock = fem_err,
	.femop_rwunlock = (void (*)()) fem_err,
	.femop_seek = fem_err,
	.femop_cmp = fem_err,
	.femop_frlock = fem_err,
	.femop_space = fem_err,
	.femop_realvp = fem_err,
	.femop_getpage = fem_err,
	.femop_putpage = fem_err,
	.femop_map = (void *) fem_err,
	.femop_addmap = (void *) fem_err,
	.femop_delmap = fem_err,
	.femop_poll = (void *) fem_err,
	.femop_dump = fem_err,
	.femop_pathconf = fem_err,
	.femop_pageio = fem_err,
	.femop_dumpctl = fem_err,
	.femop_dispose = (void *) fem_err,
	.femop_setsecattr = fem_err,
	.femop_getsecattr = fem_err,
	.femop_shrlock = fem_err,
	.femop_vnevent = fem_err,
	.femop_reqzcbuf = fem_err,
	.femop_retzcbuf = fem_err,
};
static struct fsem fsem_guard_ops = {
	.name = "fsem-guard",
	.fsemop_mount = fsem_err,
	.fsemop_unmount = fsem_err,
	.fsemop_root = fsem_err,
	.fsemop_statvfs = fsem_err,
	.fsemop_sync = (void *) fsem_err,
	.fsemop_vget = fsem_err,
	.fsemop_mountroot = fsem_err,
	.fsemop_freevfs = (void *) fsem_err,
	.fsemop_vnstate = fsem_err,
};
/*
 * vsop_find, vfsop_find -
 *
 * These macros descend the stack until they find either a basic
 * vnode/vfs operation [ indicated by a null fn_available ] or a
 * stacked item where this method is non-null [_vsop].
 */
#define	vsop_find(ap, _vsop) \
	_op_find((ap), offsetof(struct fem, _vsop))

#define	vfsop_find(ap, _fsop) \
	_op_find((ap), offsetof(struct fsem, _fsop))
static inline void *
_op_find(femarg_t *ap, size_t offs1)
{
	for (;;) {
		struct fem_node *fnod = ap->fa_fnode;
		void *fp;

		if (fnod->fn_available == NULL)
			return NULL;

		fp = *(void **)((char *)fnod->fn_op.anon + offs1);
		if (fp != NULL)
			return fp;

		ap->fa_fnode--;
	}
}
/*
 * fem_get, fem_release - manage reference counts on the stack.
 *
 * The list of monitors can be updated while operations are in
 * progress on the object.
 *
 * The reference count facilitates this by counting the number of
 * current accessors, and deconstructing the list when it is exhausted.
 *
 * fem_lock() is required to:
 *	look at femh_list
 *	update what femh_list points to
 *	update femh_list
 *	increase femh_list->feml_refc.
 *
 * The feml_refc can decrement without holding the lock;
 * when feml_refc becomes zero, the list is destroyed.
 */
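
/*
 * In outline, each accessor of the stack (the vhead_*() and fshead_*()
 * routines below, via fem_get() and fem_release()) does:
 *
 *	sp = fem_lock(fp);		returns fp->femh_list, lock held
 *	fem_addref(sp);
 *	fem_unlock(fp);
 *	... use sp->feml_nodes ...
 *	fem_release(sp);		frees the list when feml_refc hits 0
 */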
static struct fem_list *
fem_lock(struct fem_head *fp)
{
	struct fem_list *sp = NULL;

	ASSERT(fp != NULL);
	mutex_enter(&fp->femh_lock);
	sp = fp->femh_list;
	return (sp);
}

static void
fem_unlock(struct fem_head *fp)
{
	ASSERT(fp != NULL);
	mutex_exit(&fp->femh_lock);
}
/*
 * Addref can only be called while its head->lock is held.
 */
static void
fem_addref(struct fem_list *sp)
{
	atomic_inc_32(&sp->feml_refc);
}

static uint32_t
fem_delref(struct fem_list *sp)
{
	return (atomic_dec_32_nv(&sp->feml_refc));
}
static struct fem_list *
fem_get(struct fem_head *fp)
{
	struct fem_list *sp = NULL;

	if (fp != NULL) {
		if ((sp = fem_lock(fp)) != NULL) {
			fem_addref(sp);
		}
		fem_unlock(fp);
	}
	return (sp);
}
static void
fem_release(struct fem_list *sp)
{
	int i;

	if (sp == NULL)
		return;

	ASSERT(sp->feml_refc != 0);
	if (fem_delref(sp) == 0) {
		/*
		 * Before freeing the list, we need to release the
		 * caller-provided data.
		 */
		for (i = sp->feml_tos; i > 0; i--) {
			struct fem_node *fnp = &sp->feml_nodes[i];

			if (fnp->fn_av_rele)
				(*(fnp->fn_av_rele))(fnp->fn_available);
		}
		kmem_free(sp, fl_ntob(sp->feml_ssize));
	}
}
/*
 * These are the 'head' operations which perform the interposition.
 *
 * This set must be 1:1, onto with the (vnodeops, vfsops).
 *
 * If there is a desire to globally disable interposition for a particular
 * method, the corresponding 'head' routine should unearth the base method
 * and invoke it directly rather than walking the monitor stack.
 *
 * All the functions are virtually the same, save for names, types & args.
 * 1. get a reference to the monitor stack for this object.
 * 2. store the top of stack into the femarg structure.
 * 3. store the basic object (vnode *, vnode **, vfs *) in the femarg struct.
 * 4. invoke the "top" method for this object.
 * 5. release the reference to the monitor stack.
 */
static int
vhead_open(vnode_t **vpp, int mode, cred_t *cr, caller_context_t *ct)
{
	int (*func)(femarg_t *, int, cred_t *, caller_context_t *);
	struct fem_list *femsp;
	femarg_t farg;
	int ret;

	if ((femsp = fem_get((*vpp)->v_femhead)) == NULL) {
		func = NULL;
	} else {
		farg.fa_vnode.vpp = vpp;
		farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
		func = vsop_find(&farg, femop_open);
	}

	if (func != NULL)
		ret = func(&farg, mode, cr, ct);
	else
		ret = fop_open_dispatch(vpp, mode, cr, ct, false);

	fem_release(femsp);

	return ret;
}
306 vhead_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
307 caller_context_t *ct)
309 int (*func)(femarg_t *, int, int, offset_t, cred_t *,
310 caller_context_t *);
311 struct fem_list *femsp;
312 femarg_t farg;
313 int ret;
315 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
316 func = NULL;
317 } else {
318 farg.fa_vnode.vp = vp;
319 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
320 func = vsop_find(&farg, femop_close);
323 if (func != NULL)
324 ret = func(&farg, flag, count, offset, cr, ct);
325 else
326 ret = fop_close_dispatch(vp, flag, count, offset, cr, ct,
327 false);
329 fem_release(femsp);
331 return ret;
335 vhead_read(vnode_t *vp, uio_t *uiop, int ioflag, cred_t *cr,
336 caller_context_t *ct)
338 int (*func)(femarg_t *, uio_t *, int, cred_t *, caller_context_t *);
339 struct fem_list *femsp;
340 femarg_t farg;
341 int ret;
343 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
344 func = NULL;
345 } else {
346 farg.fa_vnode.vp = vp;
347 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
348 func = vsop_find(&farg, femop_read);
351 if (func != NULL)
352 ret = func(&farg, uiop, ioflag, cr, ct);
353 else
354 ret = fop_read_dispatch(vp, uiop, ioflag, cr, ct, false);
356 fem_release(femsp);
358 return ret;
362 vhead_write(vnode_t *vp, uio_t *uiop, int ioflag, cred_t *cr,
363 caller_context_t *ct)
365 int (*func)(femarg_t *, uio_t *, int, cred_t *, caller_context_t *);
366 struct fem_list *femsp;
367 femarg_t farg;
368 int ret;
370 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
371 func = NULL;
372 } else {
373 farg.fa_vnode.vp = vp;
374 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
375 func = vsop_find(&farg, femop_write);
378 if (func != NULL)
379 ret = func(&farg, uiop, ioflag, cr, ct);
380 else
381 ret = fop_write_dispatch(vp, uiop, ioflag, cr, ct, false);
383 fem_release(femsp);
385 return ret;
389 vhead_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag, cred_t *cr,
390 int *rvalp, caller_context_t *ct)
392 int (*func)(femarg_t *, int, intptr_t, int, cred_t *, int *,
393 caller_context_t *);
394 struct fem_list *femsp;
395 femarg_t farg;
396 int ret;
398 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
399 func = NULL;
400 } else {
401 farg.fa_vnode.vp = vp;
402 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
403 func = vsop_find(&farg, femop_ioctl);
406 if (func != NULL)
407 ret = func(&farg, cmd, arg, flag, cr, rvalp, ct);
408 else
409 ret = fop_ioctl_dispatch(vp, cmd, arg, flag, cr, rvalp, ct,
410 false);
412 fem_release(femsp);
414 return ret;
418 vhead_setfl(vnode_t *vp, int oflags, int nflags, cred_t *cr,
419 caller_context_t *ct)
421 int (*func)(femarg_t *, int, int, cred_t *, caller_context_t *);
422 struct fem_list *femsp;
423 femarg_t farg;
424 int ret;
426 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
427 func = NULL;
428 } else {
429 farg.fa_vnode.vp = vp;
430 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
431 func = vsop_find(&farg, femop_setfl);
434 if (func != NULL)
435 ret = func(&farg, oflags, nflags, cr, ct);
436 else
437 ret = fop_setfl_dispatch(vp, oflags, nflags, cr, ct, false);
439 fem_release(femsp);
441 return ret;
445 vhead_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
446 caller_context_t *ct)
448 int (*func)(femarg_t *, vattr_t *, int, cred_t *, caller_context_t *);
449 struct fem_list *femsp;
450 femarg_t farg;
451 int ret;
453 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
454 func = NULL;
455 } else {
456 farg.fa_vnode.vp = vp;
457 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
458 func = vsop_find(&farg, femop_getattr);
461 if (func != NULL)
462 ret = func(&farg, vap, flags, cr, ct);
463 else
464 ret = fop_getattr_dispatch(vp, vap, flags, cr, ct, false);
466 fem_release(femsp);
468 return ret;
472 vhead_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
473 caller_context_t *ct)
475 int (*func)(femarg_t *, vattr_t *, int, cred_t *, caller_context_t *);
476 struct fem_list *femsp;
477 femarg_t farg;
478 int ret;
480 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
481 func = NULL;
482 } else {
483 farg.fa_vnode.vp = vp;
484 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
485 func = vsop_find(&farg, femop_setattr);
488 if (func != NULL)
489 ret = func(&farg, vap, flags, cr, ct);
490 else
491 ret = fop_setattr_dispatch(vp, vap, flags, cr, ct, false);
493 fem_release(femsp);
495 return ret;
499 vhead_access(vnode_t *vp, int mode, int flags, cred_t *cr,
500 caller_context_t *ct)
502 int (*func)(femarg_t *, int, int, cred_t *, caller_context_t *);
503 struct fem_list *femsp;
504 femarg_t farg;
505 int ret;
507 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
508 func = NULL;
509 } else {
510 farg.fa_vnode.vp = vp;
511 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
512 func = vsop_find(&farg, femop_access);
515 if (func != NULL)
516 ret = func(&farg, mode, flags, cr, ct);
517 else
518 ret = fop_access_dispatch(vp, mode, flags, cr, ct, false);
520 fem_release(femsp);
522 return ret;
526 vhead_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
527 int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
528 int *direntflags, pathname_t *realpnp)
530 int (*func)(femarg_t *, char *, vnode_t **, pathname_t *, int,
531 vnode_t *, cred_t *, caller_context_t *, int *,
532 pathname_t *);
533 struct fem_list *femsp;
534 femarg_t farg;
535 int ret;
537 if ((femsp = fem_get(dvp->v_femhead)) == NULL) {
538 func = NULL;
539 } else {
540 farg.fa_vnode.vp = dvp;
541 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
542 func = vsop_find(&farg, femop_lookup);
545 if (func != NULL)
546 ret = func(&farg, nm, vpp, pnp, flags, rdir, cr, ct,
547 direntflags, realpnp);
548 else
549 ret = fop_lookup_dispatch(dvp, nm, vpp, pnp, flags, rdir,
550 cr, ct, direntflags, realpnp, false);
552 fem_release(femsp);
554 return ret;
558 vhead_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl,
559 int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
560 vsecattr_t *vsecp)
562 int (*func)(femarg_t *, char *, vattr_t *, vcexcl_t, int, vnode_t **,
563 cred_t *, int, caller_context_t *, vsecattr_t *);
564 struct fem_list *femsp;
565 femarg_t farg;
566 int ret;
568 if ((femsp = fem_get(dvp->v_femhead)) == NULL) {
569 func = NULL;
570 } else {
571 farg.fa_vnode.vp = dvp;
572 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
573 func = vsop_find(&farg, femop_create);
576 if (func != NULL)
577 ret = func(&farg, name, vap, excl, mode, vpp, cr, flag, ct,
578 vsecp);
579 else
580 ret = fop_create_dispatch(dvp, name, vap, excl, mode, vpp,
581 cr, flag, ct, vsecp, false);
583 fem_release(femsp);
585 return ret;
589 vhead_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct,
590 int flags)
592 int (*func)(femarg_t *, char *, cred_t *, caller_context_t *, int);
593 struct fem_list *femsp;
594 femarg_t farg;
595 int ret;
597 if ((femsp = fem_get(dvp->v_femhead)) == NULL) {
598 func = NULL;
599 } else {
600 farg.fa_vnode.vp = dvp;
601 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
602 func = vsop_find(&farg, femop_remove);
605 if (func != NULL)
606 ret = func(&farg, nm, cr, ct, flags);
607 else
608 ret = fop_remove_dispatch(dvp, nm, cr, ct, flags, false);
610 fem_release(femsp);
612 return ret;
616 vhead_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
617 caller_context_t *ct, int flags)
619 int (*func)(femarg_t *, vnode_t *, char *, cred_t *,
620 caller_context_t *, int);
621 struct fem_list *femsp;
622 femarg_t farg;
623 int ret;
625 if ((femsp = fem_get(tdvp->v_femhead)) == NULL) {
626 func = NULL;
627 } else {
628 farg.fa_vnode.vp = tdvp;
629 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
630 func = vsop_find(&farg, femop_link);
633 if (func != NULL)
634 ret = func(&farg, svp, tnm, cr, ct, flags);
635 else
636 ret = fop_link_dispatch(tdvp, svp, tnm, cr, ct, flags, false);
638 fem_release(femsp);
640 return ret;
644 vhead_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
645 cred_t *cr, caller_context_t *ct, int flags)
647 int (*func)(femarg_t *, char *, vnode_t *, char *, cred_t *,
 648 caller_context_t *, int);
649 struct fem_list *femsp;
650 femarg_t farg;
651 int ret;
653 if ((femsp = fem_get(sdvp->v_femhead)) == NULL) {
654 func = NULL;
655 } else {
656 farg.fa_vnode.vp = sdvp;
657 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
658 func = vsop_find(&farg, femop_rename);
661 if (func != NULL)
662 ret = func(&farg, snm, tdvp, tnm, cr, ct, flags);
663 else
664 ret = fop_rename_dispatch(sdvp, snm, tdvp, tnm, cr, ct,
665 flags, false);
667 fem_release(femsp);
669 return ret;
673 vhead_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp,
674 cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
676 int (*func)(femarg_t *, char *, vattr_t *, vnode_t **, cred_t *,
677 caller_context_t *, int, vsecattr_t *);
678 struct fem_list *femsp;
679 femarg_t farg;
680 int ret;
682 if ((femsp = fem_get(dvp->v_femhead)) == NULL) {
683 func = NULL;
684 } else {
685 farg.fa_vnode.vp = dvp;
686 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
687 func = vsop_find(&farg, femop_mkdir);
690 if (func != NULL)
691 ret = func(&farg, dirname, vap, vpp, cr, ct, flags, vsecp);
692 else
693 ret = fop_mkdir_dispatch(dvp, dirname, vap, vpp, cr, ct, flags,
694 vsecp, false);
696 fem_release(femsp);
698 return ret;
702 vhead_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
703 caller_context_t *ct, int flags)
705 int (*func)(femarg_t *, char *, vnode_t *, cred_t *, caller_context_t *,
706 int);
707 struct fem_list *femsp;
708 femarg_t farg;
709 int ret;
711 if ((femsp = fem_get(dvp->v_femhead)) == NULL) {
712 func = NULL;
713 } else {
714 farg.fa_vnode.vp = dvp;
715 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
716 func = vsop_find(&farg, femop_rmdir);
719 if (func != NULL)
720 ret = func(&farg, nm, cdir, cr, ct, flags);
721 else
722 ret = fop_rmdir_dispatch(dvp, nm, cdir, cr, ct, flags, false);
724 fem_release(femsp);
726 return ret;
730 vhead_readdir(vnode_t *vp, uio_t *uiop, cred_t *cr, int *eofp,
731 caller_context_t *ct, int flags)
733 int (*func)(femarg_t *, uio_t *, cred_t *, int *, caller_context_t *,
734 int);
735 struct fem_list *femsp;
736 femarg_t farg;
737 int ret;
739 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
740 func = NULL;
741 } else {
742 farg.fa_vnode.vp = vp;
743 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
744 func = vsop_find(&farg, femop_readdir);
747 if (func != NULL)
748 ret = func(&farg, uiop, cr, eofp, ct, flags);
749 else
750 ret = fop_readdir_dispatch(vp, uiop, cr, eofp, ct, flags,
751 false);
753 fem_release(femsp);
755 return ret;
759 vhead_symlink(vnode_t *dvp, char *linkname, vattr_t *vap, char *target,
760 cred_t *cr, caller_context_t *ct, int flags)
762 int (*func)(femarg_t *, char *, vattr_t *, char *, cred_t *,
763 caller_context_t *, int);
764 struct fem_list *femsp;
765 femarg_t farg;
766 int ret;
768 if ((femsp = fem_get(dvp->v_femhead)) == NULL) {
769 func = NULL;
770 } else {
771 farg.fa_vnode.vp = dvp;
772 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
773 func = vsop_find(&farg, femop_symlink);
776 if (func != NULL)
777 ret = func(&farg, linkname, vap, target, cr, ct, flags);
778 else
779 ret = fop_symlink_dispatch(dvp, linkname, vap, target, cr, ct,
780 flags, false);
782 fem_release(femsp);
784 return ret;
788 vhead_readlink(vnode_t *vp, uio_t *uiop, cred_t *cr, caller_context_t *ct)
790 int (*func)(femarg_t *, uio_t *, cred_t *, caller_context_t *);
791 struct fem_list *femsp;
792 femarg_t farg;
793 int ret;
795 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
796 func = NULL;
797 } else {
798 farg.fa_vnode.vp = vp;
799 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
800 func = vsop_find(&farg, femop_readlink);
803 if (func != NULL)
804 ret = func(&farg, uiop, cr, ct);
805 else
806 ret = fop_readlink_dispatch(vp, uiop, cr, ct, false);
808 fem_release(femsp);
810 return ret;
814 vhead_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
816 int (*func)(femarg_t *, int, cred_t *, caller_context_t *);
817 struct fem_list *femsp;
818 femarg_t farg;
819 int ret;
821 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
822 func = NULL;
823 } else {
824 farg.fa_vnode.vp = vp;
825 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
826 func = vsop_find(&farg, femop_fsync);
829 if (func != NULL)
830 ret = func(&farg, syncflag, cr, ct);
831 else
832 ret = fop_fsync_dispatch(vp, syncflag, cr, ct, false);
834 fem_release(femsp);
836 return ret;
839 void
840 vhead_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
842 void (*func)(femarg_t *, cred_t *, caller_context_t *);
843 struct fem_list *femsp;
844 femarg_t farg;
846 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
847 func = NULL;
848 } else {
849 farg.fa_vnode.vp = vp;
850 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
851 func = vsop_find(&farg, femop_inactive);
854 if (func != NULL)
855 func(&farg, cr, ct);
856 else
857 fop_inactive_dispatch(vp, cr, ct, false);
859 fem_release(femsp);
863 vhead_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
865 int (*func)(femarg_t *, fid_t *, caller_context_t *);
866 struct fem_list *femsp;
867 femarg_t farg;
868 int ret;
870 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
871 func = NULL;
872 } else {
873 farg.fa_vnode.vp = vp;
874 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
875 func = vsop_find(&farg, femop_fid);
878 if (func != NULL)
879 ret = func(&farg, fidp, ct);
880 else
881 ret = fop_fid_dispatch(vp, fidp, ct, false);
883 fem_release(femsp);
885 return ret;
889 vhead_rwlock(vnode_t *vp, int write_lock, caller_context_t *ct)
891 int (*func)(femarg_t *, int, caller_context_t *);
892 struct fem_list *femsp;
893 femarg_t farg;
894 int ret;
896 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
897 func = NULL;
898 } else {
899 farg.fa_vnode.vp = vp;
900 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
901 func = vsop_find(&farg, femop_rwlock);
904 if (func != NULL)
905 ret = func(&farg, write_lock, ct);
906 else
907 ret = fop_rwlock_dispatch(vp, write_lock, ct, false);
909 fem_release(femsp);
911 return ret;
914 void
915 vhead_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ct)
917 void (*func)(femarg_t *, int, caller_context_t *);
918 struct fem_list *femsp;
919 femarg_t farg;
921 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
922 func = NULL;
923 } else {
924 farg.fa_vnode.vp = vp;
925 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
926 func = vsop_find(&farg, femop_rwunlock);
929 if (func != NULL)
930 func(&farg, write_lock, ct);
931 else
932 fop_rwunlock_dispatch(vp, write_lock, ct, false);
934 fem_release(femsp);
938 vhead_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
940 int (*func)(femarg_t *, offset_t, offset_t *, caller_context_t *);
941 struct fem_list *femsp;
942 femarg_t farg;
943 int ret;
945 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
946 func = NULL;
947 } else {
948 farg.fa_vnode.vp = vp;
949 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
950 func = vsop_find(&farg, femop_seek);
953 if (func != NULL)
954 ret = func(&farg, ooff, noffp, ct);
955 else
956 ret = fop_seek_dispatch(vp, ooff, noffp, ct, false);
958 fem_release(femsp);
960 return ret;
964 vhead_cmp(vnode_t *vp1, vnode_t *vp2, caller_context_t *ct)
966 int (*func)(femarg_t *, vnode_t *, caller_context_t *);
967 struct fem_list *femsp;
968 femarg_t farg;
969 int ret;
971 if ((femsp = fem_get(vp1->v_femhead)) == NULL) {
972 func = NULL;
973 } else {
974 farg.fa_vnode.vp = vp1;
975 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
976 func = vsop_find(&farg, femop_cmp);
979 if (func != NULL)
980 ret = func(&farg, vp2, ct);
981 else
982 ret = fop_cmp_dispatch(vp1, vp2, ct, false);
984 fem_release(femsp);
986 return ret;
990 vhead_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
991 offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
992 caller_context_t *ct)
994 int (*func)(femarg_t *, int, struct flock64 *, int, offset_t,
995 struct flk_callback *, cred_t *, caller_context_t *);
996 struct fem_list *femsp;
997 femarg_t farg;
998 int ret;
1000 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1001 func = NULL;
1002 } else {
1003 farg.fa_vnode.vp = vp;
1004 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1005 func = vsop_find(&farg, femop_frlock);
1008 if (func != NULL)
1009 ret = func(&farg, cmd, bfp, flag, offset, flk_cbp, cr, ct);
1010 else
1011 ret = fop_frlock_dispatch(vp, cmd, bfp, flag, offset,
1012 flk_cbp, cr, ct, false);
1014 fem_release(femsp);
1016 return ret;
1020 vhead_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
1021 offset_t offset, cred_t *cr, caller_context_t *ct)
1023 int (*func)(femarg_t *, int, struct flock64 *, int, offset_t,
1024 cred_t *, caller_context_t *);
1025 struct fem_list *femsp;
1026 femarg_t farg;
1027 int ret;
1029 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1030 func = NULL;
1031 } else {
1032 farg.fa_vnode.vp = vp;
1033 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1034 func = vsop_find(&farg, femop_space);
1037 if (func != NULL)
1038 ret = func(&farg, cmd, bfp, flag, offset, cr, ct);
1039 else
1040 ret = fop_space_dispatch(vp, cmd, bfp, flag, offset, cr, ct,
1041 false);
1043 fem_release(femsp);
1045 return ret;
1049 vhead_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
1051 int (*func)(femarg_t *, vnode_t **, caller_context_t *);
1052 struct fem_list *femsp;
1053 femarg_t farg;
1054 int ret;
1056 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1057 func = NULL;
1058 } else {
1059 farg.fa_vnode.vp = vp;
1060 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1061 func = vsop_find(&farg, femop_realvp);
1064 if (func != NULL)
1065 ret = func(&farg, vpp, ct);
1066 else
1067 ret = fop_realvp_dispatch(vp, vpp, ct, false);
1069 fem_release(femsp);
1071 return ret;
1075 vhead_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
1076 struct page **plarr, size_t plsz, struct seg *seg, caddr_t addr,
1077 enum seg_rw rw, cred_t *cr, caller_context_t *ct)
1079 int (*func)(femarg_t *, offset_t, size_t, uint_t *, struct page **,
1080 size_t, struct seg *, caddr_t, enum seg_rw, cred_t *,
1081 caller_context_t *);
1082 struct fem_list *femsp;
1083 femarg_t farg;
1084 int ret;
1086 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1087 func = NULL;
1088 } else {
1089 farg.fa_vnode.vp = vp;
1090 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1091 func = vsop_find(&farg, femop_getpage);
1094 if (func != NULL)
1095 ret = func(&farg, off, len, protp, plarr, plsz, seg, addr, rw,
1096 cr, ct);
1097 else
1098 ret = fop_getpage_dispatch(vp, off, len, protp, plarr, plsz,
1099 seg, addr, rw, cr, ct, false);
1101 fem_release(femsp);
1103 return ret;
1107 vhead_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
1108 caller_context_t *ct)
1110 int (*func)(femarg_t *, offset_t, size_t, int, cred_t *,
1111 caller_context_t *);
1112 struct fem_list *femsp;
1113 femarg_t farg;
1114 int ret;
1116 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1117 func = NULL;
1118 } else {
1119 farg.fa_vnode.vp = vp;
1120 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1121 func = vsop_find(&farg, femop_putpage);
1124 if (func != NULL)
1125 ret = func(&farg, off, len, flags, cr, ct);
1126 else
1127 ret = fop_putpage_dispatch(vp, off, len, flags, cr, ct, false);
1129 fem_release(femsp);
1131 return ret;
1135 vhead_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
1136 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
1137 cred_t *cr, caller_context_t *ct)
1139 int (*func)(femarg_t *, offset_t, struct as *, caddr_t *, size_t,
1140 uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
1141 struct fem_list *femsp;
1142 femarg_t farg;
1143 int ret;
1145 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1146 func = NULL;
1147 } else {
1148 farg.fa_vnode.vp = vp;
1149 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1150 func = vsop_find(&farg, femop_map);
1153 if (func != NULL)
1154 ret = func(&farg, off, as, addrp, len, prot, maxprot, flags,
1155 cr, ct);
1156 else
1157 ret = fop_map_dispatch(vp, off, as, addrp, len, prot,
1158 maxprot, flags, cr, ct, false);
1160 fem_release(femsp);
1162 return ret;
1166 vhead_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
1167 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
1168 cred_t *cr, caller_context_t *ct)
1170 int (*func)(femarg_t *, offset_t, struct as *, caddr_t, size_t, uchar_t,
1171 uchar_t, uint_t, cred_t *, caller_context_t *);
1172 struct fem_list *femsp;
1173 femarg_t farg;
1174 int ret;
1176 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1177 func = NULL;
1178 } else {
1179 farg.fa_vnode.vp = vp;
1180 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1181 func = vsop_find(&farg, femop_addmap);
1184 if (func != NULL)
1185 ret = func(&farg, off, as, addr, len, prot, maxprot, flags,
1186 cr, ct);
1187 else
1188 ret = fop_addmap_dispatch(vp, off, as, addr, len, prot,
1189 maxprot, flags, cr, ct, false);
1191 fem_release(femsp);
1193 return ret;
1197 vhead_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
1198 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
1199 caller_context_t *ct)
1201 int (*func)(femarg_t *, offset_t, struct as *, caddr_t, size_t, uint_t,
1202 uint_t, uint_t, cred_t *, caller_context_t *);
1203 struct fem_list *femsp;
1204 femarg_t farg;
1205 int ret;
1207 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1208 func = NULL;
1209 } else {
1210 farg.fa_vnode.vp = vp;
1211 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1212 func = vsop_find(&farg, femop_delmap);
1215 if (func != NULL)
1216 ret = func(&farg, off, as, addr, len, prot, maxprot, flags,
1217 cr, ct);
1218 else
1219 ret = fop_delmap_dispatch(vp, off, as, addr, len, prot,
1220 maxprot, flags, cr, ct, false);
1222 fem_release(femsp);
1224 return ret;
1228 vhead_poll(vnode_t *vp, short events, int anyyet, short *reventsp,
1229 struct pollhead **phpp, caller_context_t *ct)
1231 int (*func)(femarg_t *, short, int, short *, struct pollhead **,
1232 caller_context_t *);
1233 struct fem_list *femsp;
1234 femarg_t farg;
1235 int ret;
1237 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1238 func = NULL;
1239 } else {
1240 farg.fa_vnode.vp = vp;
1241 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1242 func = vsop_find(&farg, femop_poll);
1245 if (func != NULL)
1246 ret = func(&farg, events, anyyet, reventsp, phpp, ct);
1247 else
1248 ret = fop_poll_dispatch(vp, events, anyyet, reventsp, phpp,
1249 ct, false);
1251 fem_release(femsp);
1253 return ret;
1257 vhead_dump(vnode_t *vp, caddr_t addr, offset_t lbdn, offset_t dblks,
1258 caller_context_t *ct)
1260 int (*func)(femarg_t *, caddr_t, offset_t, offset_t,
1261 caller_context_t *);
1262 struct fem_list *femsp;
1263 femarg_t farg;
1264 int ret;
1266 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1267 func = NULL;
1268 } else {
1269 farg.fa_vnode.vp = vp;
1270 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1271 func = vsop_find(&farg, femop_dump);
1274 if (func != NULL)
1275 ret = func(&farg, addr, lbdn, dblks, ct);
1276 else
1277 ret = fop_dump_dispatch(vp, addr, lbdn, dblks, ct, false);
1279 fem_release(femsp);
1281 return ret;
1285 vhead_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
1286 caller_context_t *ct)
1288 int (*func)(femarg_t *, int, ulong_t *, cred_t *, caller_context_t *);
1289 struct fem_list *femsp;
1290 femarg_t farg;
1291 int ret;
1293 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1294 func = NULL;
1295 } else {
1296 farg.fa_vnode.vp = vp;
1297 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1298 func = vsop_find(&farg, femop_pathconf);
1301 if (func != NULL)
1302 ret = func(&farg, cmd, valp, cr, ct);
1303 else
1304 ret = fop_pathconf_dispatch(vp, cmd, valp, cr, ct, false);
1306 fem_release(femsp);
1308 return ret;
1312 vhead_pageio(vnode_t *vp, struct page *pp, uoff_t io_off,
1313 size_t io_len, int flags, cred_t *cr, caller_context_t *ct)
1315 int (*func)(femarg_t *, struct page *, uoff_t, size_t, int, cred_t *,
1316 caller_context_t *);
1317 struct fem_list *femsp;
1318 femarg_t farg;
1319 int ret;
1321 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1322 func = NULL;
1323 } else {
1324 farg.fa_vnode.vp = vp;
1325 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1326 func = vsop_find(&farg, femop_pageio);
1329 if (func != NULL)
1330 ret = func(&farg, pp, io_off, io_len, flags, cr, ct);
1331 else
1332 ret = fop_pageio_dispatch(vp, pp, io_off, io_len, flags, cr,
1333 ct, false);
1335 fem_release(femsp);
1337 return ret;
1341 vhead_dumpctl(vnode_t *vp, int action, offset_t *blkp, caller_context_t *ct)
1343 int (*func)(femarg_t *, int, offset_t *, caller_context_t *);
1344 struct fem_list *femsp;
1345 femarg_t farg;
1346 int ret;
1348 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1349 func = NULL;
1350 } else {
1351 farg.fa_vnode.vp = vp;
1352 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1353 func = vsop_find(&farg, femop_dumpctl);
1356 if (func != NULL)
1357 ret = func(&farg, action, blkp, ct);
1358 else
1359 ret = fop_dumpctl_dispatch(vp, action, blkp, ct, false);
1361 fem_release(femsp);
1363 return ret;
1366 void
1367 vhead_dispose(vnode_t *vp, struct page *pp, int flag, int dn, cred_t *cr,
1368 caller_context_t *ct)
1370 void (*func)(femarg_t *, struct page *, int, int, cred_t *,
1371 caller_context_t *);
1372 struct fem_list *femsp;
1373 femarg_t farg;
1375 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1376 func = NULL;
1377 } else {
1378 farg.fa_vnode.vp = vp;
1379 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1380 func = vsop_find(&farg, femop_dispose);
1383 if (func != NULL)
1384 func(&farg, pp, flag, dn, cr, ct);
1385 else
1386 fop_dispose_dispatch(vp, pp, flag, dn, cr, ct, false);
1388 fem_release(femsp);
1392 vhead_setsecattr(vnode_t *vp, vsecattr_t *vsap, int flag, cred_t *cr,
1393 caller_context_t *ct)
1395 int (*func)(femarg_t *, vsecattr_t *, int, cred_t *,
1396 caller_context_t *);
1397 struct fem_list *femsp;
1398 femarg_t farg;
1399 int ret;
1401 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1402 func = NULL;
1403 } else {
1404 farg.fa_vnode.vp = vp;
1405 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1406 func = vsop_find(&farg, femop_setsecattr);
1409 if (func != NULL)
1410 ret = func(&farg, vsap, flag, cr, ct);
1411 else
1412 ret = fop_setsecattr_dispatch(vp, vsap, flag, cr, ct, false);
1414 fem_release(femsp);
1416 return ret;
1420 vhead_getsecattr(vnode_t *vp, vsecattr_t *vsap, int flag, cred_t *cr,
1421 caller_context_t *ct)
1423 int (*func)(femarg_t *, vsecattr_t *, int, cred_t *,
1424 caller_context_t *);
1425 struct fem_list *femsp;
1426 femarg_t farg;
1427 int ret;
1429 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1430 func = NULL;
1431 } else {
1432 farg.fa_vnode.vp = vp;
1433 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1434 func = vsop_find(&farg, femop_getsecattr);
1437 if (func != NULL)
1438 ret = func(&farg, vsap, flag, cr, ct);
1439 else
1440 ret = fop_getsecattr_dispatch(vp, vsap, flag, cr, ct, false);
1442 fem_release(femsp);
1444 return ret;
1448 vhead_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag,
1449 cred_t *cr, caller_context_t *ct)
1451 int (*func)(femarg_t *, int, struct shrlock *, int, cred_t *,
1452 caller_context_t *);
1453 struct fem_list *femsp;
1454 femarg_t farg;
1455 int ret;
1457 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1458 func = NULL;
1459 } else {
1460 farg.fa_vnode.vp = vp;
1461 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1462 func = vsop_find(&farg, femop_shrlock);
1465 if (func != NULL)
1466 ret = func(&farg, cmd, shr, flag, cr, ct);
1467 else
1468 ret = fop_shrlock_dispatch(vp, cmd, shr, flag, cr, ct, false);
1470 fem_release(femsp);
1472 return ret;
1476 vhead_vnevent(vnode_t *vp, vnevent_t vnevent, vnode_t *dvp, char *cname,
1477 caller_context_t *ct)
1479 int (*func)(femarg_t *, vnevent_t, vnode_t *, char *,
1480 caller_context_t *);
1481 struct fem_list *femsp;
1482 femarg_t farg;
1483 int ret;
1485 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1486 func = NULL;
1487 } else {
1488 farg.fa_vnode.vp = vp;
1489 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1490 func = vsop_find(&farg, femop_vnevent);
1493 if (func != NULL)
1494 ret = func(&farg, vnevent, dvp, cname, ct);
1495 else
1496 ret = fop_vnevent_dispatch(vp, vnevent, dvp, cname, ct, false);
1498 fem_release(femsp);
1500 return ret;
1504 vhead_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuiop, cred_t *cr,
1505 caller_context_t *ct)
1507 int (*func)(femarg_t *, enum uio_rw, xuio_t *, cred_t *,
1508 caller_context_t *);
1509 struct fem_list *femsp;
1510 femarg_t farg;
1511 int ret;
1513 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1514 func = NULL;
1515 } else {
1516 farg.fa_vnode.vp = vp;
1517 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1518 func = vsop_find(&farg, femop_reqzcbuf);
1521 if (func != NULL)
1522 ret = func(&farg, ioflag, xuiop, cr, ct);
1523 else
1524 ret = fop_reqzcbuf_dispatch(vp, ioflag, xuiop, cr, ct, false);
1526 fem_release(femsp);
1528 return ret;
1532 vhead_retzcbuf(vnode_t *vp, xuio_t *xuiop, cred_t *cr, caller_context_t *ct)
1534 int (*func)(femarg_t *, xuio_t *, cred_t *, caller_context_t *);
1535 struct fem_list *femsp;
1536 femarg_t farg;
1537 int ret;
1539 if ((femsp = fem_get(vp->v_femhead)) == NULL) {
1540 func = NULL;
1541 } else {
1542 farg.fa_vnode.vp = vp;
1543 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1544 func = vsop_find(&farg, femop_retzcbuf);
1547 if (func != NULL)
1548 ret = func(&farg, xuiop, cr, ct);
1549 else
1550 ret = fop_retzcbuf_dispatch(vp, xuiop, cr, ct, false);
1552 fem_release(femsp);
1554 return ret;
1558 fshead_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
1560 int (*func)(fsemarg_t *, vnode_t *, struct mounta *, cred_t *);
1561 struct fem_list *femsp;
1562 fsemarg_t farg;
1563 int ret;
1565 ASSERT(vfsp->vfs_implp);
1567 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1568 func = NULL;
1569 } else {
1570 farg.fa_vnode.vfsp = vfsp;
1571 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1572 func = vfsop_find(&farg, fsemop_mount);
1575 if (func != NULL)
1576 ret = func(&farg, mvp, uap, cr);
1577 else
1578 ret = fsop_mount_dispatch(vfsp, mvp, uap, cr, false);
1580 fem_release(femsp);
1582 return ret;
1586 fshead_unmount(vfs_t *vfsp, int flag, cred_t *cr)
1588 int (*func)(fsemarg_t *, int, cred_t *);
1589 struct fem_list *femsp;
1590 fsemarg_t farg;
1591 int ret;
1593 ASSERT(vfsp->vfs_implp);
1595 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1596 func = NULL;
1597 } else {
1598 farg.fa_vnode.vfsp = vfsp;
1599 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1600 func = vfsop_find(&farg, fsemop_unmount);
1603 if (func != NULL)
1604 ret = func(&farg, flag, cr);
1605 else
1606 ret = fsop_unmount_dispatch(vfsp, flag, cr, false);
1608 fem_release(femsp);
1610 return ret;
1614 fshead_root(vfs_t *vfsp, vnode_t **vpp)
1616 int (*func)(fsemarg_t *, vnode_t **);
1617 struct fem_list *femsp;
1618 fsemarg_t farg;
1619 int ret;
1621 ASSERT(vfsp->vfs_implp);
1623 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1624 func = NULL;
1625 } else {
1626 farg.fa_vnode.vfsp = vfsp;
1627 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1628 func = vfsop_find(&farg, fsemop_root);
1631 if (func != NULL)
1632 ret = func(&farg, vpp);
1633 else
1634 ret = fsop_root_dispatch(vfsp, vpp, false);
1636 fem_release(femsp);
1638 return ret;
1642 fshead_statvfs(vfs_t *vfsp, statvfs64_t *sp)
1644 int (*func)(fsemarg_t *, statvfs64_t *);
1645 struct fem_list *femsp;
1646 fsemarg_t farg;
1647 int ret;
1649 ASSERT(vfsp->vfs_implp);
1651 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1652 func = NULL;
1653 } else {
1654 farg.fa_vnode.vfsp = vfsp;
1655 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1656 func = vfsop_find(&farg, fsemop_statvfs);
1659 if (func != NULL)
1660 ret = func(&farg, sp);
1661 else
1662 ret = fsop_statfs_dispatch(vfsp, sp, false);
1664 fem_release(femsp);
1666 return ret;
1670 fshead_sync(vfs_t *vfsp, short flag, cred_t *cr)
1672 int (*func)(fsemarg_t *, short, cred_t *);
1673 struct fem_list *femsp;
1674 fsemarg_t farg;
1675 int ret;
1677 ASSERT(vfsp->vfs_implp);
1679 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1680 func = NULL;
1681 } else {
1682 farg.fa_vnode.vfsp = vfsp;
1683 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1684 func = vfsop_find(&farg, fsemop_sync);
1687 if (func != NULL)
1688 ret = func(&farg, flag, cr);
1689 else
1690 ret = fsop_sync_dispatch(vfsp, flag, cr, false);
1692 fem_release(femsp);
1694 return ret;
1698 fshead_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
1700 int (*func)(fsemarg_t *, vnode_t **, fid_t *);
1701 struct fem_list *femsp;
1702 fsemarg_t farg;
1703 int ret;
1705 ASSERT(vfsp->vfs_implp);
1707 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1708 func = NULL;
1709 } else {
1710 farg.fa_vnode.vfsp = vfsp;
1711 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1712 func = vfsop_find(&farg, fsemop_vget);
1715 if (func != NULL)
1716 ret = func(&farg, vpp, fidp);
1717 else
1718 ret = fsop_vget_dispatch(vfsp, vpp, fidp, false);
1720 fem_release(femsp);
1722 return ret;
1726 fshead_mountroot(vfs_t *vfsp, enum whymountroot reason)
1728 int (*func)(fsemarg_t *, enum whymountroot);
1729 struct fem_list *femsp;
1730 fsemarg_t farg;
1731 int ret;
1733 ASSERT(vfsp->vfs_implp);
1735 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1736 func = NULL;
1737 } else {
1738 farg.fa_vnode.vfsp = vfsp;
1739 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1740 func = vfsop_find(&farg, fsemop_mountroot);
1743 if (func != NULL)
1744 ret = func(&farg, reason);
1745 else
1746 ret = fsop_mountroot_dispatch(vfsp, reason, false);
1748 fem_release(femsp);
1750 return ret;
1753 void
1754 fshead_freevfs(vfs_t *vfsp)
1756 void (*func)(fsemarg_t *);
1757 struct fem_list *femsp;
1758 fsemarg_t farg;
1760 ASSERT(vfsp->vfs_implp);
1762 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1763 func = NULL;
1764 } else {
1765 farg.fa_vnode.vfsp = vfsp;
1766 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1767 func = vfsop_find(&farg, fsemop_freevfs);
1770 if (func != NULL)
1771 func(&farg);
1772 else
1773 fsop_freefs_dispatch(vfsp, false);
1775 fem_release(femsp);
1779 fshead_vnstate(vfs_t *vfsp, vnode_t *vp, vntrans_t nstate)
1781 int (*func)(fsemarg_t *, vnode_t *, vntrans_t);
1782 struct fem_list *femsp;
1783 fsemarg_t farg;
1784 int ret;
1786 ASSERT(vfsp->vfs_implp);
1788 if ((femsp = fem_get(vfsp->vfs_femhead)) == NULL) {
1789 func = NULL;
1790 } else {
1791 farg.fa_vnode.vfsp = vfsp;
1792 farg.fa_fnode = femsp->feml_nodes + femsp->feml_tos;
1793 func = vfsop_find(&farg, fsemop_vnstate);
1796 if (func != NULL)
1797 ret = func(&farg, vp, nstate);
1798 else
1799 ret = fsop_vnstate_dispatch(vfsp, vp, nstate, false);
1801 fem_release(femsp);
1803 return ret;
/*
 * This set of routines transfers control to the next stacked monitor.
 *
 * Each routine is identical except for naming, types and arguments.
 *
 * The basic steps are:
 * 1. Decrease the stack pointer by one.
 * 2. If the current item is a base operation (vnode, vfs), goto 5.
 * 3. If the current item does not have a corresponding operation, goto 1.
 * 4. Return by invoking the current item with the argument handle.
 * 5. Return by invoking the base operation with the base object.
 *
 * For each classification, there needs to be at least one "next" operation
 * for each "head" operation.
 */
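
/*
 * For illustration only: a monitor interposes by supplying femop_*
 * callbacks that do their own work and then hand the call to the next
 * item on the stack via the matching vnext_*() routine.  A minimal
 * sketch follows; the "watch_open" callback, its counter, and the
 * "watch-monitor" name are hypothetical, and fn_available carries the
 * argument supplied when the monitor was installed (e.g. via the
 * fem_install() interface declared in <sys/fem.h>):
 *
 *	static int
 *	watch_open(femarg_t *vf, int mode, cred_t *cr, caller_context_t *ct)
 *	{
 *		uint64_t *opens = vf->fa_fnode->fn_available;
 *
 *		atomic_inc_64(opens);
 *		return vnext_open(vf, mode, cr, ct);
 *	}
 *
 *	static struct fem watch_ops = {
 *		.name = "watch-monitor",
 *		.femop_open = watch_open,
 *	};
 */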
1825 vnext_open(femarg_t *vf, int mode, cred_t *cr, caller_context_t *ct)
1827 int (*func)(femarg_t *, int, cred_t *, caller_context_t *);
1828 struct vnode **vnode = vf->fa_vnode.vpp;
1830 ASSERT(vf != NULL);
1831 vf->fa_fnode--;
1832 func = vsop_find(vf, femop_open);
1834 if (func != NULL)
1835 return func(vf, mode, cr, ct);
1837 return fop_open_dispatch(vnode, mode, cr, ct, false);
1841 vnext_close(femarg_t *vf, int flag, int count, offset_t offset, cred_t *cr,
1842 caller_context_t *ct)
1844 int (*func)(femarg_t *, int, int, offset_t, cred_t *,
1845 caller_context_t *);
1846 struct vnode *vnode = vf->fa_vnode.vp;
1848 ASSERT(vf != NULL);
1849 vf->fa_fnode--;
1850 func = vsop_find(vf, femop_close);
1852 if (func != NULL)
1853 return func(vf, flag, count, offset, cr, ct);
1855 return fop_close_dispatch(vnode, flag, count, offset, cr, ct, false);
1859 vnext_read(femarg_t *vf, uio_t *uiop, int ioflag, cred_t *cr,
1860 caller_context_t *ct)
1862 int (*func)(femarg_t *, uio_t *, int, cred_t *, caller_context_t *);
1863 struct vnode *vnode = vf->fa_vnode.vp;
1865 ASSERT(vf != NULL);
1866 vf->fa_fnode--;
1867 func = vsop_find(vf, femop_read);
1869 if (func != NULL)
1870 return func(vf, uiop, ioflag, cr, ct);
1872 return fop_read_dispatch(vnode, uiop, ioflag, cr, ct, false);
1876 vnext_write(femarg_t *vf, uio_t *uiop, int ioflag, cred_t *cr,
1877 caller_context_t *ct)
1879 int (*func)(femarg_t *, uio_t *, int, cred_t *, caller_context_t *);
1880 struct vnode *vnode = vf->fa_vnode.vp;
1882 ASSERT(vf != NULL);
1883 vf->fa_fnode--;
1884 func = vsop_find(vf, femop_write);
1886 if (func != NULL)
1887 return func(vf, uiop, ioflag, cr, ct);
1889 return fop_write_dispatch(vnode, uiop, ioflag, cr, ct, false);
1893 vnext_ioctl(femarg_t *vf, int cmd, intptr_t arg, int flag, cred_t *cr,
1894 int *rvalp, caller_context_t *ct)
1896 int (*func)(femarg_t *, int, intptr_t, int, cred_t *, int *,
1897 caller_context_t *);
1898 struct vnode *vnode = vf->fa_vnode.vp;
1900 ASSERT(vf != NULL);
1901 vf->fa_fnode--;
1902 func = vsop_find(vf, femop_ioctl);
1904 if (func != NULL)
1905 return func(vf, cmd, arg, flag, cr, rvalp, ct);
1907 return fop_ioctl_dispatch(vnode, cmd, arg, flag, cr, rvalp, ct, false);
1911 vnext_setfl(femarg_t *vf, int oflags, int nflags, cred_t *cr,
1912 caller_context_t *ct)
1914 int (*func)(femarg_t *, int, int, cred_t *, caller_context_t *);
1915 struct vnode *vnode = vf->fa_vnode.vp;
1917 ASSERT(vf != NULL);
1918 vf->fa_fnode--;
1919 func = vsop_find(vf, femop_setfl);
1921 if (func != NULL)
1922 return func(vf, oflags, nflags, cr, ct);
1924 return fop_setfl_dispatch(vnode, oflags, nflags, cr, ct, false);
1928 vnext_getattr(femarg_t *vf, vattr_t *vap, int flags, cred_t *cr,
1929 caller_context_t *ct)
1931 int (*func)(femarg_t *, vattr_t *, int, cred_t *, caller_context_t *);
1932 struct vnode *vnode = vf->fa_vnode.vp;
1934 ASSERT(vf != NULL);
1935 vf->fa_fnode--;
1936 func = vsop_find(vf, femop_getattr);
1938 if (func != NULL)
1939 return func(vf, vap, flags, cr, ct);
1941 return fop_getattr_dispatch(vnode, vap, flags, cr, ct, false);
1945 vnext_setattr(femarg_t *vf, vattr_t *vap, int flags, cred_t *cr,
1946 caller_context_t *ct)
1948 int (*func)(femarg_t *, vattr_t *, int, cred_t *, caller_context_t *);
1949 struct vnode *vnode = vf->fa_vnode.vp;
1951 ASSERT(vf != NULL);
1952 vf->fa_fnode--;
1953 func = vsop_find(vf, femop_setattr);
1955 if (func != NULL)
1956 return func(vf, vap, flags, cr, ct);
1958 return fop_setattr_dispatch(vnode, vap, flags, cr, ct, false);
1962 vnext_access(femarg_t *vf, int mode, int flags, cred_t *cr,
1963 caller_context_t *ct)
1965 int (*func)(femarg_t *, int, int, cred_t *, caller_context_t *);
1966 struct vnode *vnode = vf->fa_vnode.vp;
1968 ASSERT(vf != NULL);
1969 vf->fa_fnode--;
1970 func = vsop_find(vf, femop_access);
1972 if (func != NULL)
1973 return func(vf, mode, flags, cr, ct);
1975 return fop_access_dispatch(vnode, mode, flags, cr, ct, false);
1979 vnext_lookup(femarg_t *vf, char *nm, vnode_t **vpp, pathname_t *pnp,
1980 int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
1981 int *direntflags, pathname_t *realpnp)
1983 int (*func)(femarg_t *, char *, vnode_t **, pathname_t *, int,
1984 vnode_t *, cred_t *, caller_context_t *, int *,
1985 pathname_t *);
1986 struct vnode *vnode = vf->fa_vnode.vp;
1988 ASSERT(vf != NULL);
1989 vf->fa_fnode--;
1990 func = vsop_find(vf, femop_lookup);
1992 if (func != NULL)
1993 return func(vf, nm, vpp, pnp, flags, rdir, cr, ct,
1994 direntflags, realpnp);
1996 return fop_lookup_dispatch(vnode, nm, vpp, pnp, flags, rdir, cr, ct,
1997 direntflags, realpnp, false);
2001 vnext_create(femarg_t *vf, char *name, vattr_t *vap, vcexcl_t excl,
2002 int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct,
2003 vsecattr_t *vsecp)
2005 int (*func)(femarg_t *, char *, vattr_t *, vcexcl_t, int, vnode_t **,
2006 cred_t *, int, caller_context_t *, vsecattr_t *);
2007 struct vnode *vnode = vf->fa_vnode.vp;
2009 ASSERT(vf != NULL);
2010 vf->fa_fnode--;
2011 func = vsop_find(vf, femop_create);
2013 if (func != NULL)
2014 return func(vf, name, vap, excl, mode, vpp, cr, flag, ct,
2015 vsecp);
2017 return fop_create_dispatch(vnode, name, vap, excl, mode, vpp, cr, flag,
2018 ct, vsecp, false);
2022 vnext_remove(femarg_t *vf, char *nm, cred_t *cr, caller_context_t *ct,
2023 int flags)
2025 int (*func)(femarg_t *, char *, cred_t *, caller_context_t *, int);
2026 struct vnode *vnode = vf->fa_vnode.vp;
2028 ASSERT(vf != NULL);
2029 vf->fa_fnode--;
2030 func = vsop_find(vf, femop_remove);
2032 if (func != NULL)
2033 return func(vf, nm, cr, ct, flags);
2035 return fop_remove_dispatch(vnode, nm, cr, ct, flags, false);
2039 vnext_link(femarg_t *vf, vnode_t *svp, char *tnm, cred_t *cr,
2040 caller_context_t *ct, int flags)
2042 int (*func)(femarg_t *, vnode_t *, char *, cred_t *,
2043 caller_context_t *, int);
2044 struct vnode *vnode = vf->fa_vnode.vp;
2046 ASSERT(vf != NULL);
2047 vf->fa_fnode--;
2048 func = vsop_find(vf, femop_link);
2050 if (func != NULL)
2051 return func(vf, svp, tnm, cr, ct, flags);
2053 return fop_link_dispatch(vnode, svp, tnm, cr, ct, flags, false);
2057 vnext_rename(femarg_t *vf, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
2058 caller_context_t *ct, int flags)
2060 int (*func)(femarg_t *, char *, vnode_t *, char *, cred_t *,
 2061 caller_context_t *, int);
2062 struct vnode *vnode = vf->fa_vnode.vp;
2064 ASSERT(vf != NULL);
2065 vf->fa_fnode--;
2066 func = vsop_find(vf, femop_rename);
2068 if (func != NULL)
2069 return func(vf, snm, tdvp, tnm, cr, ct, flags);
2071 return fop_rename_dispatch(vnode, snm, tdvp, tnm, cr, ct, flags, false);
2075 vnext_mkdir(femarg_t *vf, char *dirname, vattr_t *vap, vnode_t **vpp,
2076 cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
2078 int (*func)(femarg_t *, char *, vattr_t *, vnode_t **, cred_t *,
2079 caller_context_t *, int, vsecattr_t *);
2080 struct vnode *vnode = vf->fa_vnode.vp;
2082 ASSERT(vf != NULL);
2083 vf->fa_fnode--;
2084 func = vsop_find(vf, femop_mkdir);
2086 if (func != NULL)
2087 return func(vf, dirname, vap, vpp, cr, ct, flags, vsecp);
2089 return fop_mkdir_dispatch(vnode, dirname, vap, vpp, cr, ct, flags,
2090 vsecp, false);
2094 vnext_rmdir(femarg_t *vf, char *nm, vnode_t *cdir, cred_t *cr,
2095 caller_context_t *ct, int flags)
2097 int (*func)(femarg_t *, char *, vnode_t *, cred_t *, caller_context_t *,
2098 int);
2099 struct vnode *vnode = vf->fa_vnode.vp;
2101 ASSERT(vf != NULL);
2102 vf->fa_fnode--;
2103 func = vsop_find(vf, femop_rmdir);
2105 if (func != NULL)
2106 return func(vf, nm, cdir, cr, ct, flags);
2108 return fop_rmdir_dispatch(vnode, nm, cdir, cr, ct, flags, false);
2112 vnext_readdir(femarg_t *vf, uio_t *uiop, cred_t *cr, int *eofp,
2113 caller_context_t *ct, int flags)
2115 int (*func)(femarg_t *, uio_t *, cred_t *, int *, caller_context_t *,
2116 int);
2117 struct vnode *vnode = vf->fa_vnode.vp;
2119 ASSERT(vf != NULL);
2120 vf->fa_fnode--;
2121 func = vsop_find(vf, femop_readdir);
2123 if (func != NULL)
2124 return func(vf, uiop, cr, eofp, ct, flags);
2126 return fop_readdir_dispatch(vnode, uiop, cr, eofp, ct, flags, false);
2130 vnext_symlink(femarg_t *vf, char *linkname, vattr_t *vap, char *target,
2131 cred_t *cr, caller_context_t *ct, int flags)
2133 int (*func)(femarg_t *, char *, vattr_t *, char *, cred_t *,
2134 caller_context_t *, int);
2135 struct vnode *vnode = vf->fa_vnode.vp;
2137 ASSERT(vf != NULL);
2138 vf->fa_fnode--;
2139 func = vsop_find(vf, femop_symlink);
2141 if (func != NULL)
2142 return func(vf, linkname, vap, target, cr, ct, flags);
2144 return fop_symlink_dispatch(vnode, linkname, vap, target, cr, ct,
2145 flags, false);
2149 vnext_readlink(femarg_t *vf, uio_t *uiop, cred_t *cr, caller_context_t *ct)
2151 int (*func)(femarg_t *, uio_t *, cred_t *, caller_context_t *);
2152 struct vnode *vnode = vf->fa_vnode.vp;
2154 ASSERT(vf != NULL);
2155 vf->fa_fnode--;
2156 func = vsop_find(vf, femop_readlink);
2158 if (func != NULL)
2159 return func(vf, uiop, cr, ct);
2161 return fop_readlink_dispatch(vnode, uiop, cr, ct, false);
2165 vnext_fsync(femarg_t *vf, int syncflag, cred_t *cr, caller_context_t *ct)
2167 int (*func)(femarg_t *, int, cred_t *, caller_context_t *);
2168 struct vnode *vnode = vf->fa_vnode.vp;
2170 ASSERT(vf != NULL);
2171 vf->fa_fnode--;
2172 func = vsop_find(vf, femop_fsync);
2174 if (func != NULL)
2175 return func(vf, syncflag, cr, ct);
2177 return fop_fsync_dispatch(vnode, syncflag, cr, ct, false);
2180 void
2181 vnext_inactive(femarg_t *vf, cred_t *cr, caller_context_t *ct)
2183 void (*func)(femarg_t *, cred_t *, caller_context_t *);
2184 struct vnode *vnode = vf->fa_vnode.vp;
2186 ASSERT(vf != NULL);
2187 vf->fa_fnode--;
2188 func = vsop_find(vf, femop_inactive);
2190 if (func != NULL)
2191 func(vf, cr, ct);
2192 else
2193 fop_inactive_dispatch(vnode, cr, ct, false);
2197 vnext_fid(femarg_t *vf, fid_t *fidp, caller_context_t *ct)
2199 int (*func)(femarg_t *, fid_t *, caller_context_t *);
2200 struct vnode *vnode = vf->fa_vnode.vp;
2202 ASSERT(vf != NULL);
2203 vf->fa_fnode--;
2204 func = vsop_find(vf, femop_fid);
2206 if (func != NULL)
2207 return func(vf, fidp, ct);
2209 return fop_fid_dispatch(vnode, fidp, ct, false);
2213 vnext_rwlock(femarg_t *vf, int write_lock, caller_context_t *ct)
2215 int (*func)(femarg_t *, int, caller_context_t *);
2216 struct vnode *vnode = vf->fa_vnode.vp;
2218 ASSERT(vf != NULL);
2219 vf->fa_fnode--;
2220 func = vsop_find(vf, femop_rwlock);
2222 if (func != NULL)
2223 return func(vf, write_lock, ct);
2225 return fop_rwlock_dispatch(vnode, write_lock, ct, false);
2228 void
2229 vnext_rwunlock(femarg_t *vf, int write_lock, caller_context_t *ct)
2231 void (*func)(femarg_t *, int, caller_context_t *);
2232 struct vnode *vnode = vf->fa_vnode.vp;
2234 ASSERT(vf != NULL);
2235 vf->fa_fnode--;
2236 func = vsop_find(vf, femop_rwunlock);
2238 if (func != NULL)
2239 func(vf, write_lock, ct);
2240 else
2241 fop_rwunlock_dispatch(vnode, write_lock, ct, false);
2245 vnext_seek(femarg_t *vf, offset_t ooff, offset_t *noffp, caller_context_t *ct)
2247 int (*func)(femarg_t *, offset_t, offset_t *, caller_context_t *);
2248 struct vnode *vnode = vf->fa_vnode.vp;
2250 ASSERT(vf != NULL);
2251 vf->fa_fnode--;
2252 func = vsop_find(vf, femop_seek);
2254 if (func != NULL)
2255 return func(vf, ooff, noffp, ct);
2257 return fop_seek_dispatch(vnode, ooff, noffp, ct, false);
2261 vnext_cmp(femarg_t *vf, vnode_t *vp2, caller_context_t *ct)
2263 int (*func)(femarg_t *, vnode_t *, caller_context_t *);
2264 struct vnode *vnode = vf->fa_vnode.vp;
2266 ASSERT(vf != NULL);
2267 vf->fa_fnode--;
2268 func = vsop_find(vf, femop_cmp);
2270 if (func != NULL)
2271 return func(vf, vp2, ct);
2273 return fop_cmp_dispatch(vnode, vp2, ct, false);
2277 vnext_frlock(femarg_t *vf, int cmd, struct flock64 *bfp, int flag,
2278 offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
2279 caller_context_t *ct)
2281 int (*func)(femarg_t *, int, struct flock64 *, int, offset_t,
2282 struct flk_callback *, cred_t *, caller_context_t *);
2283 struct vnode *vnode = vf->fa_vnode.vp;
2285 ASSERT(vf != NULL);
2286 vf->fa_fnode--;
2287 func = vsop_find(vf, femop_frlock);
2289 if (func != NULL)
2290 return func(vf, cmd, bfp, flag, offset, flk_cbp, cr, ct);
2292 return fop_frlock_dispatch(vnode, cmd, bfp, flag, offset, flk_cbp, cr,
2293 ct, false);
2297 vnext_space(femarg_t *vf, int cmd, struct flock64 *bfp, int flag,
2298 offset_t offset, cred_t *cr, caller_context_t *ct)
2300 int (*func)(femarg_t *, int, struct flock64 *, int, offset_t,
2301 cred_t *, caller_context_t *);
2302 struct vnode *vnode = vf->fa_vnode.vp;
2304 ASSERT(vf != NULL);
2305 vf->fa_fnode--;
2306 func = vsop_find(vf, femop_space);
2308 if (func != NULL)
2309 return func(vf, cmd, bfp, flag, offset, cr, ct);
2311 return fop_space_dispatch(vnode, cmd, bfp, flag, offset, cr, ct, false);
2315 vnext_realvp(femarg_t *vf, vnode_t **vpp, caller_context_t *ct)
2317 int (*func)(femarg_t *, vnode_t **, caller_context_t *);
2318 struct vnode *vnode = vf->fa_vnode.vp;
2320 ASSERT(vf != NULL);
2321 vf->fa_fnode--;
2322 func = vsop_find(vf, femop_realvp);
2324 if (func != NULL)
2325 return func(vf, vpp, ct);
2327 return fop_realvp_dispatch(vnode, vpp, ct, false);
2331 vnext_getpage(femarg_t *vf, offset_t off, size_t len, uint_t *protp,
2332 struct page **plarr, size_t plsz, struct seg *seg, caddr_t addr,
2333 enum seg_rw rw, cred_t *cr, caller_context_t *ct)
2335 int (*func)(femarg_t *, offset_t, size_t, uint_t *, struct page **,
2336 size_t, struct seg *, caddr_t, enum seg_rw, cred_t *,
2337 caller_context_t *);
2338 struct vnode *vnode = vf->fa_vnode.vp;
2340 ASSERT(vf != NULL);
2341 vf->fa_fnode--;
2342 func = vsop_find(vf, femop_getpage);
2344 if (func != NULL)
2345 return func(vf, off, len, protp, plarr, plsz, seg, addr, rw,
2346 cr, ct);
2348 return fop_getpage_dispatch(vnode, off, len, protp, plarr, plsz, seg, addr,
2349 rw, cr, ct, false);
2353 vnext_putpage(femarg_t *vf, offset_t off, size_t len, int flags,
2354 cred_t *cr, caller_context_t *ct)
2356 int (*func)(femarg_t *, offset_t, size_t, int, cred_t *,
2357 caller_context_t *);
2358 struct vnode *vnode = vf->fa_vnode.vp;
2360 ASSERT(vf != NULL);
2361 vf->fa_fnode--;
2362 func = vsop_find(vf, femop_putpage);
2364 if (func != NULL)
2365 return func(vf, off, len, flags, cr, ct);
2367 return fop_putpage_dispatch(vnode, off, len, flags, cr, ct, false);
2371 vnext_map(femarg_t *vf, offset_t off, struct as *as, caddr_t *addrp,
2372 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
2373 cred_t *cr, caller_context_t *ct)
2375 int (*func)(femarg_t *, offset_t, struct as *, caddr_t *, size_t,
2376 uchar_t, uchar_t, uint_t, cred_t *, caller_context_t *);
2377 struct vnode *vnode = vf->fa_vnode.vp;
2379 ASSERT(vf != NULL);
2380 vf->fa_fnode--;
2381 func = vsop_find(vf, femop_map);
2383 if (func != NULL)
2384 return func(vf, off, as, addrp, len, prot, maxprot, flags,
2385 cr, ct);
2387 return fop_map_dispatch(vnode, off, as, addrp, len, prot, maxprot, flags,
2388 cr, ct, false);
2392 vnext_addmap(femarg_t *vf, offset_t off, struct as *as, caddr_t addr,
2393 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
2394 cred_t *cr, caller_context_t *ct)
2396 int (*func)(femarg_t *, offset_t, struct as *, caddr_t, size_t, uchar_t,
2397 uchar_t, uint_t, cred_t *, caller_context_t *);
2398 struct vnode *vnode = vf->fa_vnode.vp;
2400 ASSERT(vf != NULL);
2401 vf->fa_fnode--;
2402 func = vsop_find(vf, femop_addmap);
2404 if (func != NULL)
2405 return func(vf, off, as, addr, len, prot, maxprot, flags,
2406 cr, ct);
2408 return fop_addmap_dispatch(vnode, off, as, addr, len, prot, maxprot, flags,
2409 cr, ct, false);
2413 vnext_delmap(femarg_t *vf, offset_t off, struct as *as, caddr_t addr,
2414 size_t len, uint_t prot, uint_t maxprot, uint_t flags,
2415 cred_t *cr, caller_context_t *ct)
2417 int (*func)(femarg_t *, offset_t, struct as *, caddr_t, size_t, uint_t,
2418 uint_t, uint_t, cred_t *, caller_context_t *);
2419 struct vnode *vnode = vf->fa_vnode.vp;
2421 ASSERT(vf != NULL);
2422 vf->fa_fnode--;
2423 func = vsop_find(vf, femop_delmap);
2425 if (func != NULL)
2426 return func(vf, off, as, addr, len, prot, maxprot, flags,
2427 cr, ct);
2429 return fop_delmap_dispatch(vnode, off, as, addr, len, prot, maxprot, flags,
2430 cr, ct, false);
2434 vnext_poll(femarg_t *vf, short events, int anyyet, short *reventsp,
2435 struct pollhead **phpp, caller_context_t *ct)
2437 int (*func)(femarg_t *, short, int, short *, struct pollhead **,
2438 caller_context_t *);
2439 struct vnode *vnode = vf->fa_vnode.vp;
2441 ASSERT(vf != NULL);
2442 vf->fa_fnode--;
2443 func = vsop_find(vf, femop_poll);
2445 if (func != NULL)
2446 return func(vf, events, anyyet, reventsp, phpp, ct);
2448 return fop_poll_dispatch(vnode, events, anyyet, reventsp, phpp, ct, false);
2452 vnext_dump(femarg_t *vf, caddr_t addr, offset_t lbdn, offset_t dblks,
2453 caller_context_t *ct)
2455 int (*func)(femarg_t *, caddr_t, offset_t, offset_t,
2456 caller_context_t *);
2457 struct vnode *vnode = vf->fa_vnode.vp;
2459 ASSERT(vf != NULL);
2460 vf->fa_fnode--;
2461 func = vsop_find(vf, femop_dump);
2463 if (func != NULL)
2464 return func(vf, addr, lbdn, dblks, ct);
2466 return fop_dump_dispatch(vnode, addr, lbdn, dblks, ct, false);
2470 vnext_pathconf(femarg_t *vf, int cmd, ulong_t *valp, cred_t *cr,
2471 caller_context_t *ct)
2473 int (*func)(femarg_t *, int, ulong_t *, cred_t *, caller_context_t *);
2474 struct vnode *vnode = vf->fa_vnode.vp;
2476 ASSERT(vf != NULL);
2477 vf->fa_fnode--;
2478 func = vsop_find(vf, femop_pathconf);
2480 if (func != NULL)
2481 return func(vf, cmd, valp, cr, ct);
2483 return fop_pathconf_dispatch(vnode, cmd, valp, cr, ct, false);
2487 vnext_pageio(femarg_t *vf, struct page *pp, uoff_t io_off,
2488 size_t io_len, int flags, cred_t *cr, caller_context_t *ct)
2490 int (*func)(femarg_t *, struct page *, uoff_t, size_t, int, cred_t *,
2491 caller_context_t *);
2492 struct vnode *vnode = vf->fa_vnode.vp;
2494 ASSERT(vf != NULL);
2495 vf->fa_fnode--;
2496 func = vsop_find(vf, femop_pageio);
2498 if (func != NULL)
2499 return func(vf, pp, io_off, io_len, flags, cr, ct);
2501 return fop_pageio_dispatch(vnode, pp, io_off, io_len, flags, cr, ct,
2502 false);
2506 vnext_dumpctl(femarg_t *vf, int action, offset_t *blkp, caller_context_t *ct)
2508 int (*func)(femarg_t *, int, offset_t *, caller_context_t *);
2509 struct vnode *vnode = vf->fa_vnode.vp;
2511 ASSERT(vf != NULL);
2512 vf->fa_fnode--;
2513 func = vsop_find(vf, femop_dumpctl);
2515 if (func != NULL)
2516 return func(vf, action, blkp, ct);
2518 return fop_dumpctl_dispatch(vnode, action, blkp, ct, false);
2521 void
2522 vnext_dispose(femarg_t *vf, struct page *pp, int flag, int dn, cred_t *cr,
2523 caller_context_t *ct)
2525 void (*func)(femarg_t *, struct page *, int, int, cred_t *,
2526 caller_context_t *);
2527 struct vnode *vnode = vf->fa_vnode.vp;
2529 ASSERT(vf != NULL);
2530 vf->fa_fnode--;
2531 func = vsop_find(vf, femop_dispose);
2533 if (func != NULL)
2534 func(vf, pp, flag, dn, cr, ct);
2535 else
2536 fop_dispose_dispatch(vnode, pp, flag, dn, cr, ct, false);
2540 vnext_setsecattr(femarg_t *vf, vsecattr_t *vsap, int flag, cred_t *cr,
2541 caller_context_t *ct)
2543 int (*func)(femarg_t *, vsecattr_t *, int, cred_t *,
2544 caller_context_t *);
2545 struct vnode *vnode = vf->fa_vnode.vp;
2547 ASSERT(vf != NULL);
2548 vf->fa_fnode--;
2549 func = vsop_find(vf, femop_setsecattr);
2551 if (func != NULL)
2552 return func(vf, vsap, flag, cr, ct);
2554 return fop_setsecattr_dispatch(vnode, vsap, flag, cr, ct, false);
2558 vnext_getsecattr(femarg_t *vf, vsecattr_t *vsap, int flag, cred_t *cr,
2559 caller_context_t *ct)
2561 int (*func)(femarg_t *, vsecattr_t *, int, cred_t *,
2562 caller_context_t *);
2563 struct vnode *vnode = vf->fa_vnode.vp;
2565 ASSERT(vf != NULL);
2566 vf->fa_fnode--;
2567 func = vsop_find(vf, femop_getsecattr);
2569 if (func != NULL)
2570 return func(vf, vsap, flag, cr, ct);
2572 return fop_getsecattr_dispatch(vnode, vsap, flag, cr, ct, false);
2576 vnext_shrlock(femarg_t *vf, int cmd, struct shrlock *shr, int flag,
2577 cred_t *cr, caller_context_t *ct)
2579 int (*func)(femarg_t *, int, struct shrlock *, int, cred_t *,
2580 caller_context_t *);
2581 struct vnode *vnode = vf->fa_vnode.vp;
2583 ASSERT(vf != NULL);
2584 vf->fa_fnode--;
2585 func = vsop_find(vf, femop_shrlock);
2587 if (func != NULL)
2588 return func(vf, cmd, shr, flag, cr, ct);
2590 return fop_shrlock_dispatch(vnode, cmd, shr, flag, cr, ct, false);
2594 vnext_vnevent(femarg_t *vf, vnevent_t vnevent, vnode_t *dvp, char *cname,
2595 caller_context_t *ct)
2597 int (*func)(femarg_t *, vnevent_t, vnode_t *, char *,
2598 caller_context_t *);
2599 struct vnode *vnode = vf->fa_vnode.vp;
2601 ASSERT(vf != NULL);
2602 vf->fa_fnode--;
2603 func = vsop_find(vf, femop_vnevent);
2605 if (func != NULL)
2606 return func(vf, vnevent, dvp, cname, ct);
2608 return fop_vnevent_dispatch(vnode, vnevent, dvp, cname, ct, false);
2612 vnext_reqzcbuf(femarg_t *vf, enum uio_rw ioflag, xuio_t *xuiop, cred_t *cr,
2613 caller_context_t *ct)
2615 int (*func)(femarg_t *, enum uio_rw, xuio_t *, cred_t *,
2616 caller_context_t *);
2617 struct vnode *vnode = vf->fa_vnode.vp;
2619 ASSERT(vf != NULL);
2620 vf->fa_fnode--;
2621 func = vsop_find(vf, femop_reqzcbuf);
2623 if (func != NULL)
2624 return func(vf, ioflag, xuiop, cr, ct);
2626 return fop_reqzcbuf_dispatch(vnode, ioflag, xuiop, cr, ct, false);
2630 vnext_retzcbuf(femarg_t *vf, xuio_t *xuiop, cred_t *cr, caller_context_t *ct)
2632 int (*func)(femarg_t *, xuio_t *, cred_t *, caller_context_t *);
2633 struct vnode *vnode = vf->fa_vnode.vp;
2635 ASSERT(vf != NULL);
2636 vf->fa_fnode--;
2637 func = vsop_find(vf, femop_retzcbuf);
2639 if (func != NULL)
2640 return func(vf, xuiop, cr, ct);
2642 return fop_retzcbuf_dispatch(vnode, xuiop, cr, ct, false);
2646 vfsnext_mount(fsemarg_t *vf, vnode_t *mvp, struct mounta *uap, cred_t *cr)
2648 int (*func)(fsemarg_t *, vnode_t *, struct mounta *, cred_t *);
2649 struct vfs *vfs = vf->fa_vnode.vfsp;
2651 ASSERT(vf != NULL);
2652 vf->fa_fnode--;
2653 func = vfsop_find(vf, fsemop_mount);
2655 if (func != NULL)
2656 return func(vf, mvp, uap, cr);
2658 return fsop_mount_dispatch(vfs, mvp, uap, cr, false);
2662 vfsnext_unmount(fsemarg_t *vf, int flag, cred_t *cr)
2664 int (*func)(fsemarg_t *, int, cred_t *);
2665 struct vfs *vfs = vf->fa_vnode.vfsp;
2667 ASSERT(vf != NULL);
2668 vf->fa_fnode--;
2669 func = vfsop_find(vf, fsemop_unmount);
2671 if (func != NULL)
2672 return func(vf, flag, cr);
2674 return fsop_unmount_dispatch(vfs, flag, cr, false);
2678 vfsnext_root(fsemarg_t *vf, vnode_t **vpp)
2680 int (*func)(fsemarg_t *, vnode_t **);
2681 struct vfs *vfs = vf->fa_vnode.vfsp;
2683 ASSERT(vf != NULL);
2684 vf->fa_fnode--;
2685 func = vfsop_find(vf, fsemop_root);
2687 if (func != NULL)
2688 return func(vf, vpp);
2690 return fsop_root_dispatch(vfs, vpp, false);
2694 vfsnext_statvfs(fsemarg_t *vf, statvfs64_t *sp)
2696 int (*func)(fsemarg_t *, statvfs64_t *);
2697 struct vfs *vfs = vf->fa_vnode.vfsp;
2699 ASSERT(vf != NULL);
2700 vf->fa_fnode--;
2701 func = vfsop_find(vf, fsemop_statvfs);
2703 if (func != NULL)
2704 return func(vf, sp);
2706 return fsop_statfs_dispatch(vfs, sp, false);
2710 vfsnext_sync(fsemarg_t *vf, short flag, cred_t *cr)
2712 int (*func)(fsemarg_t *, short, cred_t *);
2713 struct vfs *vfs = vf->fa_vnode.vfsp;
2715 ASSERT(vf != NULL);
2716 vf->fa_fnode--;
2717 func = vfsop_find(vf, fsemop_sync);
2719 if (func != NULL)
2720 return func(vf, flag, cr);
2722 return fsop_sync_dispatch(vfs, flag, cr, false);
2726 vfsnext_vget(fsemarg_t *vf, vnode_t **vpp, fid_t *fidp)
2728 int (*func)(fsemarg_t *, vnode_t **, fid_t *);
2729 struct vfs *vfs = vf->fa_vnode.vfsp;
2731 ASSERT(vf != NULL);
2732 vf->fa_fnode--;
2733 func = vfsop_find(vf, fsemop_vget);
2735 if (func != NULL)
2736 return func(vf, vpp, fidp);
2738 return fsop_vget_dispatch(vfs, vpp, fidp, false);
2742 vfsnext_mountroot(fsemarg_t *vf, enum whymountroot reason)
2744 int (*func)(fsemarg_t *, enum whymountroot);
2745 struct vfs *vfs = vf->fa_vnode.vfsp;
2747 ASSERT(vf != NULL);
2748 vf->fa_fnode--;
2749 func = vfsop_find(vf, fsemop_mountroot);
2751 if (func != NULL)
2752 return func(vf, reason);
2754 return fsop_mountroot_dispatch(vfs, reason, false);
2757 void
2758 vfsnext_freevfs(fsemarg_t *vf)
2760 void (*func)(fsemarg_t *);
2761 struct vfs *vfs = vf->fa_vnode.vfsp;
2763 ASSERT(vf != NULL);
2764 vf->fa_fnode--;
2765 func = vfsop_find(vf, fsemop_freevfs);
2767 if (func != NULL)
2768 func(vf);
2769 else
2770 fsop_freefs_dispatch(vfs, false);
2774 vfsnext_vnstate(fsemarg_t *vf, vnode_t *vp, vntrans_t nstate)
2776 int (*func)(fsemarg_t *, vnode_t *, vntrans_t);
2777 struct vfs *vfs = vf->fa_vnode.vfsp;
2779 ASSERT(vf != NULL);
2780 vf->fa_fnode--;
2781 func = vfsop_find(vf, fsemop_vnstate);
2783 if (func != NULL)
2784 return func(vf, vp, nstate);
2786 return fsop_vnstate_dispatch(vfs, vp, nstate, false);
2791 * Create a new fem_head and associate it with the vnode.
2792 * To keep the unaugmented vnode access path lock free, we spin
2793 * update this - create a new one, then try to install it. If we
2794 * fail to install, free the one we created and use the installed one.
2797 static struct fem_head *
2798 new_femhead(struct fem_head **hp)
2800 struct fem_head *head;
2802 head = kmem_alloc(sizeof (*head), KM_SLEEP);
2803 mutex_init(&head->femh_lock, NULL, MUTEX_DEFAULT, NULL);
2804 head->femh_list = NULL;
2805 if (atomic_cas_ptr(hp, NULL, head) != NULL) {
2806 kmem_free(head, sizeof (*head));
2807 head = *hp;
2809 return (head);
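/*
 * Illustrative sketch (not compiled) of the lock-free install pattern
 * used above, shown on an arbitrary pointer: allocate a candidate, try
 * to swap it in against NULL, and if another thread won the race, free
 * the candidate and adopt the winner's object.  "struct foo" and
 * "objp->foo_ptr" are hypothetical names used only for the sketch.
 *
 *	struct foo *f = kmem_alloc(sizeof (*f), KM_SLEEP);
 *	if (atomic_cas_ptr(&objp->foo_ptr, NULL, f) != NULL) {
 *		kmem_free(f, sizeof (*f));	(lost the race)
 *		f = objp->foo_ptr;		(use the installed one)
 *	}
 */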
2813 * Create a fem_list. The fem_list that gets returned is in a
2814 * very rudimentary state and MUST NOT be used until it's initialized
2815 * (usually by femlist_construct() or fem_dup_list()). The refcount
2816 * and size are set properly and the top-of-stack is set to the "guard"
2817 * node just to be consistent.
2819 * If anyone were to accidentally try to run on this fem_list before it's
2820 * initialized, the system would likely panic trying to dereference the
2821 * (NULL) fn_op pointer.
2824 static struct fem_list *
2825 femlist_create(int numnodes)
2827 struct fem_list *sp;
2829 sp = kmem_alloc(fl_ntob(numnodes), KM_SLEEP);
2830 sp->feml_refc = 1;
2831 sp->feml_ssize = numnodes;
2832 sp->feml_nodes[0] = FEM_GUARD(FEMTYPE_NULL);
2833 sp->feml_tos = 0;
2834 return (sp);
2838 * Construct a new femlist.
2839 * The list is constructed with the appropriate type of guard to
2840 * anchor it, and inserts the original ops.
2843 static struct fem_list *
2844 femlist_construct(int type, int numnodes)
2846 struct fem_list *sp;
2848 sp = femlist_create(numnodes);
2849 sp->feml_nodes[0] = FEM_GUARD(type);
2850 sp->feml_nodes[1].fn_op.anon = NULL;
2851 sp->feml_nodes[1].fn_available = NULL;
2852 sp->feml_nodes[1].fn_av_hold = NULL;
2853 sp->feml_nodes[1].fn_av_rele = NULL;
2854 sp->feml_tos = 1;
2855 return (sp);
2859 * Duplicate a list. Copy the original list to the clone.
2861 * NOTE: The caller must have the fem_head for the lists locked.
2862 * Assuming the appropriate lock is held and the caller has done the
2863 * math right, the clone list should be big enough to hold the original.
2866 static void
2867 fem_dup_list(struct fem_list *orig, struct fem_list *clone)
2869 int i;
2871 ASSERT(clone->feml_ssize >= orig->feml_ssize);
2873 bcopy(orig->feml_nodes, clone->feml_nodes,
2874 sizeof (orig->feml_nodes[0]) * orig->feml_ssize);
2875 clone->feml_tos = orig->feml_tos;
2877 * Now that we've copied the old list (orig) to the new list (clone),
2878 * we need to walk the new list and put another hold on fn_available.
2880 for (i = clone->feml_tos; i > 0; i--) {
2881 struct fem_node *fnp = &clone->feml_nodes[i];
2883 if (fnp->fn_av_hold)
2884 (*(fnp->fn_av_hold))(fnp->fn_available);
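/*
 * Illustrative sketch (not compiled): the general pattern when cloning a
 * stack of nodes whose private data is reference counted - copy the
 * entries, then take an extra hold through each node's hold callback so
 * the clone owns its own references and the original list can be released
 * independently.  "src", "dst", "n" and "tos" are hypothetical names.
 *
 *	bcopy(src, dst, sizeof (src[0]) * n);
 *	for (i = tos; i > 0; i--) {
 *		if (dst[i].fn_av_hold != NULL)
 *			(*dst[i].fn_av_hold)(dst[i].fn_available);
 *	}
 */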
2889 static int
2890 fem_push_node(
2891 struct fem_head **hp,
2892 int type,
2893 struct fem_node *nnode,
2894 femhow_t how)
2896 struct fem_head *hd;
2897 struct fem_list *list;
2898 int retry;
2899 int error = 0;
2900 int i;
2902 /* Validate the node */
2903 if ((nnode->fn_op.anon == NULL) || (nnode->fn_available == NULL)) {
2904 return (EINVAL);
2907 if ((hd = *hp) == NULL) { /* construct a proto-list */
2908 hd = new_femhead(hp);
2911 * RULE: once a femhead has been pushed onto an object, it cannot be
2912 * removed until the object is destroyed. It can be deactivated by
2913 * placing the original 'object operations' onto the object, which
2914 * will ignore the femhead.
2915 * The loop will exit when the femh_list has space to push a monitor
2916 * onto it.
2918 do {
2919 retry = 1;
2920 list = fem_lock(hd);
2922 if (list != NULL) {
2923 if (list->feml_tos+1 < list->feml_ssize) {
2924 retry = 0;
2925 } else {
2926 struct fem_list *olist = list;
2928 fem_addref(olist);
2929 fem_unlock(hd);
2930 list = femlist_create(olist->feml_ssize * 2);
2931 (void) fem_lock(hd);
2932 if (hd->femh_list == olist) {
2933 if (list->feml_ssize <=
2934 olist->feml_ssize) {
2936 * We have a new list, but it
2937 * is too small to hold the
2938 * original contents plus the
2939 * one to push. Release the
2940 * new list and start over.
2942 fem_release(list);
2943 fem_unlock(hd);
2944 } else {
2946 * Life is good: Our new list
2947 * is big enough to hold the
2948 * original list (olist) + 1.
2950 fem_dup_list(olist, list);
2951 /* orphan this list */
2952 hd->femh_list = list;
2953 (void) fem_delref(olist);
2954 retry = 0;
2956 } else {
2957 /* concurrent update, retry */
2958 fem_release(list);
2959 fem_unlock(hd);
2961 /* remove the reference we added above */
2962 fem_release(olist);
2964 } else {
2965 fem_unlock(hd);
2966 list = femlist_construct(type, NNODES_DEFAULT);
2967 (void) fem_lock(hd);
2968 if (hd->femh_list != NULL) {
2969 /* concurrent update, retry */
2970 fem_release(list);
2971 fem_unlock(hd);
2972 } else {
2973 hd->femh_list = list;
2974 retry = 0;
2977 } while (retry);
2979 ASSERT(mutex_owner(&hd->femh_lock) == curthread);
2980 ASSERT(list->feml_tos+1 < list->feml_ssize);
2983 * The "how" argument controls whether and how nodes are
2984 * pushed. If it's FORCE, then we can skip
2985 * all the checks and push it on.
2987 if (how != FORCE) {
2988 /* Start at the top and work our way down */
2989 for (i = list->feml_tos; i > 0; i--) {
2990 void *fn_av = list->feml_nodes[i].fn_available;
2991 void *fn_op = list->feml_nodes[i].fn_op.anon;
2994 * OPARGUNIQ means that this node should not
2995 * be pushed on if a node with the same op/avail
2996 * combination exists. This situation returns
2997 * EBUSY.
2999 * OPUNIQ means that this node should not be
3000 * pushed on if a node with the same op exists.
3001 * This situation also returns EBUSY.
3003 switch (how) {
3005 case OPUNIQ:
3006 if (fn_op == nnode->fn_op.anon) {
3007 error = EBUSY;
3009 break;
3011 case OPARGUNIQ:
3012 if ((fn_op == nnode->fn_op.anon) &&
3013 (fn_av == nnode->fn_available)) {
3014 error = EBUSY;
3016 break;
3018 default:
3019 error = EINVAL; /* Unexpected value */
3020 break;
3023 if (error)
3024 break;
3028 if (error == 0) {
3030 * If no errors, slap the node on the list.
3031 * Note: The following is a structure copy.
3033 list->feml_nodes[++(list->feml_tos)] = *nnode;
3036 fem_unlock(hd);
3037 return (error);
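/*
 * Illustrative sketch (not compiled) of how the "how" argument is expected
 * to behave when the same monitor is pushed repeatedly via fem_install()
 * below; "my_fem", "arg1" and "arg2" are hypothetical.
 *
 *	(void) fem_install(vp, &my_fem, arg1, OPARGUNIQ, NULL, NULL); (0)
 *	(void) fem_install(vp, &my_fem, arg1, OPARGUNIQ, NULL, NULL); (EBUSY: same op and arg)
 *	(void) fem_install(vp, &my_fem, arg2, OPARGUNIQ, NULL, NULL); (0: different arg)
 *	(void) fem_install(vp, &my_fem, arg2, OPUNIQ, NULL, NULL);    (EBUSY: op already present)
 *	(void) fem_install(vp, &my_fem, arg2, FORCE, NULL, NULL);     (0: no uniqueness check)
 */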
3041 * Remove a node by copying the list above it down a notch.
3042 * If the list is busy, replace it with an idle one and work
3043 * upon it.
3044 * A node matches if the opset matches and the datap matches or is
3045 * null.
3048 static int
3049 remove_node(struct fem_list *sp, void *opset, void *datap)
3051 int i;
3052 struct fem_node *fn;
3054 for (i = sp->feml_tos; i > 0; i--) {
3055 fn = sp->feml_nodes+i;
3056 if (fn->fn_op.anon == opset &&
3057 (fn->fn_available == datap || datap == NULL)) {
3058 break;
3061 if (i == 0) {
3062 return (EINVAL);
3066 * At this point we have a node in-hand (*fn) that we are about
3067 * to remove by overwriting it and adjusting the stack. This is
3068 * our last chance to do anything with this node so we do the
3069 * release on the arg.
3071 if (fn->fn_av_rele)
3072 (*(fn->fn_av_rele))(fn->fn_available);
3074 while (i++ < sp->feml_tos) {
3075 sp->feml_nodes[i-1] = sp->feml_nodes[i];
3077 return (0);
3080 static int
3081 fem_remove_node(struct fem_head *fh, void *opset, void *datap)
3083 struct fem_list *sp;
3084 int error = 0;
3085 int retry;
3087 if (fh == NULL) {
3088 return (EINVAL);
3091 do {
3092 retry = 0;
3093 if ((sp = fem_lock(fh)) == NULL) {
3094 fem_unlock(fh);
3095 error = EINVAL;
3096 } else if (sp->feml_refc == 1) {
3097 error = remove_node(sp, opset, datap);
3098 if (sp->feml_tos == 1) {
3100 * The top-of-stack was decremented by
3101 * remove_node(). If it got down to 1,
3102 * then the base ops were replaced and we
3103 * call fem_release() which will free the
3104 * fem_list.
3106 fem_release(sp);
3107 fh->femh_list = NULL;
3108 /* XXX - Do we need a membar_producer() call? */
3110 fem_unlock(fh);
3111 } else {
3112 /* busy - install a new one without this monitor */
3113 struct fem_list *nsp; /* New fem_list being cloned */
3115 fem_addref(sp);
3116 fem_unlock(fh);
3117 nsp = femlist_create(sp->feml_ssize);
3118 if (fem_lock(fh) == sp) {
3120 * We popped out of the lock, created a
3121 * list, then relocked. If we're in here
3122 * then the fem_head points to the same list
3123 * it started with.
3125 fem_dup_list(sp, nsp);
3126 error = remove_node(nsp, opset, datap);
3127 if (error != 0) {
3128 fem_release(nsp);
3129 } else if (nsp->feml_tos == 1) {
3130 /* New list now empty, tear it down */
3131 fem_release(nsp);
3132 fh->femh_list = NULL;
3133 } else {
3134 fh->femh_list = nsp;
3136 (void) fem_delref(sp);
3137 } else {
3138 /* List changed while locked, try again... */
3139 fem_release(nsp);
3140 retry = 1;
3143 * If error is set, then we tried to remove a node
3144 * from the list, but failed. This means that we
3145 * will still be using this list so don't release it.
3147 if (error == 0)
3148 fem_release(sp);
3149 fem_unlock(fh);
3151 } while (retry);
3152 return (error);
3157 * Perform the operation on each element until one returns non-zero.
3159 static int
3160 fem_walk_list(
3161 struct fem_list *sp,
3162 int (*f)(struct fem_node *, void *, void *),
3163 void *mon,
3164 void *arg)
3166 int i;
3168 ASSERT(sp != NULL);
3169 for (i = sp->feml_tos; i > 0; i--) {
3170 if ((*f)(sp->feml_nodes+i, mon, arg) != 0) {
3171 break;
3174 return (i);
3178 * Companion comparison function for fem_walk_list().
3180 static int
3181 fem_compare_mon(struct fem_node *n, void *mon, void *arg)
3183 return ((n->fn_op.anon == mon) && (n->fn_available == arg));
3187 * VNODE interposition.
3191 fem_install(
3192 vnode_t *vp, /* Vnode on which monitor is being installed */
3193 fem_t *mon, /* Monitor operations being installed */
3194 void *arg, /* Opaque data used by monitor */
3195 femhow_t how, /* Installation control */
3196 void (*arg_hold)(void *), /* Hold routine for "arg" */
3197 void (*arg_rele)(void *)) /* Release routine for "arg" */
3199 int error;
3200 struct fem_node nnode;
3202 nnode.fn_available = arg;
3203 nnode.fn_op.fem = mon;
3204 nnode.fn_av_hold = arg_hold;
3205 nnode.fn_av_rele = arg_rele;
3207 * If we have a non-NULL hold function, do the hold right away.
3208 * The release is done in remove_node().
3210 if (arg_hold)
3211 (*arg_hold)(arg);
3213 error = fem_push_node(&vp->v_femhead, FEMTYPE_VNODE, &nnode, how);
3215 /* If there was an error then the monitor wasn't pushed */
3216 if (error && arg_rele)
3217 (*arg_rele)(arg);
3219 return (error);
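/*
 * Illustrative sketch (not compiled) of installing a monitor on a vnode.
 * The monitor, its hook and the hold/release routines are hypothetical;
 * ops the monitor leaves NULL are expected to fall through to the next
 * node or the normal dispatch path.
 *
 *	static struct fem my_fem = {
 *		.name = "my-monitor",
 *		.femop_getattr = my_getattr,	(see the sketch above)
 *	};
 *
 *	static void my_hold(void *arg) { my_data_hold(arg); }
 *	static void my_rele(void *arg) { my_data_rele(arg); }
 *
 *	error = fem_install(vp, &my_fem, my_data, OPARGUNIQ,
 *	    my_hold, my_rele);
 *	...
 *	if (fem_is_installed(vp, &my_fem, my_data))
 *		(void) fem_uninstall(vp, &my_fem, my_data);
 *
 * fem_install() takes its own hold on "my_data" up front and the matching
 * release happens in remove_node(), so the caller keeps its own reference.
 */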
3223 fem_is_installed(vnode_t *v, fem_t *mon, void *arg)
3225 int e;
3226 struct fem_list *fl;
3228 fl = fem_get(v->v_femhead);
3229 if (fl != NULL) {
3230 e = fem_walk_list(fl, fem_compare_mon, mon, arg);
3231 fem_release(fl);
3232 return (e);
3234 return (0);
3238 fem_uninstall(vnode_t *v, fem_t *mon, void *arg)
3240 int e;
3241 e = fem_remove_node(v->v_femhead, mon, arg);
3242 return (e);
3246 * VFS interposition
3248 * These need to be rewritten; more of this code should be shared with the vnode interposition above.
3252 fsem_is_installed(struct vfs *v, fsem_t *mon, void *arg)
3254 struct fem_list *fl;
3256 if (v->vfs_implp == NULL)
3257 return (0);
3259 fl = fem_get(v->vfs_femhead);
3260 if (fl != NULL) {
3261 int e;
3262 e = fem_walk_list(fl, fem_compare_mon, mon, arg);
3263 fem_release(fl);
3264 return (e);
3266 return (0);
3270 fsem_install(
3271 struct vfs *vfsp, /* VFS on which monitor is being installed */
3272 fsem_t *mon, /* Monitor operations being installed */
3273 void *arg, /* Opaque data used by monitor */
3274 femhow_t how, /* Installation control */
3275 void (*arg_hold)(void *), /* Hold routine for "arg" */
3276 void (*arg_rele)(void *)) /* Release routine for "arg" */
3278 int error;
3279 struct fem_node nnode;
3281 /* If this vfs hasn't been properly initialized, fail the install */
3282 if (vfsp->vfs_implp == NULL)
3283 return (EINVAL);
3285 nnode.fn_available = arg;
3286 nnode.fn_op.fsem = mon;
3287 nnode.fn_av_hold = arg_hold;
3288 nnode.fn_av_rele = arg_rele;
3290 * If we have a non-NULL hold function, do the hold right away.
3291 * The release is done in remove_node().
3293 if (arg_hold)
3294 (*arg_hold)(arg);
3296 error = fem_push_node(&vfsp->vfs_femhead, FEMTYPE_VFS, &nnode, how);
3298 /* If there was an error then the monitor wasn't pushed */
3299 if (error && arg_rele)
3300 (*arg_rele)(arg);
3302 return (error);
3306 fsem_uninstall(struct vfs *v, fsem_t *mon, void *arg)
3308 int e;
3310 if (v->vfs_implp == NULL)
3311 return (EINVAL);
3313 e = fem_remove_node(v->vfs_femhead, mon, arg);
3314 return (e);
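/*
 * Illustrative sketch (not compiled) of the VFS flavour.  The monitor and
 * its unmount hook are hypothetical; the fsemop_unmount signature is taken
 * from vfsnext_unmount() above.
 *
 *	static int
 *	my_unmount(fsemarg_t *vf, int flag, cred_t *cr)
 *	{
 *		(log or veto the unmount here, then pass it on)
 *		return (vfsnext_unmount(vf, flag, cr));
 *	}
 *
 *	static struct fsem my_fsem = {
 *		.name = "my-vfs-monitor",
 *		.fsemop_unmount = my_unmount,
 *	};
 *
 *	error = fsem_install(vfsp, &my_fsem, my_data, OPARGUNIQ, NULL, NULL);
 *	...
 *	(void) fsem_uninstall(vfsp, &my_fsem, my_data);
 */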
3318 * Setup FEM.
3320 void
3321 fem_init()
3323 struct fem_type_info *fi;
3326 * This femtype is only used for fem_list creation so we only
3327 * need the "guard" to be initialized so that feml_tos has
3328 * some rudimentary meaning. A fem_list must not be used until
3329 * it has been initialized (either via femlist_construct() or
3330 * fem_dup_list()). Anything that tries to use this fem_list
3331 * before it's actually initialized would panic the system as
3332 * soon as "fn_op" (NULL) is dereferenced.
3334 fi = &femtype[FEMTYPE_NULL];
3335 fi->errf = fem_err;
3336 fi->guard.fn_available = &fi->guard;
3337 fi->guard.fn_av_hold = NULL;
3338 fi->guard.fn_av_rele = NULL;
3339 fi->guard.fn_op.anon = NULL;
3341 fi = &femtype[FEMTYPE_VNODE];
3342 fi->errf = fem_err;
3343 fi->head.fn_available = NULL;
3344 fi->head.fn_av_hold = NULL;
3345 fi->head.fn_av_rele = NULL;
3346 fi->head.fn_op.fem = NULL;
3347 fi->guard.fn_available = &fi->guard;
3348 fi->guard.fn_av_hold = NULL;
3349 fi->guard.fn_av_rele = NULL;
3350 fi->guard.fn_op.fem = &fem_guard_ops;
3352 fi = &femtype[FEMTYPE_VFS];
3353 fi->errf = fsem_err;
3354 fi->head.fn_available = NULL;
3355 fi->head.fn_av_hold = NULL;
3356 fi->head.fn_av_rele = NULL;
3357 fi->head.fn_op.fsem = NULL;
3358 fi->guard.fn_available = &fi->guard;
3359 fi->guard.fn_av_hold = NULL;
3360 fi->guard.fn_av_rele = NULL;
3361 fi->guard.fn_op.fsem = &fsem_guard_ops;
3366 fem_err()
3368 cmn_err(CE_PANIC, "fem/vnode operations corrupt");
3369 return (0);
3373 fsem_err()
3375 cmn_err(CE_PANIC, "fem/vfs operations corrupt");
3376 return (0);