4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 * Copyright (c) 2012 by Delphix. All rights reserved.
27 * Copyright 2017 Joyent, Inc.
30 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
31 /* All Rights Reserved */
34 * University Copyright- Copyright (c) 1982, 1986, 1988
35 * The Regents of the University of California
38 * University Acknowledgment- Portions of this document are derived from
39 * software developed by the University of California, Berkeley, and its
44 #include <sys/types.h>
45 #include <sys/t_lock.h>
46 #include <sys/param.h>
47 #include <sys/systm.h>
52 #include <sys/sysmacros.h>
54 #include <sys/vnode.h>
55 #include <sys/fs/snode.h>
56 #include <sys/fs/fifonode.h>
57 #include <sys/debug.h>
58 #include <sys/errno.h>
63 #include <sys/termios.h>
64 #include <sys/stream.h>
65 #include <sys/strsubr.h>
66 #include <sys/autoconf.h>
67 #include <sys/esunddi.h>
68 #include <sys/flock.h>
69 #include <sys/modctl.h>
/* Cache from which all snodes are allocated (created in specinit()). */
struct kmem_cache *snode_cache;

/*
 * Forward declarations for the snode hash-table helpers defined
 * later in this file:
 *   sfind    - look up an existing snode by <dev, type, realvp>
 *   get_cvp  - return/create the common vnode for a <dev, type>
 *   sinsert  - insert an snode into the hash table
 */
static struct snode *sfind(dev_t, vtype_t, struct vnode *);
static struct vnode *get_cvp(dev_t, vtype_t, struct snode *, int *);
static void sinsert(struct snode *);
90 ASSERT(realvp
&& dip
);
91 vp
= specvp(realvp
, dev
, vtyp
, cr
);
94 /* associate a dip hold with the common snode's s_dip pointer */
95 spec_assoc_vp_with_devi(vp
, dip
);
100 * Return a shadow special vnode for the given dev.
101 * If no snode exists for this dev create one and put it
102 * in a table hashed by <dev, realvp>. If the snode for
103 * this dev is already in the table return it (ref count is
104 * incremented by sfind). The snode will be flushed from the
105 * table when spec_inactive calls sdelete.
107 * The fsid is inherited from the real vnode so that clones
124 int used_csp
= 0; /* Did we use pre-allocated csp */
128 if (vp
->v_type
== VFIFO
)
129 return (fifovp(vp
, cr
));
131 ASSERT(vp
->v_type
== type
);
132 ASSERT(vp
->v_rdev
== dev
);
135 * Pre-allocate snodes before holding any locks in case we block
137 nsp
= kmem_cache_alloc(snode_cache
, KM_SLEEP
);
138 csp
= kmem_cache_alloc(snode_cache
, KM_SLEEP
);
141 * Get the time attributes outside of the stable lock since
142 * this operation may block. Unfortunately, it may not have
143 * been required if the snode is in the cache.
145 va
.va_mask
= AT_FSID
| AT_TIMES
;
146 rc
= fop_getattr(vp
, &va
, 0, cr
, NULL
); /* XXX may block! */
148 mutex_enter(&stable_lock
);
149 if ((sp
= sfind(dev
, type
, vp
)) == NULL
) {
152 sp
= nsp
; /* Use pre-allocated snode */
157 sp
->s_commonvp
= NULL
;
167 * Set times in snode to those in the vnode.
169 sp
->s_fsid
= va
.va_fsid
;
170 sp
->s_atime
= va
.va_atime
.tv_sec
;
171 sp
->s_mtime
= va
.va_mtime
.tv_sec
;
172 sp
->s_ctime
= va
.va_ctime
.tv_sec
;
174 sp
->s_fsid
= specdev
;
183 svp
->v_flag
= (vp
->v_flag
& VROOT
);
184 svp
->v_vfsp
= vp
->v_vfsp
;
185 VFS_HOLD(svp
->v_vfsp
);
188 (void) vn_copypath(vp
, svp
);
189 if (type
== VBLK
|| type
== VCHR
) {
190 cvp
= get_cvp(dev
, type
, csp
, &used_csp
);
191 svp
->v_stream
= cvp
->v_stream
;
193 sp
->s_commonvp
= cvp
;
197 mutex_exit(&stable_lock
);
199 /* Didn't use pre-allocated snode so free it */
200 kmem_cache_free(snode_cache
, csp
);
203 mutex_exit(&stable_lock
);
204 /* free unused snode memory */
205 kmem_cache_free(snode_cache
, nsp
);
206 kmem_cache_free(snode_cache
, csp
);
212 * Return a special vnode for the given dev; no vnode is supplied
213 * for it to shadow. Always create a new snode and put it in the
214 * table hashed by <dev, NULL>. The snode will be flushed from the
215 * table when spec_inactive() calls sdelete(). The association of
216 * this node with a attached instance of hardware is not made until
219 * N.B. Assumes caller takes on responsibility of making sure no one
220 * else is creating a snode for (dev, type) at this time.
223 makespecvp(dev_t dev
, vtype_t type
)
226 struct vnode
*svp
, *cvp
;
229 sp
= kmem_cache_alloc(snode_cache
, KM_SLEEP
);
231 cvp
= commonvp(dev
, type
);
232 now
= gethrestime_sec();
235 sp
->s_commonvp
= cvp
;
243 sp
->s_fsid
= specdev
;
251 svp
->v_vfsp
= &spec_vfs
;
252 svp
->v_stream
= cvp
->v_stream
;
257 mutex_enter(&stable_lock
);
259 mutex_exit(&stable_lock
);
266 * This function is called from spec_assoc_vp_with_devi(). That function
267 * associates a "new" dip with a common snode, releasing (any) old dip
268 * in the process. This function (spec_assoc_fence()) looks at the "new dip"
269 * and determines whether the snode should be fenced of or not. As the table
270 * below indicates, the value of old-dip is a don't care for all cases.
272 * old-dip new-dip common-snode
273 * =========================================
274 * Don't care NULL unfence
275 * Don't care retired fence
276 * Don't care not-retired unfence
278 * Since old-dip value is a "don't care", it is not passed into this function.
281 spec_assoc_fence(dev_info_t
*ndip
, vnode_t
*vp
)
287 ASSERT(vn_matchops(vp
, spec_getvnodeops()));
291 mutex_enter(&DEVI(ndip
)->devi_lock
);
292 if (DEVI(ndip
)->devi_flags
& DEVI_RETIRED
)
294 mutex_exit(&DEVI(ndip
)->devi_lock
);
300 /* SFENCED flag only set on common snode */
301 mutex_enter(&csp
->s_lock
);
303 csp
->s_flag
|= SFENCED
;
305 csp
->s_flag
&= ~SFENCED
;
306 mutex_exit(&csp
->s_lock
);
308 FENDBG((CE_NOTE
, "%sfenced common snode (%p) for new dip=%p",
309 fence
? "" : "un", (void *)csp
, (void *)ndip
));
313 * Associate the common snode with a devinfo node. This is called from:
315 * 1) specvp_devfs to associate a specfs node with the dip attached
318 * 2) spec_open after path reconstruction and attach.
320 * 3) From dacf processing to associate a makespecvp node with
321 * the dip that dacf postattach processing is being performed on.
322 * This association is made prior to open to avoid recursion issues.
324 * 4) From ddi_assoc_queue_with_devi to change vnode association as part of
325 * DL_ATTACH/DL_DETACH processing (SDIPSET already set). The call
326 * from ddi_assoc_queue_with_devi may specify a NULL dip.
328 * We put an extra hold on the devinfo node passed in as we establish it as
329 * the new s_dip pointer. Any hold associated with the prior s_dip pointer
330 * is released. The new hold will stay active until another call to
331 * spec_assoc_vp_with_devi or until the common snode is destroyed by
332 * spec_inactive after the last VN_RELE of the common node. This devinfo hold
333 * transfers across a clone open except in the clone_dev case, where the clone
334 * driver is no longer required after open.
336 * When SDIPSET is set and s_dip is NULL, the vnode has an association with
337 * the driver even though there is currently no association with a specific
341 spec_assoc_vp_with_devi(struct vnode
*vp
, dev_info_t
*dip
)
349 * Don't establish a NULL association for a vnode associated with the
350 * clone driver. The qassociate(, -1) call from a streams driver's
351 * open implementation to indicate support for qassociate has the
352 * side-effect of this type of spec_assoc_vp_with_devi call. This
353 * call should not change the the association of the pre-clone
354 * vnode associated with the clone driver, the post-clone newdev
355 * association will be established later by spec_clone().
357 if ((dip
== NULL
) && (getmajor(vp
->v_rdev
) == clone_major
))
362 e_ddi_hold_devi(dip
);
364 csp
= VTOS(VTOS(vp
)->s_commonvp
);
365 mutex_enter(&csp
->s_lock
);
368 csp
->s_flag
|= SDIPSET
;
370 /* If association changes then invalidate cached size */
372 csp
->s_flag
&= ~SSIZEVALID
;
373 mutex_exit(&csp
->s_lock
);
375 spec_assoc_fence(dip
, vp
);
377 /* release the old */
379 ddi_release_devi(olddip
);
383 * Return the held dip associated with the specified snode.
386 spec_hold_devi_by_vp(struct vnode
*vp
)
391 ASSERT(vn_matchops(vp
, spec_getvnodeops()));
393 csp
= VTOS(VTOS(vp
)->s_commonvp
);
396 e_ddi_hold_devi(dip
);
401 * Find a special vnode that refers to the given device
402 * of the given type. Never return a "common" vnode.
403 * Return NULL if a special vnode does not exist.
404 * HOLD the vnode before returning it.
407 specfind(dev_t dev
, vtype_t type
)
412 mutex_enter(&stable_lock
);
413 st
= stable
[STABLEHASH(dev
)];
415 if (st
->s_dev
== dev
) {
417 if (nvp
->v_type
== type
&& st
->s_commonvp
!= nvp
) {
419 /* validate vnode is visible in the zone */
420 if (nvp
->v_path
!= NULL
&&
421 ZONE_PATH_VISIBLE(nvp
->v_path
, curzone
)) {
422 mutex_exit(&stable_lock
);
430 mutex_exit(&stable_lock
);
435 * Loop through the snode cache looking for snodes referencing dip.
437 * This function determines if a devinfo node is "BUSY" from the perspective
438 * of having an active vnode associated with the device, which represents a
439 * dependency on the device's services. This function is needed because a
440 * devinfo node can have a non-zero devi_ref and still NOT be "BUSY" when,
441 * for instance, the framework is manipulating the node (has an open
445 * DEVI_REFERENCED - if dip is referenced
446 * DEVI_NOT_REFERENCED - if dip is not referenced
449 devi_stillreferenced(dev_info_t
*dip
)
454 /* if no hold then there can't be an snode with s_dip == dip */
455 if (e_ddi_devi_holdcnt(dip
) == 0)
456 return (DEVI_NOT_REFERENCED
);
458 mutex_enter(&stable_lock
);
459 for (i
= 0; i
< STABLESIZE
; i
++) {
460 for (sp
= stable
[i
]; sp
!= NULL
; sp
= sp
->s_next
) {
461 if (sp
->s_dip
== dip
) {
462 mutex_exit(&stable_lock
);
463 return (DEVI_REFERENCED
);
467 mutex_exit(&stable_lock
);
468 return (DEVI_NOT_REFERENCED
);
472 * Given an snode, returns the open count and the dip
473 * associated with that snode
474 * Assumes the caller holds the appropriate locks
475 * to prevent snode and/or dip from going away.
477 * -1 No associated dip
478 * >= 0 Number of opens.
481 spec_devi_open_count(struct snode
*sp
, dev_info_t
**dipp
)
495 * We are only interested in common snodes. Only common snodes
496 * get their s_count fields bumped up on opens.
498 if (sp
->s_commonvp
!= vp
|| (dip
= sp
->s_dip
) == NULL
)
501 mutex_enter(&sp
->s_lock
);
502 count
= sp
->s_count
+ sp
->s_mapcnt
;
503 if (sp
->s_flag
& SLOCKED
)
505 mutex_exit(&sp
->s_lock
);
513 * Given a device vnode, return the common
514 * vnode associated with it.
517 common_specvp(struct vnode
*vp
)
521 if ((vp
->v_type
!= VBLK
) && (vp
->v_type
!= VCHR
) ||
522 !vn_matchops(vp
, spec_getvnodeops()))
525 return (sp
->s_commonvp
);
529 * Returns a special vnode for the given dev. The vnode is the
530 * one which is "common" to all the snodes which represent the
532 * Similar to commonvp() but doesn't acquire the stable_lock, and
533 * may use a pre-allocated snode provided by caller.
535 static struct vnode
*
539 struct snode
*nsp
, /* pre-allocated snode */
540 int *used_nsp
) /* flag indicating if we use nsp */
545 ASSERT(MUTEX_HELD(&stable_lock
));
546 if ((sp
= sfind(dev
, type
, NULL
)) == NULL
) {
547 sp
= nsp
; /* Use pre-allocated snode */
548 *used_nsp
= 1; /* return value */
552 sp
->s_commonvp
= svp
; /* points to itself */
558 sp
->s_size
= UNKNOWN_SIZE
;
560 sp
->s_fsid
= specdev
;
568 svp
->v_vfsp
= &spec_vfs
;
579 * Returns a special vnode for the given dev. The vnode is the
580 * one which is "common" to all the snodes which represent the
581 * same device. For use ONLY by SPECFS.
584 commonvp(dev_t dev
, vtype_t type
)
586 struct snode
*sp
, *nsp
;
589 /* Pre-allocate snode in case we might block */
590 nsp
= kmem_cache_alloc(snode_cache
, KM_SLEEP
);
592 mutex_enter(&stable_lock
);
593 if ((sp
= sfind(dev
, type
, NULL
)) == NULL
) {
594 sp
= nsp
; /* Use pre-alloced snode */
598 sp
->s_commonvp
= svp
; /* points to itself */
604 sp
->s_size
= UNKNOWN_SIZE
;
606 sp
->s_fsid
= specdev
;
614 svp
->v_vfsp
= &spec_vfs
;
619 mutex_exit(&stable_lock
);
621 mutex_exit(&stable_lock
);
622 /* Didn't need the pre-allocated snode */
623 kmem_cache_free(snode_cache
, nsp
);
/*
 * Snode lookup stuff.
 * These routines maintain a table of snodes hashed by dev so
 * that the snode for a dev can be found if it already exists.
 * All insertions, deletions and searches are serialized by stable_lock.
 */
struct snode *stable[STABLESIZE];	/* hash buckets, indexed by STABLEHASH(dev) */
int stablesz = STABLESIZE;		/* table size (exported for debuggers) */
kmutex_t stable_lock;			/* protects stable[] and s_next chains */
638 * Put a snode in the table.
641 sinsert(struct snode
*sp
)
643 ASSERT(MUTEX_HELD(&stable_lock
));
644 sp
->s_next
= stable
[STABLEHASH(sp
->s_dev
)];
645 stable
[STABLEHASH(sp
->s_dev
)] = sp
;
649 * Remove an snode from the hash table.
650 * The realvp is not released here because spec_inactive() still
651 * needs it to do a spec_fsync().
654 sdelete(struct snode
*sp
)
657 struct snode
*stprev
= NULL
;
659 ASSERT(MUTEX_HELD(&stable_lock
));
660 st
= stable
[STABLEHASH(sp
->s_dev
)];
664 stable
[STABLEHASH(sp
->s_dev
)] = st
->s_next
;
666 stprev
->s_next
= st
->s_next
;
675 * Lookup an snode by <dev, type, vp>.
676 * ONLY looks for snodes with non-NULL s_realvp members and
677 * common snodes (with s_commonvp pointing to its vnode).
679 * If vp is NULL, only return commonvp. Otherwise return
680 * shadow vp with both shadow and common vp's VN_HELD.
682 static struct snode
*
691 ASSERT(MUTEX_HELD(&stable_lock
));
692 st
= stable
[STABLEHASH(dev
)];
695 if (st
->s_dev
== dev
&& svp
->v_type
== type
&&
696 VN_CMP(st
->s_realvp
, vp
) &&
697 (vp
!= NULL
|| st
->s_commonvp
== svp
) &&
698 (vp
== NULL
|| st
->s_realvp
->v_vfsp
== vp
->v_vfsp
)) {
708 * Mark the accessed, updated, or changed times in an snode
709 * with the current time.
712 smark(struct snode
*sp
, int flag
)
714 time_t now
= gethrestime_sec();
716 /* check for change to avoid unnecessary locking */
717 ASSERT((flag
& ~(SACC
|SUPD
|SCHG
)) == 0);
718 if (((flag
& sp
->s_flag
) != flag
) ||
719 ((flag
& SACC
) && (sp
->s_atime
!= now
)) ||
720 ((flag
& SUPD
) && (sp
->s_mtime
!= now
)) ||
721 ((flag
& SCHG
) && (sp
->s_ctime
!= now
))) {
722 /* lock and update */
723 mutex_enter(&sp
->s_lock
);
731 mutex_exit(&sp
->s_lock
);
736 * Return the maximum file offset permitted for this device.
737 * -1 means unrestricted. SLOFFSET is associated with D_64BIT.
739 * On a 32-bit kernel this will limit:
740 * o D_64BIT devices to SPEC_MAXOFFSET_T.
741 * o non-D_64BIT character drivers to a 32-bit offset (MAXOFF_T).
744 spec_maxoffset(struct vnode
*vp
)
746 struct snode
*sp
= VTOS(vp
);
747 struct snode
*csp
= VTOS(sp
->s_commonvp
);
750 return ((offset_t
)-1);
751 else if (csp
->s_flag
& SANYOFFSET
) /* D_U64BIT */
752 return ((offset_t
)-1);
754 if (csp
->s_flag
& SLOFFSET
) /* D_64BIT */
755 return (SPEC_MAXOFFSET_T
);
762 snode_constructor(void *buf
, void *cdrarg
, int kmflags
)
764 struct snode
*sp
= buf
;
767 vp
= sp
->s_vnode
= vn_alloc(kmflags
);
771 vn_setops(vp
, spec_getvnodeops());
774 mutex_init(&sp
->s_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
775 cv_init(&sp
->s_cv
, NULL
, CV_DEFAULT
, NULL
);
781 snode_destructor(void *buf
, void *cdrarg
)
783 struct snode
*sp
= buf
;
784 struct vnode
*vp
= STOV(sp
);
786 mutex_destroy(&sp
->s_lock
);
787 cv_destroy(&sp
->s_cv
);
/*
 * specfs vfs operations vector: only vfs_sync is supplied; the
 * remaining entry points take their defaults.
 */
static const struct vfsops spec_vfsops = {
	.vfs_sync = spec_sync,
};
797 specinit(int fstype
, char *name
)
803 * Associate vfs and vnode operations.
805 error
= vfs_setfsops(fstype
, &spec_vfsops
);
807 cmn_err(CE_WARN
, "specinit: bad fstype");
811 mutex_init(&stable_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
812 mutex_init(&spec_syncbusy
, NULL
, MUTEX_DEFAULT
, NULL
);
817 snode_cache
= kmem_cache_create("snode_cache", sizeof (struct snode
),
818 0, snode_constructor
, snode_destructor
, NULL
, NULL
, NULL
, 0);
821 * Associate vfs operations with spec_vfs
823 VFS_INIT(&spec_vfs
, &spec_vfsops
, NULL
);
824 if ((dev
= getudev()) == -1)
826 specdev
= makedevice(dev
, 0);
831 device_close(struct vnode
*vp
, int flag
, struct cred
*cr
)
833 struct snode
*sp
= VTOS(vp
);
834 enum vtype type
= vp
->v_type
;
840 cvp
= sp
->s_commonvp
;
846 if (cvp
->v_stream
!= NULL
)
847 error
= strclose(cvp
, flag
, cr
);
850 error
= dev_close(dev
, flag
, OTYP_CHR
, cr
);
855 * On last close a block device we must
856 * invalidate any in-core blocks so that we
857 * can, for example, change floppy disks.
859 (void) spec_putpage(cvp
, 0,
860 (size_t)0, B_INVAL
|B_FORCE
, cr
, NULL
);
863 error
= dev_close(dev
, flag
, OTYP_BLK
, cr
);
866 panic("device_close: not a device");
874 makectty(vnode_t
*ovp
)
878 if (vp
= makespecvp(ovp
->v_rdev
, VCHR
)) {
884 cvp
= sp
->s_commonvp
;
886 mutex_enter(&csp
->s_lock
);
888 mutex_exit(&csp
->s_lock
);
895 spec_snode_walk(int (*callback
)(struct snode
*sp
, void *arg
), void *arg
)
902 mutex_enter(&stable_lock
);
903 for (i
= 0; i
< STABLESIZE
; i
++) {
904 for (sp
= stable
[i
]; sp
; sp
= sp
->s_next
) {
905 if (callback(sp
, arg
) != DDI_WALK_CONTINUE
)
910 mutex_exit(&stable_lock
);
914 spec_is_clone(vnode_t
*vp
)
918 if (vn_matchops(vp
, spec_getvnodeops())) {
920 return ((sp
->s_flag
& SCLONE
) ? 1 : 0);
927 spec_is_selfclone(vnode_t
*vp
)
931 if (vn_matchops(vp
, spec_getvnodeops())) {
933 return ((sp
->s_flag
& SSELFCLONE
) ? 1 : 0);
940 * We may be invoked with a NULL vp in which case we fence off
941 * all snodes associated with dip
944 spec_fence_snode(dev_info_t
*dip
, struct vnode
*vp
)
956 mutex_enter(&DEVI(dip
)->devi_lock
);
957 if (DEVI(dip
)->devi_flags
& DEVI_RETIRED
)
959 mutex_exit(&DEVI(dip
)->devi_lock
);
964 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
965 (void) ddi_pathname(dip
, path
);
969 ASSERT(vn_matchops(vp
, spec_getvnodeops()));
972 mutex_enter(&csp
->s_lock
);
973 csp
->s_flag
|= SFENCED
;
974 mutex_exit(&csp
->s_lock
);
975 FENDBG((CE_NOTE
, "fenced off snode(%p) for dip: %s",
977 kmem_free(path
, MAXPATHLEN
);
982 mutex_enter(&stable_lock
);
983 for (i
= 0; i
< STABLESIZE
; i
++) {
984 for (sp
= stable
[i
]; sp
!= NULL
; sp
= sp
->s_next
) {
985 ASSERT(sp
->s_commonvp
);
986 csp
= VTOS(sp
->s_commonvp
);
987 if (csp
->s_dip
== dip
) {
988 /* fence off the common snode */
989 mutex_enter(&csp
->s_lock
);
990 csp
->s_flag
|= SFENCED
;
991 mutex_exit(&csp
->s_lock
);
993 FENDBG((CE_NOTE
, "fenced 1 of N"));
999 mutex_exit(&stable_lock
);
1001 FENDBG((CE_NOTE
, "fenced off all snodes for dip: %s", path
));
1002 kmem_free(path
, MAXPATHLEN
);
1009 spec_unfence_snode(dev_info_t
*dip
)
1019 path
= kmem_alloc(MAXPATHLEN
, KM_SLEEP
);
1020 (void) ddi_pathname(dip
, path
);
1023 mutex_enter(&stable_lock
);
1024 for (i
= 0; i
< STABLESIZE
; i
++) {
1025 for (sp
= stable
[i
]; sp
!= NULL
; sp
= sp
->s_next
) {
1026 ASSERT(sp
->s_commonvp
);
1027 csp
= VTOS(sp
->s_commonvp
);
1029 if (csp
->s_dip
== dip
) {
1030 /* unfence the common snode */
1031 mutex_enter(&csp
->s_lock
);
1032 csp
->s_flag
&= ~SFENCED
;
1033 mutex_exit(&csp
->s_lock
);
1035 FENDBG((CE_NOTE
, "unfenced 1 of N"));
1041 mutex_exit(&stable_lock
);
1043 FENDBG((CE_NOTE
, "unfenced all snodes for dip: %s", path
));
1044 kmem_free(path
, MAXPATHLEN
);
1050 spec_size_invalidate(dev_t dev
, vtype_t type
)
1055 mutex_enter(&stable_lock
);
1056 if ((csp
= sfind(dev
, type
, NULL
)) != NULL
) {
1057 mutex_enter(&csp
->s_lock
);
1058 csp
->s_flag
&= ~SSIZEVALID
;
1059 VN_RELE_ASYNC(STOV(csp
), system_taskq
);
1060 mutex_exit(&csp
->s_lock
);
1062 mutex_exit(&stable_lock
);