/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/fs/snode.h>
#include <sys/fs/fifonode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/autoconf.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>
struct vfs spec_vfs;
static dev_t specdev;
struct kmem_cache *snode_cache;
int spec_debug = 0;

static struct snode *sfind(dev_t, vtype_t, struct vnode *);
static struct vnode *get_cvp(dev_t, vtype_t, struct snode *, int *);
static void sinsert(struct snode *);
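
/*
 * Overview: specfs keeps two flavors of snodes in the stable[] hash
 * table (see "Snode lookup stuff" below). Shadow snodes, hashed by
 * <dev, realvp>, shadow the device node of the underlying filesystem;
 * common snodes, hashed by <dev, NULL>, exist one per device and are
 * shared by all shadow snodes for that device through s_commonvp.
 */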
struct vnode *
specvp_devfs(
        struct vnode *realvp,
        dev_t dev,
        vtype_t vtyp,
        struct cred *cr,
        dev_info_t *dip)
{
        struct vnode *vp;

        ASSERT(realvp && dip);
        vp = specvp(realvp, dev, vtyp, cr);
        ASSERT(vp);

        /* associate a dip hold with the common snode's s_dip pointer */
        spec_assoc_vp_with_devi(vp, dip);
        return (vp);
}
/*
 * Return a shadow special vnode for the given dev.
 * If no snode exists for this dev create one and put it
 * in a table hashed by <dev, realvp>. If the snode for
 * this dev is already in the table return it (ref count is
 * incremented by sfind). The snode will be flushed from the
 * table when spec_inactive calls sdelete.
 *
 * The fsid is inherited from the real vnode so that clones
 * can be found.
 */
struct vnode *
specvp(
        struct vnode *vp,
        dev_t dev,
        vtype_t type,
        struct cred *cr)
{
        struct snode *sp;
        struct snode *nsp;
        struct snode *csp;
        struct vnode *svp;
        struct vattr va;
        int rc;
        int used_csp = 0;       /* Did we use pre-allocated csp */

        if (vp == NULL)
                return (NULL);
        if (vp->v_type == VFIFO)
                return (fifovp(vp, cr));

        ASSERT(vp->v_type == type);
        ASSERT(vp->v_rdev == dev);

        /*
         * Pre-allocate snodes before holding any locks in case we block
         */
        nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);
        csp = kmem_cache_alloc(snode_cache, KM_SLEEP);

        /*
         * Get the time attributes outside of the stable lock since
         * this operation may block. Unfortunately, it may not have
         * been required if the snode is in the cache.
         */
        va.va_mask = AT_FSID | AT_TIMES;
        rc = fop_getattr(vp, &va, 0, cr, NULL);  /* XXX may block! */

        mutex_enter(&stable_lock);
        if ((sp = sfind(dev, type, vp)) == NULL) {
                struct vnode *cvp;

                sp = nsp;       /* Use pre-allocated snode */
                svp = STOV(sp);

                sp->s_realvp = vp;
                VN_HOLD(vp);
                sp->s_commonvp = NULL;
                sp->s_dev = dev;
                sp->s_dip = NULL;
                sp->s_nextr = 0;
                sp->s_list = NULL;
                sp->s_plcy = NULL;
                sp->s_size = 0;
                sp->s_flag = 0;
                if (rc == 0) {
                        /*
                         * Set times in snode to those in the vnode.
                         */
                        sp->s_fsid = va.va_fsid;
                        sp->s_atime = va.va_atime.tv_sec;
                        sp->s_mtime = va.va_mtime.tv_sec;
                        sp->s_ctime = va.va_ctime.tv_sec;
                } else {
                        sp->s_fsid = specdev;
                        sp->s_atime = 0;
                        sp->s_mtime = 0;
                        sp->s_ctime = 0;
                }
                sp->s_count = 0;
                sp->s_mapcnt = 0;

                vn_reinit(svp);
                svp->v_flag = (vp->v_flag & VROOT);
                svp->v_vfsp = vp->v_vfsp;
                VFS_HOLD(svp->v_vfsp);
                svp->v_type = type;
                svp->v_rdev = dev;
                (void) vn_copypath(vp, svp);
                if (type == VBLK || type == VCHR) {
                        cvp = get_cvp(dev, type, csp, &used_csp);
                        svp->v_stream = cvp->v_stream;

                        sp->s_commonvp = cvp;
                }
                vn_exists(svp);
                sinsert(sp);
                mutex_exit(&stable_lock);
                if (used_csp == 0) {
                        /* Didn't use pre-allocated snode so free it */
                        kmem_cache_free(snode_cache, csp);
                }
        } else {
                mutex_exit(&stable_lock);
                /* free unused snode memory */
                kmem_cache_free(snode_cache, nsp);
                kmem_cache_free(snode_cache, csp);
        }
        return (STOV(sp));
}
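
/*
 * Illustrative sketch (assumed, not part of this file): the generic
 * pathname-lookup layer is the typical specvp() caller. When a lookup
 * produces a VCHR/VBLK vnode from a disk-based filesystem, that vnode
 * is swapped for a specfs shadow vnode along these lines (cf.
 * specvp_check() in the lookup code):
 *
 *	if (vp->v_type == VCHR || vp->v_type == VBLK) {
 *		struct vnode *svp;
 *
 *		svp = specvp(vp, vp->v_rdev, vp->v_type, cr);
 *		VN_RELE(vp);
 *		if (svp == NULL)
 *			return (ENOSYS);
 *		vp = svp;
 *	}
 */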
/*
 * Return a special vnode for the given dev; no vnode is supplied
 * for it to shadow. Always create a new snode and put it in the
 * table hashed by <dev, NULL>. The snode will be flushed from the
 * table when spec_inactive() calls sdelete(). The association of
 * this node with an attached instance of hardware is not made until
 * spec_open time.
 *
 * N.B. Assumes caller takes on responsibility of making sure no one
 * else is creating a snode for (dev, type) at this time.
 */
struct vnode *
makespecvp(dev_t dev, vtype_t type)
{
        struct snode *sp;
        struct vnode *svp, *cvp;
        time_t now;

        sp = kmem_cache_alloc(snode_cache, KM_SLEEP);
        svp = STOV(sp);
        cvp = commonvp(dev, type);
        now = gethrestime_sec();

        sp->s_realvp = NULL;
        sp->s_commonvp = cvp;
        sp->s_dev = dev;
        sp->s_dip = NULL;
        sp->s_nextr = 0;
        sp->s_list = NULL;
        sp->s_plcy = NULL;
        sp->s_size = 0;
        sp->s_flag = 0;
        sp->s_fsid = specdev;
        sp->s_atime = now;
        sp->s_mtime = now;
        sp->s_ctime = now;
        sp->s_count = 0;
        sp->s_mapcnt = 0;

        vn_reinit(svp);
        svp->v_vfsp = &spec_vfs;
        svp->v_stream = cvp->v_stream;
        svp->v_type = type;
        svp->v_rdev = dev;

        vn_exists(svp);
        mutex_enter(&stable_lock);
        sinsert(sp);
        mutex_exit(&stable_lock);

        return (svp);
}
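
/*
 * For an in-file example of a makespecvp() caller, see makectty()
 * below, which creates a VCHR snode for a controlling terminal with
 * no shadowed real vnode.
 */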
/*
 * This function is called from spec_assoc_vp_with_devi(). That function
 * associates a "new" dip with a common snode, releasing (any) old dip
 * in the process. This function (spec_assoc_fence()) looks at the "new dip"
 * and determines whether the snode should be fenced off or not. As the table
 * below indicates, the value of old-dip is a don't care for all cases.
 *
 *	old-dip		new-dip		common-snode
 *	=========================================
 *	Don't care	NULL		unfence
 *	Don't care	retired		fence
 *	Don't care	not-retired	unfence
 *
 * Since old-dip value is a "don't care", it is not passed into this function.
 */
static void
spec_assoc_fence(dev_info_t *ndip, vnode_t *vp)
{
        int fence;
        struct snode *csp;

        ASSERT(vp);
        ASSERT(vn_matchops(vp, spec_getvnodeops()));

        fence = 0;
        if (ndip != NULL) {
                mutex_enter(&DEVI(ndip)->devi_lock);
                if (DEVI(ndip)->devi_flags & DEVI_RETIRED)
                        fence = 1;
                mutex_exit(&DEVI(ndip)->devi_lock);
        }

        csp = VTOCS(vp);
        ASSERT(csp);

        /* SFENCED flag only set on common snode */
        mutex_enter(&csp->s_lock);
        if (fence)
                csp->s_flag |= SFENCED;
        else
                csp->s_flag &= ~SFENCED;
        mutex_exit(&csp->s_lock);

        FENDBG((CE_NOTE, "%sfenced common snode (%p) for new dip=%p",
            fence ? "" : "un", (void *)csp, (void *)ndip));
}
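
/*
 * spec_fence_snode() and spec_unfence_snode() near the bottom of this
 * file apply the same SFENCED marking to every common snode associated
 * with a dip when a whole device is retired or unretired.
 */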
/*
 * Associate the common snode with a devinfo node. This is called from:
 *
 *   1) specvp_devfs to associate a specfs node with the dip attached
 *	by devfs.
 *
 *   2) spec_open after path reconstruction and attach.
 *
 *   3) From dacf processing to associate a makespecvp node with
 *	the dip that dacf postattach processing is being performed on.
 *	This association is made prior to open to avoid recursion issues.
 *
 *   4) From ddi_assoc_queue_with_devi to change vnode association as part of
 *	DL_ATTACH/DL_DETACH processing (SDIPSET already set). The call
 *	from ddi_assoc_queue_with_devi may specify a NULL dip.
 *
 * We put an extra hold on the devinfo node passed in as we establish it as
 * the new s_dip pointer. Any hold associated with the prior s_dip pointer
 * is released. The new hold will stay active until another call to
 * spec_assoc_vp_with_devi or until the common snode is destroyed by
 * spec_inactive after the last VN_RELE of the common node. This devinfo hold
 * transfers across a clone open except in the clone_dev case, where the clone
 * driver is no longer required after open.
 *
 * When SDIPSET is set and s_dip is NULL, the vnode has an association with
 * the driver even though there is currently no association with a specific
 * hardware instance.
 */
void
spec_assoc_vp_with_devi(struct vnode *vp, dev_info_t *dip)
{
        struct snode *csp;
        dev_info_t *olddip;

        ASSERT(vp);

        /*
         * Don't establish a NULL association for a vnode associated with the
         * clone driver. The qassociate(, -1) call from a streams driver's
         * open implementation to indicate support for qassociate has the
         * side-effect of this type of spec_assoc_vp_with_devi call. This
         * call should not change the association of the pre-clone
         * vnode associated with the clone driver; the post-clone newdev
         * association will be established later by spec_clone().
         */
        if ((dip == NULL) && (getmajor(vp->v_rdev) == clone_major))
                return;

        /* hold the new */
        if (dip)
                e_ddi_hold_devi(dip);

        csp = VTOS(VTOS(vp)->s_commonvp);
        mutex_enter(&csp->s_lock);
        olddip = csp->s_dip;
        csp->s_dip = dip;
        csp->s_flag |= SDIPSET;

        /* If association changes then invalidate cached size */
        if (olddip != dip)
                csp->s_flag &= ~SSIZEVALID;
        mutex_exit(&csp->s_lock);

        spec_assoc_fence(dip, vp);

        /* release the old */
        if (olddip)
                ddi_release_devi(olddip);
}
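
/*
 * The first case listed above is visible in this file: specvp_devfs()
 * calls spec_assoc_vp_with_devi() immediately after specvp() returns
 * the shadow vnode.
 */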
/*
 * Return the held dip associated with the specified snode.
 */
dev_info_t *
spec_hold_devi_by_vp(struct vnode *vp)
{
        struct snode *csp;
        dev_info_t *dip;

        ASSERT(vn_matchops(vp, spec_getvnodeops()));

        csp = VTOS(VTOS(vp)->s_commonvp);
        dip = csp->s_dip;
        if (dip)
                e_ddi_hold_devi(dip);
        return (dip);
}
/*
 * Find a special vnode that refers to the given device
 * of the given type. Never return a "common" vnode.
 * Return NULL if a special vnode does not exist.
 * HOLD the vnode before returning it.
 */
struct vnode *
specfind(dev_t dev, vtype_t type)
{
        struct snode *st;
        struct vnode *nvp;

        mutex_enter(&stable_lock);
        st = stable[STABLEHASH(dev)];
        while (st != NULL) {
                if (st->s_dev == dev) {
                        nvp = STOV(st);
                        if (nvp->v_type == type && st->s_commonvp != nvp) {
                                VN_HOLD(nvp);
                                /* validate vnode is visible in the zone */
                                if (nvp->v_path != NULL &&
                                    ZONE_PATH_VISIBLE(nvp->v_path, curzone)) {
                                        mutex_exit(&stable_lock);
                                        return (nvp);
                                }
                                VN_RELE(nvp);
                        }
                }
                st = st->s_next;
        }
        mutex_exit(&stable_lock);
        return (NULL);
}
/*
 * Loop through the snode cache looking for snodes referencing dip.
 *
 * This function determines if a devinfo node is "BUSY" from the perspective
 * of having an active vnode associated with the device, which represents a
 * dependency on the device's services. This function is needed because a
 * devinfo node can have a non-zero devi_ref and still NOT be "BUSY" when,
 * for instance, the framework is manipulating the node (has an open
 * ndi_hold_devi).
 *
 * Returns:
 *	DEVI_REFERENCED		- if dip is referenced
 *	DEVI_NOT_REFERENCED	- if dip is not referenced
 */
int
devi_stillreferenced(dev_info_t *dip)
{
        struct snode *sp;
        int i;

        /* if no hold then there can't be an snode with s_dip == dip */
        if (e_ddi_devi_holdcnt(dip) == 0)
                return (DEVI_NOT_REFERENCED);

        mutex_enter(&stable_lock);
        for (i = 0; i < STABLESIZE; i++) {
                for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
                        if (sp->s_dip == dip) {
                                mutex_exit(&stable_lock);
                                return (DEVI_REFERENCED);
                        }
                }
        }
        mutex_exit(&stable_lock);
        return (DEVI_NOT_REFERENCED);
}
/*
 * Given an snode, returns the open count and the dip
 * associated with that snode.
 * Assumes the caller holds the appropriate locks
 * to prevent snode and/or dip from going away.
 * Returns:
 *	-1	No associated dip
 *	>= 0	Number of opens.
 */
int
spec_devi_open_count(struct snode *sp, dev_info_t **dipp)
{
        dev_info_t *dip;
        uint_t count;
        struct vnode *vp;

        ASSERT(sp);
        ASSERT(dipp);

        vp = STOV(sp);

        *dipp = NULL;

        /*
         * We are only interested in common snodes. Only common snodes
         * get their s_count fields bumped up on opens.
         */
        if (sp->s_commonvp != vp || (dip = sp->s_dip) == NULL)
                return (-1);

        mutex_enter(&sp->s_lock);
        count = sp->s_count + sp->s_mapcnt;
        if (sp->s_flag & SLOCKED)
                count++;
        mutex_exit(&sp->s_lock);

        *dipp = dip;

        return (count);
}
/*
 * Given a device vnode, return the common
 * vnode associated with it.
 */
struct vnode *
common_specvp(struct vnode *vp)
{
        struct snode *sp;

        if (((vp->v_type != VBLK) && (vp->v_type != VCHR)) ||
            !vn_matchops(vp, spec_getvnodeops()))
                return (vp);
        sp = VTOS(vp);
        return (sp->s_commonvp);
}
/*
 * Returns a special vnode for the given dev. The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.
 * Similar to commonvp() but doesn't acquire the stable_lock, and
 * may use a pre-allocated snode provided by caller.
 */
static struct vnode *
get_cvp(
        dev_t dev,
        vtype_t type,
        struct snode *nsp,      /* pre-allocated snode */
        int *used_nsp)          /* flag indicating if we use nsp */
{
        struct snode *sp;
        struct vnode *svp;

        ASSERT(MUTEX_HELD(&stable_lock));
        if ((sp = sfind(dev, type, NULL)) == NULL) {
                sp = nsp;               /* Use pre-allocated snode */
                *used_nsp = 1;          /* return value */
                svp = STOV(sp);

                sp->s_realvp = NULL;
                sp->s_commonvp = svp;   /* points to itself */
                sp->s_dev = dev;
                sp->s_dip = NULL;
                sp->s_nextr = 0;
                sp->s_list = NULL;
                sp->s_plcy = NULL;
                sp->s_size = UNKNOWN_SIZE;
                sp->s_flag = 0;
                sp->s_fsid = specdev;
                sp->s_atime = 0;
                sp->s_mtime = 0;
                sp->s_ctime = 0;
                sp->s_count = 0;
                sp->s_mapcnt = 0;

                vn_reinit(svp);
                svp->v_vfsp = &spec_vfs;
                svp->v_type = type;
                svp->v_rdev = dev;
                vn_exists(svp);
                sinsert(sp);
        } else
                *used_nsp = 0;
        return (STOV(sp));
}
/*
 * Returns a special vnode for the given dev. The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device. For use ONLY by SPECFS.
 */
struct vnode *
commonvp(dev_t dev, vtype_t type)
{
        struct snode *sp, *nsp;
        struct vnode *svp;

        /* Pre-allocate snode in case we might block */
        nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);

        mutex_enter(&stable_lock);
        if ((sp = sfind(dev, type, NULL)) == NULL) {
                sp = nsp;               /* Use pre-alloced snode */
                svp = STOV(sp);

                sp->s_realvp = NULL;
                sp->s_commonvp = svp;   /* points to itself */
                sp->s_dev = dev;
                sp->s_dip = NULL;
                sp->s_nextr = 0;
                sp->s_list = NULL;
                sp->s_plcy = NULL;
                sp->s_size = UNKNOWN_SIZE;
                sp->s_flag = 0;
                sp->s_fsid = specdev;
                sp->s_atime = 0;
                sp->s_mtime = 0;
                sp->s_ctime = 0;
                sp->s_count = 0;
                sp->s_mapcnt = 0;

                vn_reinit(svp);
                svp->v_vfsp = &spec_vfs;
                svp->v_type = type;
                svp->v_rdev = dev;
                vn_exists(svp);
                sinsert(sp);
                mutex_exit(&stable_lock);
        } else {
                mutex_exit(&stable_lock);
                /* Didn't need the pre-allocated snode */
                kmem_cache_free(snode_cache, nsp);
        }
        return (STOV(sp));
}
/*
 * Snode lookup stuff.
 * These routines maintain a table of snodes hashed by dev so
 * that the snode for a dev can be found if it already exists.
 */
struct snode *stable[STABLESIZE];
int stablesz = STABLESIZE;
kmutex_t stable_lock;
/*
 * Put a snode in the table.
 */
static void
sinsert(struct snode *sp)
{
        ASSERT(MUTEX_HELD(&stable_lock));
        sp->s_next = stable[STABLEHASH(sp->s_dev)];
        stable[STABLEHASH(sp->s_dev)] = sp;
}
/*
 * Remove an snode from the hash table.
 * The realvp is not released here because spec_inactive() still
 * needs it to do a spec_fsync().
 */
void
sdelete(struct snode *sp)
{
        struct snode *st;
        struct snode *stprev = NULL;

        ASSERT(MUTEX_HELD(&stable_lock));
        st = stable[STABLEHASH(sp->s_dev)];
        while (st != NULL) {
                if (st == sp) {
                        if (stprev == NULL)
                                stable[STABLEHASH(sp->s_dev)] = st->s_next;
                        else
                                stprev->s_next = st->s_next;
                        break;
                }
                stprev = st;
                st = st->s_next;
        }
}
/*
 * Lookup an snode by <dev, type, vp>.
 * ONLY looks for snodes with non-NULL s_realvp members and
 * common snodes (with s_commonvp pointing to its vnode).
 *
 * If vp is NULL, only return commonvp. Otherwise return
 * shadow vp with both shadow and common vp's VN_HELD.
 */
static struct snode *
sfind(
        dev_t dev,
        vtype_t type,
        struct vnode *vp)
{
        struct snode *st;
        struct vnode *svp;

        ASSERT(MUTEX_HELD(&stable_lock));
        st = stable[STABLEHASH(dev)];
        while (st != NULL) {
                svp = STOV(st);
                if (st->s_dev == dev && svp->v_type == type &&
                    VN_CMP(st->s_realvp, vp) &&
                    (vp != NULL || st->s_commonvp == svp) &&
                    (vp == NULL || st->s_realvp->v_vfsp == vp->v_vfsp)) {
                        VN_HOLD(svp);
                        return (st);
                }
                st = st->s_next;
        }
        return (NULL);
}
/*
 * Mark the accessed, updated, or changed times in an snode
 * with the current time.
 */
void
smark(struct snode *sp, int flag)
{
        time_t now = gethrestime_sec();

        /* check for change to avoid unnecessary locking */
        ASSERT((flag & ~(SACC|SUPD|SCHG)) == 0);
        if (((flag & sp->s_flag) != flag) ||
            ((flag & SACC) && (sp->s_atime != now)) ||
            ((flag & SUPD) && (sp->s_mtime != now)) ||
            ((flag & SCHG) && (sp->s_ctime != now))) {
                /* lock and update */
                mutex_enter(&sp->s_lock);
                sp->s_flag |= flag;
                if (flag & SACC)
                        sp->s_atime = now;
                if (flag & SUPD)
                        sp->s_mtime = now;
                if (flag & SCHG)
                        sp->s_ctime = now;
                mutex_exit(&sp->s_lock);
        }
}
/*
 * Return the maximum file offset permitted for this device.
 * -1 means unrestricted. SLOFFSET is associated with D_64BIT.
 *
 * On a 32-bit kernel this will limit:
 *   o	D_64BIT devices to SPEC_MAXOFFSET_T.
 *   o	non-D_64BIT character drivers to a 32-bit offset (MAXOFF_T).
 */
offset_t
spec_maxoffset(struct vnode *vp)
{
        struct snode *sp = VTOS(vp);
        struct snode *csp = VTOS(sp->s_commonvp);

        if (vp->v_stream)
                return ((offset_t)-1);
        else if (csp->s_flag & SANYOFFSET)      /* D_U64BIT */
                return ((offset_t)-1);
#ifdef _ILP32
        if (csp->s_flag & SLOFFSET)             /* D_64BIT */
                return (SPEC_MAXOFFSET_T);
#endif  /* _ILP32 */
        return (MAXOFF_T);
}
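
/*
 * For example, on a 32-bit kernel a character driver that does not set
 * D_64BIT sees offsets capped at MAXOFF_T (a 32-bit limit in that data
 * model), while a D_64BIT driver is allowed up to SPEC_MAXOFFSET_T;
 * STREAMS devices and D_U64BIT devices are unrestricted.
 */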
/*ARGSUSED*/
static int
snode_constructor(void *buf, void *cdrarg, int kmflags)
{
        struct snode *sp = buf;
        struct vnode *vp;

        vp = sp->s_vnode = vn_alloc(kmflags);
        if (vp == NULL) {
                return (-1);
        }
        vn_setops(vp, spec_getvnodeops());
        vp->v_data = sp;

        mutex_init(&sp->s_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&sp->s_cv, NULL, CV_DEFAULT, NULL);
        return (0);
}
/*ARGSUSED1*/
static void
snode_destructor(void *buf, void *cdrarg)
{
        struct snode *sp = buf;
        struct vnode *vp = STOV(sp);

        mutex_destroy(&sp->s_lock);
        cv_destroy(&sp->s_cv);

        vn_free(vp);
}
static const struct vfsops spec_vfsops = {
        .vfs_sync = spec_sync,
};
int
specinit(int fstype, char *name)
{
        int error;
        dev_t dev;

        /*
         * Associate vfs and vnode operations.
         */
        error = vfs_setfsops(fstype, &spec_vfsops);
        if (error != 0) {
                cmn_err(CE_WARN, "specinit: bad fstype");
                return (error);
        }

        mutex_init(&stable_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spec_syncbusy, NULL, MUTEX_DEFAULT, NULL);

        /*
         * Create snode cache
         */
        snode_cache = kmem_cache_create("snode_cache", sizeof (struct snode),
            0, snode_constructor, snode_destructor, NULL, NULL, NULL, 0);

        /*
         * Associate vfs operations with spec_vfs
         */
        VFS_INIT(&spec_vfs, &spec_vfsops, NULL);
        if ((dev = getudev()) == -1)
                dev = 0;
        specdev = makedevice(dev, 0);
        return (0);
}
int
device_close(struct vnode *vp, int flag, struct cred *cr)
{
        struct snode *sp = VTOS(vp);
        enum vtype type = vp->v_type;
        struct vnode *cvp;
        dev_t dev;
        int error = 0;  /* strclose() may be skipped below; default to success */

        dev = sp->s_dev;
        cvp = sp->s_commonvp;

        switch (type) {

        case VCHR:
                if (vp->v_stream) {
                        if (cvp->v_stream != NULL)
                                error = strclose(cvp, flag, cr);
                        vp->v_stream = NULL;
                } else
                        error = dev_close(dev, flag, OTYP_CHR, cr);
                break;

        case VBLK:
                /*
                 * On last close of a block device we must
                 * invalidate any in-core blocks so that we
                 * can, for example, change floppy disks.
                 */
                (void) spec_putpage(cvp, 0,
                    (size_t)0, B_INVAL|B_FORCE, cr, NULL);
                bflush(dev);
                binval(dev);
                error = dev_close(dev, flag, OTYP_BLK, cr);
                break;
        default:
                panic("device_close: not a device");
                /*NOTREACHED*/
        }

        return (error);
}
struct vnode *
makectty(vnode_t *ovp)
{
        vnode_t *vp;

        if ((vp = makespecvp(ovp->v_rdev, VCHR)) != NULL) {
                struct snode *sp;
                struct snode *csp;
                struct vnode *cvp;

                sp = VTOS(vp);
                cvp = sp->s_commonvp;
                csp = VTOS(cvp);
                mutex_enter(&csp->s_lock);
                csp->s_count++;
                mutex_exit(&csp->s_lock);
        }

        return (vp);
}
void
spec_snode_walk(int (*callback)(struct snode *sp, void *arg), void *arg)
{
        struct snode *sp;
        int i;

        ASSERT(callback);

        mutex_enter(&stable_lock);
        for (i = 0; i < STABLESIZE; i++) {
                for (sp = stable[i]; sp; sp = sp->s_next) {
                        if (callback(sp, arg) != DDI_WALK_CONTINUE)
                                goto out;
                }
        }
out:
        mutex_exit(&stable_lock);
}
int
spec_is_clone(vnode_t *vp)
{
        struct snode *sp;

        if (vn_matchops(vp, spec_getvnodeops())) {
                sp = VTOS(vp);
                return ((sp->s_flag & SCLONE) ? 1 : 0);
        }

        return (0);
}
int
spec_is_selfclone(vnode_t *vp)
{
        struct snode *sp;

        if (vn_matchops(vp, spec_getvnodeops())) {
                sp = VTOS(vp);
                return ((sp->s_flag & SSELFCLONE) ? 1 : 0);
        }

        return (0);
}
/*
 * We may be invoked with a NULL vp in which case we fence off
 * all snodes associated with dip.
 */
int
spec_fence_snode(dev_info_t *dip, struct vnode *vp)
{
        struct snode *sp;
        struct snode *csp;
        int retired;
        int i;
        char *path;
        int emitted;

        ASSERT(dip);

        retired = 0;
        mutex_enter(&DEVI(dip)->devi_lock);
        if (DEVI(dip)->devi_flags & DEVI_RETIRED)
                retired = 1;
        mutex_exit(&DEVI(dip)->devi_lock);

        if (!retired)
                return (0);

        path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
        (void) ddi_pathname(dip, path);

        if (vp != NULL) {
                ASSERT(vn_matchops(vp, spec_getvnodeops()));
                csp = VTOCS(vp);
                ASSERT(csp);
                mutex_enter(&csp->s_lock);
                csp->s_flag |= SFENCED;
                mutex_exit(&csp->s_lock);
                FENDBG((CE_NOTE, "fenced off snode(%p) for dip: %s",
                    (void *)csp, path));
                kmem_free(path, MAXPATHLEN);
                return (0);
        }

        emitted = 0;
        mutex_enter(&stable_lock);
        for (i = 0; i < STABLESIZE; i++) {
                for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
                        ASSERT(sp->s_commonvp);
                        csp = VTOS(sp->s_commonvp);
                        if (csp->s_dip == dip) {
                                /* fence off the common snode */
                                mutex_enter(&csp->s_lock);
                                csp->s_flag |= SFENCED;
                                mutex_exit(&csp->s_lock);
                                if (!emitted) {
                                        FENDBG((CE_NOTE, "fenced 1 of N"));
                                        emitted++;
                                }
                        }
                }
        }
        mutex_exit(&stable_lock);

        FENDBG((CE_NOTE, "fenced off all snodes for dip: %s", path));
        kmem_free(path, MAXPATHLEN);

        return (0);
}
int
spec_unfence_snode(dev_info_t *dip)
{
        struct snode *sp;
        struct snode *csp;
        int i;
        char *path;
        int emitted;

        ASSERT(dip);

        path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
        (void) ddi_pathname(dip, path);

        emitted = 0;
        mutex_enter(&stable_lock);
        for (i = 0; i < STABLESIZE; i++) {
                for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
                        ASSERT(sp->s_commonvp);
                        csp = VTOS(sp->s_commonvp);
                        ASSERT(csp);
                        if (csp->s_dip == dip) {
                                /* unfence the common snode */
                                mutex_enter(&csp->s_lock);
                                csp->s_flag &= ~SFENCED;
                                mutex_exit(&csp->s_lock);
                                if (!emitted) {
                                        FENDBG((CE_NOTE, "unfenced 1 of N"));
                                        emitted++;
                                }
                        }
                }
        }
        mutex_exit(&stable_lock);

        FENDBG((CE_NOTE, "unfenced all snodes for dip: %s", path));
        kmem_free(path, MAXPATHLEN);

        return (0);
}
void
spec_size_invalidate(dev_t dev, vtype_t type)
{
        struct snode *csp;

        mutex_enter(&stable_lock);
        if ((csp = sfind(dev, type, NULL)) != NULL) {
                mutex_enter(&csp->s_lock);
                csp->s_flag &= ~SSIZEVALID;
                VN_RELE_ASYNC(STOV(csp), system_taskq);
                mutex_exit(&csp->s_lock);
        }
        mutex_exit(&stable_lock);
}