/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/fs/snode.h>
#include <sys/fs/fifonode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/autoconf.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>

struct vfs spec_vfs;
static dev_t specdev;
struct kmem_cache *snode_cache;
int spec_debug = 0;

static struct snode *sfind(dev_t, vtype_t, struct vnode *);
static struct vnode *get_cvp(dev_t, vtype_t, struct snode *, int *);
static void sinsert(struct snode *);

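/*
 * Return a shadow special vnode built over the devfs-supplied realvp,
 * and associate the common snode with the attaching dip.
 */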
struct vnode *
specvp_devfs(
	struct vnode *realvp,
	dev_t dev,
	vtype_t vtyp,
	struct cred *cr,
	dev_info_t *dip)
{
	struct vnode *vp;

	ASSERT(realvp && dip);
	vp = specvp(realvp, dev, vtyp, cr);
	ASSERT(vp);

	/* associate a dip hold with the common snode's s_dip pointer */
	spec_assoc_vp_with_devi(vp, dip);
	return (vp);
}

/*
 * Return a shadow special vnode for the given dev.
 * If no snode exists for this dev create one and put it
 * in a table hashed by <dev, realvp>.  If the snode for
 * this dev is already in the table return it (ref count is
 * incremented by sfind).  The snode will be flushed from the
 * table when spec_inactive calls sdelete.
 *
 * The fsid is inherited from the real vnode so that clones
 * can be found.
 */
struct vnode *
specvp(
	struct vnode *vp,
	dev_t dev,
	vtype_t type,
	struct cred *cr)
{
	struct snode *sp;
	struct snode *nsp;
	struct snode *csp;
	struct vnode *svp;
	struct vattr va;
	int rc;
	int used_csp = 0;	/* Did we use pre-allocated csp */

	if (vp == NULL)
		return (NULL);
	if (vp->v_type == VFIFO)
		return (fifovp(vp, cr));

	ASSERT(vp->v_type == type);
	ASSERT(vp->v_rdev == dev);

	/*
	 * Pre-allocate snodes before holding any locks in case we block
	 */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	csp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	/*
	 * Get the time attributes outside of the stable lock since
	 * this operation may block.  Unfortunately, it may not have
	 * been required if the snode is in the cache.
	 */
	va.va_mask = AT_FSID | AT_TIMES;
	rc = fop_getattr(vp, &va, 0, cr, NULL);	/* XXX may block! */

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, vp)) == NULL) {
		struct vnode *cvp;

		sp = nsp;	/* Use pre-allocated snode */
		svp = STOV(sp);

		sp->s_realvp = vp;
		VN_HOLD(vp);
		sp->s_commonvp = NULL;
		sp->s_dev = dev;
		sp->s_dip = NULL;
		sp->s_nextr = 0;
		sp->s_list = NULL;
		sp->s_plcy = NULL;
		sp->s_size = 0;
		sp->s_flag = 0;
		if (rc == 0) {
			/*
			 * Set times in snode to those in the vnode.
			 */
			sp->s_fsid = va.va_fsid;
			sp->s_atime = va.va_atime.tv_sec;
			sp->s_mtime = va.va_mtime.tv_sec;
			sp->s_ctime = va.va_ctime.tv_sec;
		} else {
			sp->s_fsid = specdev;
			sp->s_atime = 0;
			sp->s_mtime = 0;
			sp->s_ctime = 0;
		}
		sp->s_count = 0;
		sp->s_mapcnt = 0;

		vn_reinit(svp);
		svp->v_flag = (vp->v_flag & VROOT);
		svp->v_vfsp = vp->v_vfsp;
		VFS_HOLD(svp->v_vfsp);
		svp->v_type = type;
		svp->v_rdev = dev;
		(void) vn_copypath(vp, svp);
		if (type == VBLK || type == VCHR) {
			cvp = get_cvp(dev, type, csp, &used_csp);
			svp->v_stream = cvp->v_stream;

			sp->s_commonvp = cvp;
		}
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
		if (used_csp == 0) {
			/* Didn't use pre-allocated snode so free it */
			kmem_cache_free(snode_cache, csp);
		}
	} else {
		mutex_exit(&stable_lock);
		/* free unused snode memory */
		kmem_cache_free(snode_cache, nsp);
		kmem_cache_free(snode_cache, csp);
	}
	return (STOV(sp));
}

/*
 * Return a special vnode for the given dev; no vnode is supplied
 * for it to shadow.  Always create a new snode and put it in the
 * table hashed by <dev, NULL>.  The snode will be flushed from the
 * table when spec_inactive() calls sdelete().  The association of
 * this node with an attached instance of hardware is not made until
 * spec_open time.
 *
 * N.B. Assumes caller takes on responsibility of making sure no one
 *	else is creating a snode for (dev, type) at this time.
 */
struct vnode *
makespecvp(dev_t dev, vtype_t type)
{
	struct snode *sp;
	struct vnode *svp, *cvp;
	time_t now;

	sp = kmem_cache_alloc(snode_cache, KM_SLEEP);
	svp = STOV(sp);
	cvp = commonvp(dev, type);
	now = gethrestime_sec();

	sp->s_realvp = NULL;
	sp->s_commonvp = cvp;
	sp->s_dev = dev;
	sp->s_dip = NULL;
	sp->s_nextr = 0;
	sp->s_list = NULL;
	sp->s_plcy = NULL;
	sp->s_size = 0;
	sp->s_flag = 0;
	sp->s_fsid = specdev;
	sp->s_atime = now;
	sp->s_mtime = now;
	sp->s_ctime = now;
	sp->s_count = 0;
	sp->s_mapcnt = 0;

	vn_reinit(svp);
	svp->v_vfsp = &spec_vfs;
	svp->v_stream = cvp->v_stream;
	svp->v_type = type;
	svp->v_rdev = dev;

	vn_exists(svp);
	mutex_enter(&stable_lock);
	sinsert(sp);
	mutex_exit(&stable_lock);

	return (svp);
}

/*
 * This function is called from spec_assoc_vp_with_devi().  That function
 * associates a "new" dip with a common snode, releasing (any) old dip
 * in the process.  This function (spec_assoc_fence()) looks at the "new dip"
 * and determines whether the snode should be fenced off or not.  As the
 * table below indicates, the value of old-dip is a don't care for all cases.
 *
 * old-dip	new-dip		common-snode
 * =========================================
 * Don't care	NULL		unfence
 * Don't care	retired		fence
 * Don't care	not-retired	unfence
 *
 * Since old-dip value is a "don't care", it is not passed into this function.
 */
static void
spec_assoc_fence(dev_info_t *ndip, vnode_t *vp)
{
	int fence;
	struct snode *csp;

	ASSERT(vp);
	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	fence = 0;
	if (ndip != NULL) {
		mutex_enter(&DEVI(ndip)->devi_lock);
		if (DEVI(ndip)->devi_flags & DEVI_RETIRED)
			fence = 1;
		mutex_exit(&DEVI(ndip)->devi_lock);
	}

	csp = VTOCS(vp);
	ASSERT(csp);

	/* SFENCED flag only set on common snode */
	mutex_enter(&csp->s_lock);
	if (fence)
		csp->s_flag |= SFENCED;
	else
		csp->s_flag &= ~SFENCED;
	mutex_exit(&csp->s_lock);

	FENDBG((CE_NOTE, "%sfenced common snode (%p) for new dip=%p",
	    fence ? "" : "un", (void *)csp, (void *)ndip));
}

/*
 * Associate the common snode with a devinfo node.  This is called from:
 *
 *   1) specvp_devfs to associate a specfs node with the dip attached
 *	by devfs.
 *
 *   2) spec_open after path reconstruction and attach.
 *
 *   3) From dacf processing to associate a makespecvp node with
 *	the dip that dacf postattach processing is being performed on.
 *	This association is made prior to open to avoid recursion issues.
 *
 *   4) From ddi_assoc_queue_with_devi to change vnode association as part of
 *	DL_ATTACH/DL_DETACH processing (SDIPSET already set).  The call
 *	from ddi_assoc_queue_with_devi may specify a NULL dip.
 *
 * We put an extra hold on the devinfo node passed in as we establish it as
 * the new s_dip pointer.  Any hold associated with the prior s_dip pointer
 * is released.  The new hold will stay active until another call to
 * spec_assoc_vp_with_devi or until the common snode is destroyed by
 * spec_inactive after the last VN_RELE of the common node.  This devinfo hold
 * transfers across a clone open except in the clone_dev case, where the clone
 * driver is no longer required after open.
 *
 * When SDIPSET is set and s_dip is NULL, the vnode has an association with
 * the driver even though there is currently no association with a specific
 * hardware instance.
 */
void
spec_assoc_vp_with_devi(struct vnode *vp, dev_info_t *dip)
{
	struct snode *csp;
	dev_info_t *olddip;

	ASSERT(vp);

	/*
	 * Don't establish a NULL association for a vnode associated with the
	 * clone driver.  The qassociate(, -1) call from a streams driver's
	 * open implementation to indicate support for qassociate has the
	 * side-effect of this type of spec_assoc_vp_with_devi call.  This
	 * call should not change the association of the pre-clone
	 * vnode associated with the clone driver; the post-clone newdev
	 * association will be established later by spec_clone().
	 */
	if ((dip == NULL) && (getmajor(vp->v_rdev) == clone_major))
		return;

	/* hold the new */
	if (dip)
		e_ddi_hold_devi(dip);

	csp = VTOS(VTOS(vp)->s_commonvp);
	mutex_enter(&csp->s_lock);
	olddip = csp->s_dip;
	csp->s_dip = dip;
	csp->s_flag |= SDIPSET;

	/* If association changes then invalidate cached size */
	if (olddip != dip)
		csp->s_flag &= ~SSIZEVALID;
	mutex_exit(&csp->s_lock);

	spec_assoc_fence(dip, vp);

	/* release the old */
	if (olddip)
		ddi_release_devi(olddip);
}

/*
 * Return the held dip associated with the specified snode.
 */
dev_info_t *
spec_hold_devi_by_vp(struct vnode *vp)
{
	struct snode *csp;
	dev_info_t *dip;

	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	csp = VTOS(VTOS(vp)->s_commonvp);
	dip = csp->s_dip;
	if (dip)
		e_ddi_hold_devi(dip);
	return (dip);
}

/*
 * Find a special vnode that refers to the given device
 * of the given type.  Never return a "common" vnode.
 * Return NULL if a special vnode does not exist.
 * HOLD the vnode before returning it.
 */
struct vnode *
specfind(dev_t dev, vtype_t type)
{
	struct snode *st;
	struct vnode *nvp;

	mutex_enter(&stable_lock);
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		if (st->s_dev == dev) {
			nvp = STOV(st);
			if (nvp->v_type == type && st->s_commonvp != nvp) {
				VN_HOLD(nvp);
				mutex_exit(&stable_lock);
				return (nvp);
			}
		}
		st = st->s_next;
	}
	mutex_exit(&stable_lock);
	return (NULL);
}

/*
 * Loop through the snode cache looking for snodes referencing dip.
 *
 * This function determines if a devinfo node is "BUSY" from the perspective
 * of having an active vnode associated with the device, which represents a
 * dependency on the device's services.  This function is needed because a
 * devinfo node can have a non-zero devi_ref and still NOT be "BUSY" when,
 * for instance, the framework is manipulating the node (has an open
 * ndi_hold_devi).
 *
 * Returns:
 *	DEVI_REFERENCED		- if dip is referenced
 *	DEVI_NOT_REFERENCED	- if dip is not referenced
 */
int
devi_stillreferenced(dev_info_t *dip)
{
	struct snode *sp;
	int i;

	/* if no hold then there can't be an snode with s_dip == dip */
	if (e_ddi_devi_holdcnt(dip) == 0)
		return (DEVI_NOT_REFERENCED);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			if (sp->s_dip == dip) {
				mutex_exit(&stable_lock);
				return (DEVI_REFERENCED);
			}
		}
	}
	mutex_exit(&stable_lock);
	return (DEVI_NOT_REFERENCED);
}

/*
 * Given an snode, returns the open count and the dip
 * associated with that snode.
 * Assumes the caller holds the appropriate locks
 * to prevent snode and/or dip from going away.
 * Returns:
 *	-1	No associated dip
 *	>= 0	Number of opens.
 */
int
spec_devi_open_count(struct snode *sp, dev_info_t **dipp)
{
	dev_info_t *dip;
	uint_t count;
	struct vnode *vp;

	ASSERT(sp);
	ASSERT(dipp);

	vp = STOV(sp);

	*dipp = NULL;

	/*
	 * We are only interested in common snodes.  Only common snodes
	 * get their s_count fields bumped up on opens.
	 */
	if (sp->s_commonvp != vp || (dip = sp->s_dip) == NULL)
		return (-1);

	mutex_enter(&sp->s_lock);
	count = sp->s_count + sp->s_mapcnt;
	if (sp->s_flag & SLOCKED)
		count++;
	mutex_exit(&sp->s_lock);

	*dipp = dip;

	return (count);
}

/*
 * Given a device vnode, return the common
 * vnode associated with it.
 */
struct vnode *
common_specvp(struct vnode *vp)
{
	struct snode *sp;

	if (((vp->v_type != VBLK) && (vp->v_type != VCHR)) ||
	    !vn_matchops(vp, spec_getvnodeops()))
		return (vp);
	sp = VTOS(vp);
	return (sp->s_commonvp);
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.
 * Similar to commonvp() but doesn't acquire the stable_lock, and
 * may use a pre-allocated snode provided by caller.
 */
static struct vnode *
get_cvp(
	dev_t dev,
	vtype_t type,
	struct snode *nsp,	/* pre-allocated snode */
	int *used_nsp)		/* flag indicating if we use nsp */
{
	struct snode *sp;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-allocated snode */
		*used_nsp = 1;		/* return value */
		svp = STOV(sp);

		sp->s_realvp = NULL;
		sp->s_commonvp = svp;	/* points to itself */
		sp->s_dev = dev;
		sp->s_dip = NULL;
		sp->s_nextr = 0;
		sp->s_list = NULL;
		sp->s_plcy = NULL;
		sp->s_size = UNKNOWN_SIZE;
		sp->s_flag = 0;
		sp->s_fsid = specdev;
		sp->s_atime = 0;
		sp->s_mtime = 0;
		sp->s_ctime = 0;
		sp->s_count = 0;
		sp->s_mapcnt = 0;

		vn_reinit(svp);
		svp->v_vfsp = &spec_vfs;
		svp->v_type = type;
		svp->v_rdev = dev;
		vn_exists(svp);
		sinsert(sp);
	} else
		*used_nsp = 0;
	return (STOV(sp));
}

/*
 * Returns a special vnode for the given dev.  The vnode is the
 * one which is "common" to all the snodes which represent the
 * same device.  For use ONLY by SPECFS.
 */
struct vnode *
commonvp(dev_t dev, vtype_t type)
{
	struct snode *sp, *nsp;
	struct vnode *svp;

	/* Pre-allocate snode in case we might block */
	nsp = kmem_cache_alloc(snode_cache, KM_SLEEP);

	mutex_enter(&stable_lock);
	if ((sp = sfind(dev, type, NULL)) == NULL) {
		sp = nsp;		/* Use pre-alloced snode */
		svp = STOV(sp);

		sp->s_realvp = NULL;
		sp->s_commonvp = svp;	/* points to itself */
		sp->s_dev = dev;
		sp->s_dip = NULL;
		sp->s_nextr = 0;
		sp->s_list = NULL;
		sp->s_plcy = NULL;
		sp->s_size = UNKNOWN_SIZE;
		sp->s_flag = 0;
		sp->s_fsid = specdev;
		sp->s_atime = 0;
		sp->s_mtime = 0;
		sp->s_ctime = 0;
		sp->s_count = 0;
		sp->s_mapcnt = 0;

		vn_reinit(svp);
		svp->v_vfsp = &spec_vfs;
		svp->v_type = type;
		svp->v_rdev = dev;
		vn_exists(svp);
		sinsert(sp);
		mutex_exit(&stable_lock);
	} else {
		mutex_exit(&stable_lock);
		/* Didn't need the pre-allocated snode */
		kmem_cache_free(snode_cache, nsp);
	}
	return (STOV(sp));
}

/*
 * Snode lookup stuff.
 * These routines maintain a table of snodes hashed by dev so
 * that the snode for a dev can be found if it already exists.
 */
struct snode *stable[STABLESIZE];
int stablesz = STABLESIZE;
kmutex_t stable_lock;

/*
 * Put a snode in the table.
 */
static void
sinsert(struct snode *sp)
{
	ASSERT(MUTEX_HELD(&stable_lock));
	sp->s_next = stable[STABLEHASH(sp->s_dev)];
	stable[STABLEHASH(sp->s_dev)] = sp;
}

/*
 * Remove an snode from the hash table.
 * The realvp is not released here because spec_inactive() still
 * needs it to do a spec_fsync().
 */
void
sdelete(struct snode *sp)
{
	struct snode *st;
	struct snode *stprev = NULL;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(sp->s_dev)];
	while (st != NULL) {
		if (st == sp) {
			if (stprev == NULL)
				stable[STABLEHASH(sp->s_dev)] = st->s_next;
			else
				stprev->s_next = st->s_next;
			break;
		}
		stprev = st;
		st = st->s_next;
	}
}

/*
 * Lookup an snode by <dev, type, vp>.
 * ONLY looks for snodes with non-NULL s_realvp members and
 * common snodes (with s_commonvp pointing to its vnode).
 *
 * If vp is NULL, only return commonvp.  Otherwise return
 * shadow vp with both shadow and common vp's VN_HELD.
 */
static struct snode *
sfind(
	dev_t dev,
	vtype_t type,
	struct vnode *vp)
{
	struct snode *st;
	struct vnode *svp;

	ASSERT(MUTEX_HELD(&stable_lock));
	st = stable[STABLEHASH(dev)];
	while (st != NULL) {
		svp = STOV(st);
		if (st->s_dev == dev && svp->v_type == type &&
		    VN_CMP(st->s_realvp, vp) &&
		    (vp != NULL || st->s_commonvp == svp) &&
		    (vp == NULL || st->s_realvp->v_vfsp == vp->v_vfsp)) {
			VN_HOLD(svp);
			return (st);
		}
		st = st->s_next;
	}
	return (NULL);
}

/*
 * Mark the accessed, updated, or changed times in an snode
 * with the current time.
 */
void
smark(struct snode *sp, int flag)
{
	time_t now = gethrestime_sec();

	/* check for change to avoid unnecessary locking */
	ASSERT((flag & ~(SACC|SUPD|SCHG)) == 0);
	if (((flag & sp->s_flag) != flag) ||
	    ((flag & SACC) && (sp->s_atime != now)) ||
	    ((flag & SUPD) && (sp->s_mtime != now)) ||
	    ((flag & SCHG) && (sp->s_ctime != now))) {
		/* lock and update */
		mutex_enter(&sp->s_lock);
		sp->s_flag |= flag;
		if (flag & SACC)
			sp->s_atime = now;
		if (flag & SUPD)
			sp->s_mtime = now;
		if (flag & SCHG)
			sp->s_ctime = now;
		mutex_exit(&sp->s_lock);
	}
}

/*
 * Return the maximum file offset permitted for this device.
 * -1 means unrestricted.  SLOFFSET is associated with D_64BIT.
 *
 * On a 32-bit kernel this will limit:
 *   o	D_64BIT devices to SPEC_MAXOFFSET_T.
 *   o	non-D_64BIT character drivers to a 32-bit offset (MAXOFF_T).
 */
offset_t
spec_maxoffset(struct vnode *vp)
{
	struct snode *sp = VTOS(vp);
	struct snode *csp = VTOS(sp->s_commonvp);

	if (vp->v_stream)
		return ((offset_t)-1);
	else if (csp->s_flag & SANYOFFSET)	/* D_U64BIT */
		return ((offset_t)-1);
#ifdef _ILP32
	if (csp->s_flag & SLOFFSET)		/* D_64BIT */
		return (SPEC_MAXOFFSET_T);
#endif	/* _ILP32 */
	return (MAXOFF_T);
}

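/*
 * kmem cache constructor for snodes: allocate the embedded vnode,
 * point it at the specfs vnode operations, and initialize the snode's
 * lock and condition variable.
 */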
/*ARGSUSED*/
static int
snode_constructor(void *buf, void *cdrarg, int kmflags)
{
	struct snode *sp = buf;
	struct vnode *vp;

	vp = sp->s_vnode = vn_alloc(kmflags);
	if (vp == NULL) {
		return (-1);
	}
	vn_setops(vp, spec_getvnodeops());
	vp->v_data = sp;

	mutex_init(&sp->s_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sp->s_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*ARGSUSED1*/
static void
snode_destructor(void *buf, void *cdrarg)
{
	struct snode *sp = buf;
	struct vnode *vp = STOV(sp);

	mutex_destroy(&sp->s_lock);
	cv_destroy(&sp->s_cv);

	vn_free(vp);
}

static const struct vfsops spec_vfsops = {
	.vfs_sync = spec_sync,
};

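/*
 * Initialize the spec filesystem: register the vfs operations for
 * fstype, create the snode cache, initialize spec_vfs, and reserve an
 * anonymous device number used as the fsid of snodes that have no
 * real vnode.
 */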
int
specinit(int fstype, char *name)
{
	int error;
	dev_t dev;

	/*
	 * Associate vfs and vnode operations.
	 */
	error = vfs_setfsops(fstype, &spec_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "specinit: bad fstype");
		return (error);
	}

	mutex_init(&stable_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spec_syncbusy, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Create snode cache
	 */
	snode_cache = kmem_cache_create("snode_cache", sizeof (struct snode),
	    0, snode_constructor, snode_destructor, NULL, NULL, NULL, 0);

	/*
	 * Associate vfs operations with spec_vfs
	 */
	VFS_INIT(&spec_vfs, &spec_vfsops, NULL);
	if ((dev = getudev()) == -1)
		dev = 0;
	specdev = makedevice(dev, 0);
	return (0);
}

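/*
 * Close a device: for character devices dismantle the associated
 * stream (if any) or call the driver's close routine; for block
 * devices flush and invalidate cached pages and buffers before
 * closing, so that removable media can safely be changed.
 */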
int
device_close(struct vnode *vp, int flag, struct cred *cr)
{
	struct snode *sp = VTOS(vp);
	enum vtype type = vp->v_type;
	struct vnode *cvp;
	dev_t dev;
	int error = 0;

	dev = sp->s_dev;
	cvp = sp->s_commonvp;

	switch (type) {

	case VCHR:
		if (vp->v_stream) {
			if (cvp->v_stream != NULL)
				error = strclose(cvp, flag, cr);
			vp->v_stream = NULL;
		} else
			error = dev_close(dev, flag, OTYP_CHR, cr);
		break;

	case VBLK:
		/*
		 * On last close of a block device we must
		 * invalidate any in-core blocks so that we
		 * can, for example, change floppy disks.
		 */
		(void) spec_putpage(cvp, 0,
		    (size_t)0, B_INVAL|B_FORCE, cr, NULL);
		bflush(dev);
		binval(dev);
		error = dev_close(dev, flag, OTYP_BLK, cr);
		break;
	default:
		panic("device_close: not a device");
		/*NOTREACHED*/
	}

	return (error);
}

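/*
 * Create a VCHR snode for a controlling terminal, counting it as an
 * open on the underlying common snode.
 */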
struct vnode *
makectty(vnode_t *ovp)
{
	vnode_t *vp;

	if ((vp = makespecvp(ovp->v_rdev, VCHR)) != NULL) {
		struct snode *sp;
		struct snode *csp;
		struct vnode *cvp;

		sp = VTOS(vp);
		cvp = sp->s_commonvp;
		csp = VTOS(cvp);
		mutex_enter(&csp->s_lock);
		csp->s_count++;
		mutex_exit(&csp->s_lock);
	}

	return (vp);
}

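/*
 * Walk the snode table, applying the callback to each snode.  The walk
 * terminates early if the callback returns anything other than
 * DDI_WALK_CONTINUE.  stable_lock is held across the entire walk.
 */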
void
spec_snode_walk(int (*callback)(struct snode *sp, void *arg), void *arg)
{
	struct snode *sp;
	int i;

	ASSERT(callback);

	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp; sp = sp->s_next) {
			if (callback(sp, arg) != DDI_WALK_CONTINUE)
				goto out;
		}
	}
out:
	mutex_exit(&stable_lock);
}

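/*
 * Return 1 if the vnode is a specfs node created by a clone open,
 * 0 otherwise.
 */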
int
spec_is_clone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SCLONE) ? 1 : 0);
	}

	return (0);
}

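/*
 * Return 1 if the vnode is a specfs node for a self-cloning device,
 * 0 otherwise.
 */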
int
spec_is_selfclone(vnode_t *vp)
{
	struct snode *sp;

	if (vn_matchops(vp, spec_getvnodeops())) {
		sp = VTOS(vp);
		return ((sp->s_flag & SSELFCLONE) ? 1 : 0);
	}

	return (0);
}

/*
 * We may be invoked with a NULL vp in which case we fence off
 * all snodes associated with dip.
 */
int
spec_fence_snode(dev_info_t *dip, struct vnode *vp)
{
	struct snode *sp;
	struct snode *csp;
	int retired;
	int i;
	char *path;
	int emitted;

	ASSERT(dip);

	retired = 0;
	mutex_enter(&DEVI(dip)->devi_lock);
	if (DEVI(dip)->devi_flags & DEVI_RETIRED)
		retired = 1;
	mutex_exit(&DEVI(dip)->devi_lock);

	if (!retired)
		return (0);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);

	if (vp != NULL) {
		ASSERT(vn_matchops(vp, spec_getvnodeops()));
		csp = VTOCS(vp);
		ASSERT(csp);
		mutex_enter(&csp->s_lock);
		csp->s_flag |= SFENCED;
		mutex_exit(&csp->s_lock);
		FENDBG((CE_NOTE, "fenced off snode(%p) for dip: %s",
		    (void *)csp, path));
		kmem_free(path, MAXPATHLEN);
		return (0);
	}

	emitted = 0;
	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			ASSERT(sp->s_commonvp);
			csp = VTOS(sp->s_commonvp);
			if (csp->s_dip == dip) {
				/* fence off the common snode */
				mutex_enter(&csp->s_lock);
				csp->s_flag |= SFENCED;
				mutex_exit(&csp->s_lock);
				if (!emitted) {
					FENDBG((CE_NOTE, "fenced 1 of N"));
					emitted++;
				}
			}
		}
	}
	mutex_exit(&stable_lock);

	FENDBG((CE_NOTE, "fenced off all snodes for dip: %s", path));
	kmem_free(path, MAXPATHLEN);

	return (0);
}

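/*
 * Remove the fence from all common snodes associated with dip, making
 * the device usable again.
 */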
int
spec_unfence_snode(dev_info_t *dip)
{
	struct snode *sp;
	struct snode *csp;
	int i;
	char *path;
	int emitted;

	ASSERT(dip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);

	emitted = 0;
	mutex_enter(&stable_lock);
	for (i = 0; i < STABLESIZE; i++) {
		for (sp = stable[i]; sp != NULL; sp = sp->s_next) {
			ASSERT(sp->s_commonvp);
			csp = VTOS(sp->s_commonvp);
			ASSERT(csp);
			if (csp->s_dip == dip) {
				/* unfence the common snode */
				mutex_enter(&csp->s_lock);
				csp->s_flag &= ~SFENCED;
				mutex_exit(&csp->s_lock);
				if (!emitted) {
					FENDBG((CE_NOTE, "unfenced 1 of N"));
					emitted++;
				}
			}
		}
	}
	mutex_exit(&stable_lock);

	FENDBG((CE_NOTE, "unfenced all snodes for dip: %s", path));
	kmem_free(path, MAXPATHLEN);

	return (0);
}

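/*
 * Invalidate the cached size of the common snode for the given device
 * so that it will be recomputed on next use.
 */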
void
spec_size_invalidate(dev_t dev, vtype_t type)
{
	struct snode *csp;

	mutex_enter(&stable_lock);
	if ((csp = sfind(dev, type, NULL)) != NULL) {
		mutex_enter(&csp->s_lock);
		csp->s_flag &= ~SSIZEVALID;
		VN_RELE_ASYNC(STOV(csp), system_taskq);
		mutex_exit(&csp->s_lock);
	}
	mutex_exit(&stable_lock);
}