/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * miscellaneous routines for the devfs
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/errno.h>
#include <sys/dirent.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/fs_subr.h>
#include <sys/fs/dv_node.h>
#include <sys/fs/snode.h>
#include <sys/sunndi.h>
#include <sys/sunmdi.h>
int devfs_debug = 0x0;

const char dvnm[] = "devfs";
kmem_cache_t *dv_node_cache;    /* dv_node cache */

/*
 * The devfs_clean_key is taken during a devfs_clean operation: it is used to
 * prevent unnecessary code execution and for detection of potential deadlocks.
 */
uint_t devfs_clean_key;
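
/*
 * Illustrative sketch, not part of the original file: devfs_clean_key is a
 * thread-specific-data key.  A thread on the devfs_clean() path records
 * itself roughly like this; the helper names below are hypothetical.
 */
static void
devfs_clean_mark_example(void)
{
        /* mark this thread as being on the devfs_clean() path */
        (void) tsd_set(devfs_clean_key, (void *)(uintptr_t)1);
}

static int
devfs_on_clean_path_example(void)
{
        /* non-NULL TSD value means this thread entered via devfs_clean() */
        return (tsd_get(devfs_clean_key) != NULL);
}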
struct dv_node *dvroot;
/* prototype memory vattrs */
vattr_t dv_vattr_dir = {
        VATTR_TYPE|VATTR_MODE|VATTR_UID|VATTR_GID,      /* va_mask */
        DV_DIRMODE_DEFAULT,     /* va_mode */
        DV_UID_DEFAULT,         /* va_uid */
        DV_GID_DEFAULT,         /* va_gid */
};

vattr_t dv_vattr_file = {
        VATTR_TYPE|VATTR_MODE|VATTR_SIZE|VATTR_UID|VATTR_GID|VATTR_RDEV, /* va_mask */
        DV_DEVMODE_DEFAULT,     /* va_mode */
        DV_UID_DEFAULT,         /* va_uid */
        DV_GID_DEFAULT,         /* va_gid */
};

vattr_t dv_vattr_priv = {
        VATTR_TYPE|VATTR_MODE|VATTR_SIZE|VATTR_UID|VATTR_GID|VATTR_RDEV, /* va_mask */
        DV_DEVMODE_PRIV,        /* va_mode */
        DV_UID_DEFAULT,         /* va_uid */
        DV_GID_DEFAULT,         /* va_gid */
};

extern dev_info_t *clone_dip;
extern major_t clone_major;
extern struct dev_ops *ddi_hold_driver(major_t);
/* dv_node node constructor for kmem cache */
i_dv_node_ctor(void *buf, void *cfarg, int flag)
        _NOTE(ARGUNUSED(cfarg, flag))
        struct dv_node  *dv = (struct dv_node *)buf;

        bzero(buf, sizeof (struct dv_node));
        vp = dv->dv_vnode = vn_alloc(flag);

        rw_init(&dv->dv_contents, NULL, RW_DEFAULT, NULL);
/* dv_node node destructor for kmem cache */
i_dv_node_dtor(void *buf, void *arg)
        _NOTE(ARGUNUSED(arg))
        struct dv_node  *dv = (struct dv_node *)buf;
        struct vnode    *vp = DVTOV(dv);

        rw_destroy(&dv->dv_contents);
/* initialize dv_node node cache */
        ASSERT(dv_node_cache == NULL);
        dv_node_cache = kmem_cache_create("dv_node_cache",
            sizeof (struct dv_node), 0, i_dv_node_ctor, i_dv_node_dtor,
            NULL, NULL, NULL, 0);

        tsd_create(&devfs_clean_key, NULL);
/* destroy dv_node node cache */
        ASSERT(dv_node_cache != NULL);
        kmem_cache_destroy(dv_node_cache);
        dv_node_cache = NULL;

        tsd_destroy(&devfs_clean_key);
/*
 * dv_mkino - Generate a unique inode number for devfs nodes.
 *
 * Although ino_t is 64 bits, the inode number is truncated to 32 bits for 32
 * bit non-LARGEFILE applications. This means that there is a requirement to
 * maintain the inode number as a 32 bit value or applications will have
 * stat(2) calls fail with EOVERFLOW. We form a 32 bit inode number from the
 * dev_t, but if the minor number is larger than L_MAXMIN32 we fold extra minor
 *
 * To generate inode numbers for directories, we assume that we will never use
 * more than half the major space - this allows for ~8190 drivers. We use this
 * upper major number space to allocate inode numbers for directories by
 * encoding the major and instance into this space.
 *
 * We also skew the result so that inode 2 is reserved for the root of the file
 * system.
 *
 * As part of the future support for 64-bit dev_t APIs, the upper minor bits
 * should be folded into the high inode bits by adding the following code
 *
 *	#if (L_BITSMINOR32 != L_BITSMINOR)
 *	|* fold overflow minor bits into high bits of inode number *|
 *	ino |= ((ino_t)(minor >> L_BITSMINOR32)) << L_BITSMINOR;
 *	#endif |* (L_BITSMINOR32 != L_BITSMINOR) *|
 *
 * This way only applications that use devices that overflow their minor
 * space will have an application level impact.
 */
dv_mkino(dev_info_t *devi, vtype_t typ, dev_t dev)
                major = ((L_MAXMAJ32 + 1) >> 1) + DEVI(devi)->devi_major;
                minor = ddi_get_instance(devi);

                /* makedevice32 in high half of major number space */
                ino = (ino_t)((major << L_BITSMINOR32) | (minor & L_MAXMIN32));

                major = DEVI(devi)->devi_major;

                major = getmajor(dev);
                minor = getminor(dev);

                ino = (ino_t)((major << L_BITSMINOR32) | (minor & L_MAXMIN32));

                /* make ino for VCHR different than VBLK */

        ino += DV_ROOTINO + 1;          /* skew */

        /*
         * diagnose things a little early because adding the skew to a large
         * minor number could roll over the major.
         */
        if ((major >= (L_MAXMAJ32 >> 1)) && (warn == 0)) {
                cmn_err(CE_WARN, "%s: inode numbers are not unique", dvnm);
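
/*
 * Illustrative sketch, not part of the original file: the leaf-node half of
 * the encoding described above, assuming the usual 32-bit dev_t split of
 * 18 minor bits (L_MAXMIN32 == 0x3ffff) and DV_ROOTINO == 2.  For example,
 * major 27 and minor 5 pack as (27 << 18) | 5, plus the skew of
 * DV_ROOTINO + 1.  The VCHR/VBLK differentiation and the directory encoding
 * are omitted here.
 */
static ino_t
dv_mkino_leaf_example(dev_t dev)
{
        major_t major = getmajor(dev);
        minor_t minor = getminor(dev);
        ino_t ino;

        /* makedevice32-style packing of major and minor */
        ino = (ino_t)(((ino_t)major << L_BITSMINOR32) | (minor & L_MAXMIN32));
        return (ino + DV_ROOTINO + 1);          /* skew past the root inode */
}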
/*
 * Compare two nodes lexicographically to balance avl tree
 */
dv_compare_nodes(const struct dv_node *dv1, const struct dv_node *dv2)
        if ((rv = strcmp(dv1->dv_name, dv2->dv_name)) == 0)
                return (0);
        return ((rv < 0) ? -1 : 1);
/*
 * Build the first VDIR dv_node.
 */
dv_mkroot(struct vfs *vfsp, dev_t devfsdev)
        ASSERT(ddi_root_node() != NULL);
        ASSERT(dv_node_cache != NULL);

        dcmn_err3(("dv_mkroot\n"));
        dv = kmem_cache_alloc(dv_node_cache, KM_SLEEP);

        vp->v_rdev = devfsdev;
        vn_setops(vp, &dv_vnodeops);

        dv->dv_name = NULL;             /* not needed */
        dv->dv_devi = ddi_root_node();
        dv->dv_ino = DV_ROOTINO;
        dv->dv_nlink = 2;               /* name + . (no dv_insert) */
        dv->dv_dotdot = dv;             /* .. == self */
        dv->dv_attrvp = NULLVP;
        dv->dv_flags = DV_BUILD;
        dv->dv_dflt_mode = 0;

        avl_create(&dv->dv_entries,
            (int (*)(const void *, const void *))dv_compare_nodes,
            sizeof (struct dv_node), offsetof(struct dv_node, dv_avllink));
/*
 * Given a probed or attached nexus node, create a VDIR dv_node.
 * No dv_attrvp is created at this point.
 */
dv_mkdir(struct dv_node *ddv, dev_info_t *devi, char *nm)
        dcmn_err4(("dv_mkdir: %s\n", nm));

        dv = kmem_cache_alloc(dv_node_cache, KM_SLEEP);
        nmlen = strlen(nm) + 1;
        dv->dv_name = kmem_alloc(nmlen, KM_SLEEP);
        bcopy(nm, dv->dv_name, nmlen);
        dv->dv_namelen = nmlen - 1;     /* '\0' not included */

        vp->v_vfsp = DVTOV(ddv)->v_vfsp;
        vp->v_rdev = DVTOV(ddv)->v_rdev;
        vn_setops(vp, vn_getops(DVTOV(ddv)));

        dv->dv_ino = dv_mkino(devi, VDIR, NODEV);
        dv->dv_nlink = 0;               /* updated on insert */
        dv->dv_attrvp = NULLVP;
        dv->dv_flags = DV_BUILD;
        dv->dv_dflt_mode = 0;

        avl_create(&dv->dv_entries,
            (int (*)(const void *, const void *))dv_compare_nodes,
            sizeof (struct dv_node), offsetof(struct dv_node, dv_avllink));
/*
 * Given a minor node, create a VCHR or VBLK dv_node.
 * No dv_attrvp is created at this point.
 */
static struct dv_node *
dv_mknod(struct dv_node *ddv, dev_info_t *devi, char *nm,
    struct ddi_minor_data *dmd)
        dcmn_err4(("dv_mknod: %s\n", nm));

        dv = kmem_cache_alloc(dv_node_cache, KM_SLEEP);
        nmlen = strlen(nm) + 1;
        dv->dv_name = kmem_alloc(nmlen, KM_SLEEP);
        bcopy(nm, dv->dv_name, nmlen);
        dv->dv_namelen = nmlen - 1;     /* no '\0' */

        vp->v_vfsp = DVTOV(ddv)->v_vfsp;
        vp->v_type = dmd->ddm_spec_type == S_IFCHR ? VCHR : VBLK;
        vp->v_rdev = dmd->ddm_dev;
        vn_setops(vp, vn_getops(DVTOV(ddv)));

        /* increment dev_ref with devi_lock held */
        ASSERT(DEVI_BUSY_OWNED(devi));
        mutex_enter(&DEVI(devi)->devi_lock);
        DEVI(devi)->devi_ref++;         /* ndi_hold_devi(dip) */
        mutex_exit(&DEVI(devi)->devi_lock);

        dv->dv_ino = dv_mkino(devi, vp->v_type, vp->v_rdev);
        dv->dv_nlink = 0;               /* updated on insert */
        dv->dv_attrvp = NULLVP;

        if (dmd->type == DDM_INTERNAL_PATH)
                dv->dv_flags |= DV_INTERNAL;
        if (dmd->ddm_flags & DM_NO_FSPERM)
                dv->dv_flags |= DV_NO_FSPERM;

        dv->dv_priv = dmd->ddm_node_priv;

        /*
         * Minors created with ddi_create_priv_minor_node can specify
         * a default mode permission other than the devfs default.
         */
        if (dv->dv_priv || dv->dv_flags & DV_NO_FSPERM) {
                dcmn_err5(("%s: dv_mknod default priv mode 0%o\n",
                    dv->dv_name, dmd->ddm_priv_mode));
                dv->dv_flags |= DV_DFLT_MODE;
                dv->dv_dflt_mode = dmd->ddm_priv_mode & S_IAMB;
/*
 * Destroy what we created in dv_mkdir or dv_mknod.
 * In the case of a *referenced* directory, do nothing.
 */
dv_destroy(struct dv_node *dv, uint_t flags)
        vnode_t *vp = DVTOV(dv);
        ASSERT(dv->dv_nlink == 0);      /* no references */

        dcmn_err4(("dv_destroy: %s\n", dv->dv_name));

        /*
         * We may be asked to unlink referenced directories.
         * In this case, there is nothing to be done.
         * The eventual memory free will be done in
         */
        if (vp->v_count != 0) {
                ASSERT(vp->v_type == VDIR);
                ASSERT(flags & DV_CLEAN_FORCE);
                ASSERT(DV_STALE(dv));

        if (vp->v_type == VDIR) {
                ASSERT(DV_FIRST_ENTRY(dv) == NULL);
                avl_destroy(&dv->dv_entries);

        if (dv->dv_attrvp != NULLVP)
                VN_RELE(dv->dv_attrvp);
        if (dv->dv_attr != NULL)
                kmem_free(dv->dv_attr, sizeof (struct vattr));
        if (dv->dv_name != NULL)
                kmem_free(dv->dv_name, dv->dv_namelen + 1);
        if (dv->dv_devi != NULL) {
                ndi_rele_devi(dv->dv_devi);
        if (dv->dv_priv != NULL) {

        kmem_cache_free(dv_node_cache, dv);
/*
 * Find and hold dv_node by name
 */
static struct dv_node *
dv_findbyname(struct dv_node *ddv, char *nm)
        struct dv_node dvtmp;

        ASSERT(RW_LOCK_HELD(&ddv->dv_contents));
        dcmn_err3(("dv_findbyname: %s\n", nm));

        dv = avl_find(&ddv->dv_entries, &dvtmp, &where);
                ASSERT(dv->dv_dotdot == ddv);
                ASSERT(strcmp(dv->dv_name, nm) == 0);
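
/*
 * Illustrative sketch, not part of the original file: the lookup-by-template
 * pattern used by dv_findbyname() above.  A throwaway dv_node on the stack
 * carries the name key; avl_find() then applies dv_compare_nodes() against
 * the directory's dv_entries tree.  Taking the hold on the result is omitted.
 */
static struct dv_node *
dv_findbyname_example(struct dv_node *ddv, char *nm)
{
        struct dv_node dvtmp;
        avl_index_t where;

        ASSERT(RW_LOCK_HELD(&ddv->dv_contents));

        dvtmp.dv_name = nm;             /* only the key field matters */
        return (avl_find(&ddv->dv_entries, &dvtmp, &where));
}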
/*
 * Inserts a new dv_node in a parent directory
 */
dv_insert(struct dv_node *ddv, struct dv_node *dv)
        ASSERT(RW_WRITE_HELD(&ddv->dv_contents));
        ASSERT(DVTOV(ddv)->v_type == VDIR);
        ASSERT(ddv->dv_nlink >= 2);
        ASSERT(dv->dv_nlink == 0);

        dcmn_err3(("dv_insert: %s\n", dv->dv_name));

        if (DVTOV(dv)->v_type == VDIR) {
                ddv->dv_nlink++;        /* .. to containing directory */
                dv->dv_nlink = 2;       /* name + . */
        } else {
                dv->dv_nlink = 1;       /* name */
        }

        /* enter node in the avl tree */
        VERIFY(avl_find(&ddv->dv_entries, dv, &where) == NULL);
        avl_insert(&ddv->dv_entries, dv, where);
/*
 * Unlink a dv_node from a parent directory
 */
dv_unlink(struct dv_node *ddv, struct dv_node *dv)
        /* verify linkage of arguments */
        ASSERT(dv->dv_dotdot == ddv);
        ASSERT(RW_WRITE_HELD(&ddv->dv_contents));
        ASSERT(DVTOV(ddv)->v_type == VDIR);

        dcmn_err3(("dv_unlink: %s\n", dv->dv_name));

        if (DVTOV(dv)->v_type == VDIR) {
                ddv->dv_nlink--;        /* .. to containing directory */
                dv->dv_nlink -= 2;      /* name + . */
        } else {
                dv->dv_nlink -= 1;      /* name */
        }
        ASSERT(ddv->dv_nlink >= 2);
        ASSERT(dv->dv_nlink == 0);

        dv->dv_dotdot = NULL;

        /* remove from avl tree */
        avl_remove(&ddv->dv_entries, dv);
/*
 * Merge devfs node specific information into an attribute structure.
 *
 * NOTE: specfs provides ATIME,MTIME,CTIME,SIZE,BLKSIZE,NBLOCKS on leaf node.
 */
dv_vattr_merge(struct dv_node *dv, struct vattr *vap)
        struct vnode *vp = DVTOV(dv);

        vap->va_nodeid = dv->dv_ino;
        vap->va_nlink = dv->dv_nlink;

        if (vp->v_type == VDIR) {
                vap->va_fsid = vp->v_rdev;
        } else {
                vap->va_rdev = vp->v_rdev;
                vap->va_fsid = DVTOV(dv->dv_dotdot)->v_rdev;
                vap->va_type = vp->v_type;
                /* don't trust the shadow file type */
                vap->va_mode &= ~S_IFMT;
                if (vap->va_type == VCHR)
                        vap->va_mode |= S_IFCHR;
                else
                        vap->va_mode |= S_IFBLK;
        }
/*
 * Get default device permission by consulting rules in
 * privilege specification in minor node and /etc/minor_perm.
 *
 * This function is called from the devname filesystem to get default
 * permissions for a device exported to a non-global zone.
 */
devfs_get_defattr(struct vnode *vp, struct vattr *vap, int *no_fs_perm)
        /* If vp isn't a dv_node, return something sensible */
        if (!vn_matchops(vp, &dv_vnodeops)) {
                *vap = dv_vattr_file;

        /*
         * For minors not created by ddi_create_priv_minor_node(),
         * use devfs defaults.
         */
        if (vp->v_type == VDIR) {
        } else if (dv->dv_flags & DV_NO_FSPERM) {
                *vap = dv_vattr_priv;
                /*
                 * look up perm bits from minor_perm
                 */
                *vap = dv_vattr_file;
                if (dev_minorperm(dv->dv_devi, dv->dv_name, &mp) == 0) {
                        VATTR_MP_MERGE((*vap), mp);
                        dcmn_err5(("%s: minor perm mode 0%o\n",
                            dv->dv_name, vap->va_mode));
                } else if (dv->dv_flags & DV_DFLT_MODE) {
                        ASSERT((dv->dv_dflt_mode & ~S_IAMB) == 0);
                        vap->va_mode &= ~S_IAMB;
                        vap->va_mode |= dv->dv_dflt_mode;
                        dcmn_err5(("%s: priv mode 0%o\n",
                            dv->dv_name, vap->va_mode));
/*
 * Given a VDIR dv_node, find/create the associated VDIR
 * node in the shadow attribute filesystem.
 *
 * Given a VCHR/VBLK dv_node, find the associated VREG
 * node in the shadow attribute filesystem. These nodes
 * are only created to persist non-default attributes.
 * Lack of such a node implies the default permissions
 *
 * Managing the attribute file entries is slightly tricky (mostly
 * because we can't intercept VN_HOLD and VN_RELE except on the last
 *
 * We assert that if the dv_attrvp pointer is non-NULL, it points
 * to a singly-held (by us) vnode that represents the shadow entry
 * in the underlying filesystem. To avoid store-ordering issues,
 * we assert that the pointer can only be tested under the dv_contents
 */
dv_shadow_node(
        struct vnode *dvp,      /* devfs parent directory vnode */
        char *nm,               /* name component */
        struct vnode *vp,       /* devfs vnode */
        struct pathname *pnp,   /* the path .. */
        struct vnode *rdir,     /* the root .. */
        struct cred *cred,      /* who's asking? */
        int flags)              /* optionally create shadow node */
        struct dv_node  *dv;    /* dv_node of named directory */
        struct vnode    *rdvp;  /* shadow parent directory vnode */
        struct vnode    *rvp;   /* shadow vnode */
        struct vnode    *rrvp;  /* realvp of shadow vnode */
        ASSERT(vp->v_type == VDIR || vp->v_type == VCHR || vp->v_type == VBLK);

        dcmn_err3(("dv_shadow_node: name %s attr %p\n",
            nm, (void *)dv->dv_attrvp));

        if ((flags & DV_SHADOW_WRITE_HELD) == 0) {
                ASSERT(RW_READ_HELD(&dv->dv_contents));
                if (dv->dv_attrvp != NULLVP)
                if (!rw_tryupgrade(&dv->dv_contents)) {
                        rw_exit(&dv->dv_contents);
                        rw_enter(&dv->dv_contents, RW_WRITER);
                        if (dv->dv_attrvp != NULLVP) {
                                rw_downgrade(&dv->dv_contents);
        } else {
                ASSERT(RW_WRITE_HELD(&dv->dv_contents));
                if (dv->dv_attrvp != NULLVP)
        }

        ASSERT(RW_WRITE_HELD(&dv->dv_contents) && dv->dv_attrvp == NULL);

        rdvp = VTODV(dvp)->dv_attrvp;

        if (rdvp && (dv->dv_flags & DV_NO_FSPERM) == 0) {
                error = fop_lookup(rdvp, nm, &rvp, pnp, LOOKUP_DIR, rdir, cred,

                /* factor out the snode since we only want the attribute node */
                if ((error == 0) && (fop_realvp(rvp, &rrvp, NULL) == 0)) {

                error = EROFS;          /* no parent, no entry */

        /*
         * All we want is the permissions (and maybe ACLs and
         * extended attributes), and we want to perform lookups
         * by name. Drivers occasionally change their minor
         * number space. If something changes, there's not
         * much we can do about it here.
         */

                /* The shadow node checks out. We are done */
                dv->dv_attrvp = rvp;    /* with one hold */

                /*
                 * Determine if we have non-trivial ACLs on this node.
                 * It is not necessary to fop_rwlock since fs_acl_nontrivial
                 * only does fop_getsecattr.
                 */
                dv->dv_flags &= ~DV_ACL;

                if (fs_acl_nontrivial(rvp, cred))
                        dv->dv_flags |= DV_ACL;

                /*
                 * If we have synced out the memory attributes, free
                 * them and switch back to using the persistent store.
                 */
                if (rvp && dv->dv_attr) {
                        kmem_free(dv->dv_attr, sizeof (struct vattr));

                if ((flags & DV_SHADOW_WRITE_HELD) == 0)
                        rw_downgrade(&dv->dv_contents);
                ASSERT(RW_LOCK_HELD(&dv->dv_contents));
        /*
         * Failed to find attribute in persistent backing store,
         * get default permission bits.
         */
        devfs_get_defattr(vp, &vattr, NULL);

        dv_vattr_merge(dv, &vattr);
        gethrestime(&vattr.va_atime);
        vattr.va_mtime = vattr.va_atime;
        vattr.va_ctime = vattr.va_atime;

        /*
         * Try to create shadow dir. This is necessary in case
         * we need to create a shadow leaf node later, when user
         */
        if ((error == ENOENT) && !create_tried) {
                switch (vp->v_type) {
                        error = fop_mkdir(rdvp, nm, &vattr, &rvp, kcred,
                        dsysdebug(error, ("vop_mkdir %s %s %d\n",
                            VTODV(dvp)->dv_name, nm, error));

                        /*
                         * Shadow nodes are only created on demand
                         */
                        if (flags & DV_SHADOW_CREATE) {
                                error = fop_create(rdvp, nm, &vattr, NONEXCL,
                                    VREAD|VWRITE, &rvp, kcred, 0, NULL, NULL);
                                dsysdebug(error, ("vop_create %s %s %d\n",
                                    VTODV(dvp)->dv_name, nm, error));

                        cmn_err(CE_PANIC, "devfs: %s: create", dvnm);

            (error == 0) || (error == EEXIST)) {

        /* Store attribute in memory */
        if (dv->dv_attr == NULL) {
                dv->dv_attr = kmem_alloc(sizeof (struct vattr), KM_SLEEP);
                *(dv->dv_attr) = vattr;

        if ((flags & DV_SHADOW_WRITE_HELD) == 0)
                rw_downgrade(&dv->dv_contents);
        ASSERT(RW_LOCK_HELD(&dv->dv_contents));
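
/*
 * Illustrative sketch, not part of the original file: the reader-to-writer
 * upgrade pattern used by dv_shadow_node() above.  If rw_tryupgrade() fails,
 * the lock is dropped and retaken as a writer, so the protected condition
 * must be retested after reacquisition.
 */
static void
dv_contents_upgrade_example(struct dv_node *dv)
{
        ASSERT(RW_READ_HELD(&dv->dv_contents));

        if (!rw_tryupgrade(&dv->dv_contents)) {
                rw_exit(&dv->dv_contents);
                rw_enter(&dv->dv_contents, RW_WRITER);
                /* state may have changed while the lock was dropped */
                if (dv->dv_attrvp != NULLVP) {
                        rw_downgrade(&dv->dv_contents);
                        return;
                }
        }
        ASSERT(RW_WRITE_HELD(&dv->dv_contents));
}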
/*
 * Given a devinfo node, and a name, returns the appropriate
 * minor information for that named node, if it exists.
 */
dv_find_leafnode(dev_info_t *devi, char *minor_nm, struct ddi_minor_data *r_mi)
        struct ddi_minor_data *dmd;

        ASSERT(i_ddi_devi_attached(devi));

        dcmn_err3(("dv_find_leafnode: %s\n", minor_nm));
        ASSERT(DEVI_BUSY_OWNED(devi));
        for (dmd = DEVI(devi)->devi_minor; dmd; dmd = dmd->next) {
                /*
                 * Skip alias nodes and nodes without a name.
                 */
                if ((dmd->type == DDM_ALIAS) || (dmd->ddm_name == NULL))
                        continue;

                dcmn_err4(("dv_find_leafnode: (%s,%s)\n",
                    minor_nm, dmd->ddm_name));
                if (strcmp(minor_nm, dmd->ddm_name) == 0) {
                        r_mi->ddm_dev = dmd->ddm_dev;
                        r_mi->ddm_spec_type = dmd->ddm_spec_type;
                        r_mi->type = dmd->type;
                        r_mi->ddm_flags = dmd->ddm_flags;
                        r_mi->ddm_node_priv = dmd->ddm_node_priv;
                        r_mi->ddm_priv_mode = dmd->ddm_priv_mode;
                        if (r_mi->ddm_node_priv)
                                dphold(r_mi->ddm_node_priv);

        dcmn_err3(("dv_find_leafnode: %s: ENOENT\n", minor_nm));
/*
 * Special handling for clone node:
 *	Clone minor name is a driver name, the minor number will
 *	be the major number of the driver. There is no minor
 *	node under the clone driver, so we'll manufacture the
 */
static struct dv_node *
dv_clone_mknod(struct dv_node *ddv, char *drvname)
        struct ddi_minor_data *dmd;

        /*
         * Make sure drvname is a STREAMS driver. We load the driver,
         * but don't attach to any instances. This makes stat(2)
         */
        major = ddi_name_to_major(drvname);
        if (major == DDI_MAJOR_T_NONE)

        if (ddi_hold_driver(major) == NULL)

        if (STREAMSTAB(major) == NULL) {
                ddi_rele_driver(major);

        ddi_rele_driver(major);

        devnm = kmem_alloc(MAXNAMELEN, KM_SLEEP);
        (void) snprintf(devnm, MAXNAMELEN, "clone@0:%s", drvname);
        dmd = kmem_zalloc(sizeof (*dmd), KM_SLEEP);
        dmd->ddm_dev = makedevice(clone_major, (minor_t)major);
        dmd->ddm_spec_type = S_IFCHR;
        dvp = dv_mknod(ddv, clone_dip, devnm, dmd);
        kmem_free(dmd, sizeof (*dmd));
        kmem_free(devnm, MAXNAMELEN);
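
/*
 * Illustrative sketch, not part of the original file: how a clone minor name
 * maps to a dev_t the way dv_clone_mknod() does above.  Opening a node such
 * as clone@0:tcp yields a device whose major is clone_major and whose minor
 * is the major number of the named driver.
 */
static dev_t
clone_dev_example(const char *drvname)
{
        major_t drvmajor = ddi_name_to_major((char *)drvname);

        if (drvmajor == DDI_MAJOR_T_NONE)
                return (NODEV);
        return (makedevice(clone_major, (minor_t)drvmajor));
}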
/*
 * Given the parent directory node, and a name in it, returns the
 * named dv_node to the caller (as a vnode).
 *
 * (We need pnp and rdir for doing shadow lookups; they can be NULL)
 */
dv_find(struct dv_node *ddv, char *nm, struct vnode **vpp, struct pathname *pnp,
    struct vnode *rdir, struct cred *cred, uint_t ndi_flags)
        extern int isminiroot;  /* see modctl.c */
        int rv = 0, was_busy = 0, nmlen, write_held = 0;
        struct dv_node *dv, *dup;
        dev_info_t *pdevi, *devi = NULL;
        struct ddi_minor_data *dmd;

        dcmn_err3(("dv_find %s\n", nm));

        if (!rw_tryenter(&ddv->dv_contents, RW_READER)) {
                if (tsd_get(devfs_clean_key))
                rw_enter(&ddv->dv_contents, RW_READER);

        rw_exit(&ddv->dv_contents);

        /*
         * Empty name or ., return node itself.
         */
        if ((nmlen == 0) || ((nmlen == 1) && (nm[0] == '.'))) {
                rw_exit(&ddv->dv_contents);

        /*
         * .., return the parent directory
         */
        if ((nmlen == 2) && (strcmp(nm, "..") == 0)) {
                *vpp = DVTOV(ddv->dv_dotdot);
                rw_exit(&ddv->dv_contents);

        /*
         * Fail anything without a valid device name component
         */
        if (nm[0] == '@' || nm[0] == ':') {
                dcmn_err3(("devfs: no driver '%s'\n", nm));
                rw_exit(&ddv->dv_contents);
        /*
         * So, now we have to deal with the trickier stuff.
         *
         * (a) search the existing list of dv_nodes on this directory
         */
        if ((dv = dv_findbyname(ddv, nm)) != NULL) {

                ASSERT(RW_LOCK_HELD(&ddv->dv_contents));

                if (!rw_tryenter(&dv->dv_contents, RW_READER)) {
                        if (tsd_get(devfs_clean_key)) {
                                rw_exit(&ddv->dv_contents);

                        rw_enter(&dv->dv_contents, RW_READER);

                if ((dv->dv_attrvp != NULLVP) ||
                    (vp->v_type != VDIR && dv->dv_attr != NULL)) {
                        /*
                         * Common case - we already have attributes
                         */
                        rw_exit(&dv->dv_contents);
                        rw_exit(&ddv->dv_contents);

                /*
                 * No attribute vp, try and build one.
                 *
                 * dv_shadow_node() can briefly drop &dv->dv_contents lock
                 * if it is unable to upgrade it to a write lock. If the
                 * current thread has come in through the bottom-up device
                 * configuration devfs_clean() path, we may deadlock against
                 * a thread performing top-down device configuration if it
                 * grabs the contents lock. To avoid this, when we are on the
                 * devfs_clean() path we attempt to upgrade the dv_contents
                 * lock before we call dv_shadow_node().
                 */
                if (tsd_get(devfs_clean_key)) {
                        if (!rw_tryupgrade(&dv->dv_contents)) {
                                rw_exit(&dv->dv_contents);
                                rw_exit(&ddv->dv_contents);

                        write_held = DV_SHADOW_WRITE_HELD;

                dv_shadow_node(DVTOV(ddv), nm, vp, pnp, rdir, cred,

                rw_exit(&dv->dv_contents);
                rw_exit(&ddv->dv_contents);
        /*
         * (b) Search the child devinfo nodes of our parent directory,
         * looking for the named node. If we find it, build a new
         * node, then grab the writers lock, search the directory
         * if it's still not there, then insert it.
         *
         * We drop the devfs locks before accessing the device tree.
         * Take care to mark the node BUSY so that a forced devfs_clean
         * doesn't mark the directory node stale.
         *
         * Also, check if we are called as part of devfs_clean or
         * reset_perm. If so, simply return not found because there
         * is nothing to clean.
         */
        if (tsd_get(devfs_clean_key)) {
                rw_exit(&ddv->dv_contents);

        /*
         * We could be either READ or WRITE locked at
         * this point. Upgrade if we are read locked.
         */
        ASSERT(RW_LOCK_HELD(&ddv->dv_contents));
        if (rw_read_locked(&ddv->dv_contents) &&
            !rw_tryupgrade(&ddv->dv_contents)) {
                rw_exit(&ddv->dv_contents);
                rw_enter(&ddv->dv_contents, RW_WRITER);
                /*
                 * Things may have changed when we dropped
                 * the contents lock, so start from top again
                 */

        ddv->dv_busy++;         /* mark busy before dropping lock */

        rw_exit(&ddv->dv_contents);

        pdevi = ddv->dv_devi;
        ASSERT(pdevi != NULL);

        mnm = strchr(nm, ':');

        /*
         * Configure one nexus child, will call nexus's bus_ops
         * If successful, devi is held upon returning.
         * Note: devfs lookup should not be configuring grandchildren.
         */
        ASSERT((ndi_flags & NDI_CONFIG) == 0);

        rv = ndi_devi_config_one(pdevi, nm, &devi, ndi_flags | NDI_NO_EVENT);
        if (rv != NDI_SUCCESS) {

        /* Check if this is a path alias */
        if (ddi_aliases_present == B_TRUE && ddi_get_parent(devi) != pdevi) {
                char *curr = kmem_alloc(MAXPATHLEN, KM_SLEEP);

                (void) ddi_pathname(devi, curr);
                if (devfs_lookupname(curr, NULL, &vp) == 0 && vp) {
                        kmem_free(curr, MAXPATHLEN);

                kmem_free(curr, MAXPATHLEN);

        /*
         * If we configured a hidden node, consider it notfound.
         */
        if (ndi_dev_is_hidden_node(devi)) {
                ndi_rele_devi(devi);

        /*
         * Don't make vhci clients visible under phci, unless we
         */
        if (isminiroot == 0 && ddi_get_parent(devi) != pdevi) {
                ndi_rele_devi(devi);

        ASSERT(devi && i_ddi_devi_attached(devi));
        /*
         * Invalidate cache to notice newly created minor nodes.
         */
        rw_enter(&ddv->dv_contents, RW_WRITER);
        ddv->dv_flags |= DV_BUILD;
        rw_exit(&ddv->dv_contents);

        /*
         * mkdir for nexus drivers and leaf nodes as well. If we are racing
         * and create a duplicate, the duplicate will be destroyed below.
         */
                dv = dv_mkdir(ddv, devi, nm);

                /*
                 * Allocate dmd first to avoid KM_SLEEP with active
                 */
                dmd = kmem_zalloc(sizeof (*dmd), KM_SLEEP);
                ndi_devi_enter(devi, &circ);
                if (devi == clone_dip) {
                        /*
                         * For clone minors, load the driver indicated by
                         */
                        dv = dv_clone_mknod(ddv, mnm + 1);
                        /*
                         * Find minor node and make a dv_node
                         */
                        if (dv_find_leafnode(devi, mnm + 1, dmd) == 0) {
                                dv = dv_mknod(ddv, devi, nm, dmd);
                                if (dmd->ddm_node_priv)
                                        dpfree(dmd->ddm_node_priv);

                ndi_devi_exit(devi, circ);
                kmem_free(dmd, sizeof (*dmd));

        /*
         * Release hold from ndi_devi_config_one()
         */
        ndi_rele_devi(devi);

        /*
         * We have released the dv_contents lock, need to check
         * if another thread already created a duplicate node
         */
        rw_enter(&ddv->dv_contents, RW_WRITER);
        if ((dup = dv_findbyname(ddv, nm)) == NULL) {

        /*
         * Duplicate found, use the existing node
         */

        /*
         * Fail lookup of device that has now become hidden (typically via
         * hot removal of open device).
         */
        if (dv->dv_devi && ndi_dev_is_hidden_node(dv->dv_devi)) {
                dcmn_err2(("dv_find: nm %s failed: hidden/removed\n", nm));

        /*
         * Skip non-kernel lookups of internal nodes.
         * This use of kcred to distinguish between user and
         * internal kernel lookups is unfortunate. The information
         * provided by the seg argument to lookupnameat should
         * evolve into a lookup flag for filesystems that need
         */
        if ((dv->dv_flags & DV_INTERNAL) && (cred != kcred)) {
                dcmn_err2(("dv_find: nm %s failed: internal\n", nm));

        dcmn_err2(("dv_find: returning vp for nm %s\n", nm));

        if (vp->v_type == VCHR || vp->v_type == VBLK) {
                /*
                 * If vnode is a device, return special vnode instead
                 * (though it knows all about -us- via sp->s_realvp,
                 * sp->s_devvp, and sp->s_dip)
                 */
                *vpp = specvp_devfs(vp, vp->v_rdev, vp->v_type, cred,

        /*
         * Non-zero was_busy tells us that we are not in the
         * devfs_clean() path which in turn means that we can afford
         * to take the contents lock unconditionally.
         */
        rw_enter(&ddv->dv_contents, RW_WRITER);

        rw_exit(&ddv->dv_contents);
/*
 * The given directory node is out-of-date; that is, it has been
 * marked as needing to be rebuilt, possibly because some new devinfo
 * node has come into existence, or possibly because this is the first
 * time we've been here.
 */
dv_filldir(struct dv_node *ddv)
        dev_info_t *devi, *pdevi;
        struct ddi_minor_data *dmd;
        char devnm[MAXNAMELEN];

        ASSERT(DVTOV(ddv)->v_type == VDIR);
        ASSERT(RW_WRITE_HELD(&ddv->dv_contents));
        ASSERT(ddv->dv_flags & DV_BUILD);

        dcmn_err3(("dv_filldir: %s\n", ddv->dv_name));

        pdevi = ddv->dv_devi;

        if (ndi_devi_config(pdevi, NDI_NO_EVENT) != NDI_SUCCESS) {
                dcmn_err3(("dv_filldir: config error %s\n", ddv->dv_name));

        ndi_devi_enter(pdevi, &circ);
        for (devi = ddi_get_child(pdevi); devi;
            devi = ddi_get_next_sibling(devi)) {
                /*
                 * While we know enough to create a directory at DS_INITIALIZED,
                 * the directory will be empty until DS_ATTACHED. The existence
                 * of an empty directory dv_node will cause a devi_ref, which
                 * has caused problems for existing code paths doing offline/DR
                 * type operations - making devfs_clean coordination even more
                 * sensitive and error prone. Given this, the 'continue' below
                 * is checking for DS_ATTACHED instead of DS_INITIALIZED.
                 */
                if (i_ddi_node_state(devi) < DS_ATTACHED)
                        continue;

                /* skip hidden nodes */
                if (ndi_dev_is_hidden_node(devi))
                        continue;

                dcmn_err3(("dv_filldir: node %s\n", ddi_node_name(devi)));

                ndi_devi_enter(devi, &ccirc);
                for (dmd = DEVI(devi)->devi_minor; dmd; dmd = dmd->next) {
                        /*
                         * Skip alias nodes, internal nodes, and nodes
                         * without a name. We allow DDM_DEFAULT nodes
                         * to appear in readdir.
                         */
                        if ((dmd->type == DDM_ALIAS) ||
                            (dmd->type == DDM_INTERNAL_PATH) ||
                            (dmd->ddm_name == NULL))
                                continue;

                        addr = ddi_get_name_addr(devi);
                                (void) sprintf(devnm, "%s@%s:%s",
                                    ddi_node_name(devi), addr, dmd->ddm_name);
                                (void) sprintf(devnm, "%s:%s",
                                    ddi_node_name(devi), dmd->ddm_name);

                        if ((dv = dv_findbyname(ddv, devnm)) != NULL) {
                                /* dv_node already exists */

                        dv = dv_mknod(ddv, devi, devnm, dmd);
                ndi_devi_exit(devi, ccirc);

                (void) ddi_deviname(devi, devnm);
                if ((dv = dv_findbyname(ddv, devnm + 1)) == NULL) {
                        /* directory doesn't exist */
                        dv = dv_mkdir(ddv, devi, devnm + 1);

        ndi_devi_exit(pdevi, circ);

        ddv->dv_flags &= ~DV_BUILD;
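
/*
 * Illustrative sketch, not part of the original file: the two name forms
 * built by dv_filldir() above.  A minor under a node with a unit address
 * becomes "node@addr:minor"; without a unit address it is "node:minor".
 * The exact guard on addr used here is an assumption.
 */
static void
devfs_name_example(dev_info_t *devi, struct ddi_minor_data *dmd,
    char *buf, size_t len)
{
        char *addr = ddi_get_name_addr(devi);

        if (addr != NULL && *addr != '\0')
                (void) snprintf(buf, len, "%s@%s:%s",
                    ddi_node_name(devi), addr, dmd->ddm_name);
        else
                (void) snprintf(buf, len, "%s:%s",
                    ddi_node_name(devi), dmd->ddm_name);
}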
/*
 * Given a directory node, clean out all the nodes beneath.
 *
 * VDIR:	Reinvoke to clean them, then delete the directory.
 * VCHR, VBLK:	Just blow them away.
 *
 * Mark the directories touched as in need of a rebuild, in case
 * we fall over part way through. When DV_CLEAN_FORCE is specified,
 * we mark referenced empty directories as stale to facilitate DR.
 */
dv_cleandir(struct dv_node *ddv, char *devnm, uint_t flags)
        struct dv_node *next;

        /*
         * We should always be holding the tsd_clean_key here: dv_cleandir()
         * will be called as a result of a devfs_clean request and the
         * tsd_clean_key will be set either in devfs_clean() itself or in
         * devfs_clean_vhci().
         *
         * Since we are on the devfs_clean path, we return EBUSY if we cannot
         * get the contents lock: if we blocked here we might deadlock against
         * a thread performing top-down device configuration.
         */
        ASSERT(tsd_get(devfs_clean_key));

        dcmn_err3(("dv_cleandir: %s\n", ddv->dv_name));

        if (!(flags & DV_CLEANDIR_LCK) &&
            !rw_tryenter(&ddv->dv_contents, RW_WRITER))
                return (EBUSY);

        for (dv = DV_FIRST_ENTRY(ddv); dv; dv = next) {
                next = DV_NEXT_ENTRY(ddv, dv);

                /*
                 * If devnm is specified, the non-minor portion of the
                 * name must match devnm.
                 */
                    (strncmp(devnm, dv->dv_name, strlen(devnm)) ||
                    (dv->dv_name[strlen(devnm)] != ':' &&
                    dv->dv_name[strlen(devnm)] != '\0')))

                /* check type of what we are cleaning */
                if (vp->v_type == VDIR) {
                        /* recurse on directories */
                        rw_enter(&dv->dv_contents, RW_WRITER);
                        if (dv_cleandir(dv, NULL,
                            flags | DV_CLEANDIR_LCK) == EBUSY) {
                                rw_exit(&dv->dv_contents);

                        /* A clean directory is an empty directory... */
                        ASSERT(dv->dv_nlink == 2);
                        mutex_enter(&vp->v_lock);
                        if (vp->v_count > 0) {
                                /*
                                 * ... but an empty directory can still have
                                 * references to it. If we have dv_busy or
                                 * DV_CLEAN_FORCE is *not* specified then a
                                 * referenced directory is considered busy.
                                 */
                                if (dv->dv_busy || !(flags & DV_CLEAN_FORCE)) {
                                        mutex_exit(&vp->v_lock);
                                        rw_exit(&dv->dv_contents);

                                /*
                                 * Mark referenced directory stale so that DR
                                 * will succeed even if a shell has
                                 * /devices/xxx as current directory (causing
                                 * VN_HOLD reference to an empty directory).
                                 */
                                ASSERT(!DV_STALE(dv));
                                ndi_rele_devi(dv->dv_devi);
                                dv->dv_devi = NULL;     /* mark DV_STALE */
                } else {
                        ASSERT((vp->v_type == VCHR) || (vp->v_type == VBLK));
                        ASSERT(dv->dv_nlink == 1);      /* no hard links */
                        mutex_enter(&vp->v_lock);
                        if (vp->v_count > 0) {
                                mutex_exit(&vp->v_lock);

                /* unlink from directory */

                mutex_exit(&vp->v_lock);
                if (vp->v_type == VDIR)
                        rw_exit(&dv->dv_contents);

                /* destroy vnode if ref count is zero */
                if (vp->v_count == 0)
                        dv_destroy(dv, flags);

                /*
                 * If devnm is not NULL we return immediately on busy,
                 * otherwise we continue destroying unused dv_node's.
                 */

        /*
         * This code may be invoked to inform devfs that a new node has
         * been created in the kernel device tree. So we always set
         * the DV_BUILD flag to allow the next dv_filldir() to pick
         * up the new devinfo nodes.
         */
        ddv->dv_flags |= DV_BUILD;

        if (!(flags & DV_CLEANDIR_LCK))
                rw_exit(&ddv->dv_contents);

        return (busy ? EBUSY : 0);
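
/*
 * Illustrative sketch, not part of the original file: the non-minor name
 * match used by dv_cleandir() above (and again by dv_walk() below).  "devnm"
 * matches a dv_node when it equals the part of dv_name before the ':' minor
 * separator.
 */
static int
dv_name_matches_example(const char *devnm, const char *dv_name)
{
        size_t len = strlen(devnm);

        if (strncmp(devnm, dv_name, len) != 0)
                return (0);
        /* the prefix must end exactly at the minor separator or at the end */
        return (dv_name[len] == ':' || dv_name[len] == '\0');
}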
/*
 * Walk through the devfs hierarchy, correcting the permissions of
 * devices with default permissions that do not match those specified
 * by minor perm. This can only be done for all drivers for now.
 */
dv_reset_perm_dir(struct dv_node *ddv, uint_t flags)
        struct vattr *attrp;

        rw_enter(&ddv->dv_contents, RW_WRITER);
        for (dv = DV_FIRST_ENTRY(ddv); dv; dv = DV_NEXT_ENTRY(ddv, dv)) {

                rw_enter(&dv->dv_contents, RW_READER);

                if (vp->v_type == VDIR) {
                        rw_exit(&dv->dv_contents);
                        if (dv_reset_perm_dir(dv, flags) != 0) {

                ASSERT(vp->v_type == VCHR || vp->v_type == VBLK);

                /*
                 * Check for permissions from minor_perm
                 * If there are none, we're done
                 */
                rw_exit(&dv->dv_contents);
                if (dev_minorperm(dv->dv_devi, nm, &mp) != 0)
                        continue;

                rw_enter(&dv->dv_contents, RW_READER);

                /*
                 * Allow a node's permissions to be altered
                 * permanently from the defaults by chmod,
                 * using the shadow node as backing store.
                 * Otherwise, update node to minor_perm permissions.
                 */
                if (dv->dv_attrvp == NULLVP) {
                        /*
                         * No attribute vp, try to find one.
                         */
                        dv_shadow_node(DVTOV(ddv), nm, vp,
                            NULL, NULLVP, kcred, 0);

                if (dv->dv_attrvp != NULLVP || dv->dv_attr == NULL) {
                        rw_exit(&dv->dv_contents);

                attrp = dv->dv_attr;

                if (VATTRP_MP_CMP(attrp, mp) == 0) {
                        dcmn_err5(("%s: no perm change: "
                            "%d %d 0%o\n", nm, attrp->va_uid,
                            attrp->va_gid, attrp->va_mode));
                        rw_exit(&dv->dv_contents);

                old_uid = attrp->va_uid;
                old_gid = attrp->va_gid;
                old_mode = attrp->va_mode;

                VATTRP_MP_MERGE(attrp, mp);
                mutex_enter(&vp->v_lock);
                if (vp->v_count > 0) {

                mutex_exit(&vp->v_lock);

                dcmn_err5(("%s: perm %d/%d/0%o -> %d/%d/0%o (%d)\n",
                    nm, old_uid, old_gid, old_mode, attrp->va_uid,
                    attrp->va_gid, attrp->va_mode, error));

                rw_exit(&dv->dv_contents);

        ddv->dv_flags |= DV_BUILD;

        rw_exit(&ddv->dv_contents);
devfs_reset_perm(uint_t flags)
        struct dv_node *dvp;

        if ((dvp = devfs_dip_to_dvnode(ddi_root_node())) == NULL)

        VN_HOLD(DVTOV(dvp));
        rval = dv_reset_perm_dir(dvp, flags);
        VN_RELE(DVTOV(dvp));
/*
 * Clean up dangling devfs shadow nodes for removed
 * drivers so that, in the event the driver is re-added
 * to the system, newly created nodes won't incorrectly
 * pick up these stale shadow node permissions.
 *
 * This is accomplished by walking down the pathname
 * to the directory, starting at the root's attribute
 * node, then removing all minors matching the specified
 * node name. Care must be taken to remove all entries
 * in a directory before the directory itself, so that
 * the clean-up associated with rem_drv'ing a nexus driver
 * does not inadvertently result in an inconsistent
 * filesystem underlying devfs.
 */
devfs_remdrv_rmdir(vnode_t *dirvp, const char *dir, vnode_t *rvp)
        struct dirent64 *dp;

        dlen = ndirents * (sizeof (*dbuf));
        dbuf = kmem_alloc(dlen, KM_SLEEP);

        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_extflg = UIO_COPY_CACHED;
        uio.uio_loffset = 0;
        uio.uio_llimit = MAXOFFSET_T;

        while (!error && !eof) {
                uio.uio_resid = dlen;
                iov.iov_base = (char *)dbuf;

                (void) fop_rwlock(dirvp, V_WRITELOCK_FALSE, NULL);
                error = fop_readdir(dirvp, &uio, kcred, &eof, NULL, 0);
                fop_rwunlock(dirvp, V_WRITELOCK_FALSE, NULL);

                dbuflen = dlen - uio.uio_resid;

                if (error || dbuflen == 0)
                        break;

                for (dp = dbuf; ((intptr_t)dp < (intptr_t)dbuf + dbuflen);
                    dp = (dirent64_t *)((intptr_t)dp + dp->d_reclen)) {

                        if (strcmp(nm, ".") == 0 || strcmp(nm, "..") == 0)
                                continue;

                        error = fop_lookup(dirvp, nm,
                            &vp, NULL, 0, NULL, kcred, NULL, NULL, NULL);
                            ("rem_drv %s/%s lookup (%d)\n",

                        ASSERT(vp->v_type == VDIR ||
                            vp->v_type == VCHR || vp->v_type == VBLK);

                        if (vp->v_type == VDIR) {
                                error = devfs_remdrv_rmdir(vp, nm, rvp);
                                error = fop_rmdir(dirvp,
                                    (char *)nm, rvp, kcred, NULL, 0);
                                    ("rem_drv %s/%s rmdir (%d)\n",
                                error = fop_remove(dirvp, (char *)nm, kcred,
                                    ("rem_drv %s/%s remove (%d)\n",

        kmem_free(dbuf, dlen);
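
/*
 * Illustrative sketch, not part of the original file: the uio/iovec setup
 * used by the readdir loops above when reading a directory from kernel
 * context.  Error handling and dirent parsing are omitted.
 */
static int
devfs_readdir_example(vnode_t *dirvp, void *dbuf, size_t dlen, offset_t *offp,
    int *eofp)
{
        struct iovec iov;
        struct uio uio;
        int error;

        bzero(&uio, sizeof (uio));
        iov.iov_base = (char *)dbuf;
        iov.iov_len = dlen;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_extflg = UIO_COPY_CACHED;
        uio.uio_loffset = *offp;
        uio.uio_llimit = MAXOFFSET_T;
        uio.uio_resid = dlen;

        (void) fop_rwlock(dirvp, V_WRITELOCK_FALSE, NULL);
        error = fop_readdir(dirvp, &uio, kcred, eofp, NULL, 0);
        fop_rwunlock(dirvp, V_WRITELOCK_FALSE, NULL);

        *offp = uio.uio_loffset;        /* resume point for the next call */
        return (error);
}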
devfs_remdrv_cleanup(const char *dir, const char *nodename)
        struct dirent64 *dp;
        int nodenamelen = strlen(nodename);
        vnode_t *rvp;   /* root node of the underlying attribute fs */

        dcmn_err5(("devfs_remdrv_cleanup: %s %s\n", dir, nodename));

        if (error = pn_get((char *)dir, UIO_SYSSPACE, &pn))

        rvp = dvroot->dv_attrvp;
        ASSERT(rvp != NULL);

        nm = kmem_alloc(MAXNAMELEN, KM_SLEEP);

        while (pn_pathleft(&pn)) {
                ASSERT(dirvp->v_type == VDIR);
                (void) pn_getcomponent(&pn, nm);
                ASSERT((strcmp(nm, ".") != 0) && (strcmp(nm, "..") != 0));
                error = fop_lookup(dirvp, nm, &vp, NULL, 0, rvp, kcred,
                        dcmn_err5(("remdrv_cleanup %s lookup error %d\n",
                        kmem_free(nm, MAXNAMELEN);

        ASSERT(dirvp->v_type == VDIR);

        kmem_free(nm, MAXNAMELEN);

        dlen = ndirents * (sizeof (*dbuf));
        dbuf = kmem_alloc(dlen, KM_SLEEP);

        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_extflg = UIO_COPY_CACHED;
        uio.uio_loffset = 0;
        uio.uio_llimit = MAXOFFSET_T;

        while (!error && !eof) {
                uio.uio_resid = dlen;
                iov.iov_base = (char *)dbuf;

                (void) fop_rwlock(dirvp, V_WRITELOCK_FALSE, NULL);
                error = fop_readdir(dirvp, &uio, kcred, &eof, NULL, 0);
                fop_rwunlock(dirvp, V_WRITELOCK_FALSE, NULL);

                dbuflen = dlen - uio.uio_resid;

                if (error || dbuflen == 0)
                        break;

                for (dp = dbuf; ((intptr_t)dp < (intptr_t)dbuf + dbuflen);
                    dp = (dirent64_t *)((intptr_t)dp + dp->d_reclen)) {

                        if (strcmp(nm, ".") == 0 || strcmp(nm, "..") == 0)
                                continue;

                        if (strncmp(nm, nodename, nodenamelen) != 0)
                                continue;

                        error = fop_lookup(dirvp, nm, &vp,
                            NULL, 0, NULL, kcred, NULL, NULL, NULL);
                            ("rem_drv %s/%s lookup (%d)\n",

                        ASSERT(vp->v_type == VDIR ||
                            vp->v_type == VCHR || vp->v_type == VBLK);

                        if (vp->v_type == VDIR) {
                                error = devfs_remdrv_rmdir(vp, nm, rvp);
                                error = fop_rmdir(dirvp, (char *)nm,
                                    rvp, kcred, NULL, 0);
                                    ("rem_drv %s/%s rmdir (%d)\n",
                                error = fop_remove(dirvp, (char *)nm, kcred,
                                    ("rem_drv %s/%s remove (%d)\n",

        kmem_free(dbuf, dlen);
        struct dv_list  *next;

dv_walk(
        struct dv_node  *ddv,
        void            (*callback)(struct dv_node *, void *),
        struct dv_list  *head, *tail, *next;

        dcmn_err3(("dv_walk: ddv = %s, devnm = %s\n",
            ddv->dv_name, devnm ? devnm : "<null>"));

        ASSERT(dvp->v_type == VDIR);

        head = tail = next = NULL;

        rw_enter(&ddv->dv_contents, RW_READER);
        mutex_enter(&dvp->v_lock);
        for (dv = DV_FIRST_ENTRY(ddv); dv; dv = DV_NEXT_ENTRY(ddv, dv)) {
                /*
                 * If devnm is not NULL and is not the empty string,
                 * select only dv_nodes with matching non-minor name
                 */
                if (devnm && (len = strlen(devnm)) &&
                    (strncmp(devnm, dv->dv_name, len) ||
                    (dv->dv_name[len] != ':' && dv->dv_name[len] != '\0')))
                        continue;

                if (DVTOV(dv)->v_type != VDIR)
                        continue;

                next = kmem_zalloc(sizeof (*next), KM_SLEEP);

        dv_walk(head->dv, NULL, callback, arg);
        kmem_free(head, sizeof (*head));

        rw_exit(&ddv->dv_contents);
        mutex_exit(&dvp->v_lock);