/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/diskslice.h>
#include <sys/sysctl.h>
#include <sys/devfs.h>
#include <sys/pioctl.h>
#include <vfs/fifofs/fifo.h>
#include <machine/limits.h>
#include <vm/vm_page2.h>

#ifndef SPEC_CHAIN_DEBUG
#define SPEC_CHAIN_DEBUG 0
#endif
MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_vop_badop

static int devfs_vop_badop(struct vop_generic_args *);
static int devfs_vop_access(struct vop_access_args *);
static int devfs_vop_inactive(struct vop_inactive_args *);
static int devfs_vop_reclaim(struct vop_reclaim_args *);
static int devfs_vop_readdir(struct vop_readdir_args *);
static int devfs_vop_getattr(struct vop_getattr_args *);
static int devfs_vop_setattr(struct vop_setattr_args *);
static int devfs_vop_readlink(struct vop_readlink_args *);
static int devfs_vop_print(struct vop_print_args *);

static int devfs_vop_nresolve(struct vop_nresolve_args *);
static int devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_vop_nmkdir(struct vop_nmkdir_args *);
static int devfs_vop_nsymlink(struct vop_nsymlink_args *);
static int devfs_vop_nrmdir(struct vop_nrmdir_args *);
static int devfs_vop_nremove(struct vop_nremove_args *);

static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);

static int devfs_fo_close(struct file *);
static int devfs_fo_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_fo_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_fo_stat(struct file *, struct stat *, struct ucred *);
static int devfs_fo_kqfilter(struct file *, struct knote *);
static int devfs_fo_ioctl(struct file *, u_long, caddr_t,
			  struct ucred *, struct sysmsg *);
static __inline int sequential_heuristic(struct uio *, struct file *);

extern struct lock devfs_lock;
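
/*
 * Note: the directory topology walks and modifications implemented below are
 * serialized by devfs_lock; the vnode ops take it exclusively around anything
 * that mutates or traverses the node tree (resolve, readdir, mkdir, symlink,
 * remove, clone) and shared for simple read-only accesses such as readlink.
 */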
/*
 * devfs vnode operations for regular files.  All vnode ops are MPSAFE.
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_vop_access,
	.vop_advlock =		DEVFS_BADOP,
	.vop_bmap =		DEVFS_BADOP,
	.vop_close =		vop_stdclose,
	.vop_getattr =		devfs_vop_getattr,
	.vop_inactive =		devfs_vop_inactive,
	.vop_ncreate =		DEVFS_BADOP,
	.vop_nresolve =		devfs_vop_nresolve,
	.vop_nlookupdotdot =	devfs_vop_nlookupdotdot,
	.vop_nlink =		DEVFS_BADOP,
	.vop_nmkdir =		devfs_vop_nmkdir,
	.vop_nmknod =		DEVFS_BADOP,
	.vop_nremove =		devfs_vop_nremove,
	.vop_nrename =		DEVFS_BADOP,
	.vop_nrmdir =		devfs_vop_nrmdir,
	.vop_nsymlink =		devfs_vop_nsymlink,
	.vop_open =		vop_stdopen,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_vop_print,
	.vop_read =		DEVFS_BADOP,
	.vop_readdir =		devfs_vop_readdir,
	.vop_readlink =		devfs_vop_readlink,
	.vop_reallocblks =	DEVFS_BADOP,
	.vop_reclaim =		devfs_vop_reclaim,
	.vop_setattr =		devfs_vop_setattr,
	.vop_write =		DEVFS_BADOP,
	.vop_ioctl =		DEVFS_BADOP
};
/*
 * devfs vnode operations for character devices.  All vnode ops are MPSAFE.
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_vop_access,
	.vop_advlock =		devfs_spec_advlock,
	.vop_bmap =		devfs_spec_bmap,
	.vop_close =		devfs_spec_close,
	.vop_freeblks =		devfs_spec_freeblks,
	.vop_fsync =		devfs_spec_fsync,
	.vop_getattr =		devfs_vop_getattr,
	.vop_getpages =		devfs_spec_getpages,
	.vop_inactive =		devfs_vop_inactive,
	.vop_open =		devfs_spec_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_vop_print,
	.vop_kqfilter =		devfs_spec_kqfilter,
	.vop_read =		devfs_spec_read,
	.vop_readdir =		DEVFS_BADOP,
	.vop_readlink =		DEVFS_BADOP,
	.vop_reallocblks =	DEVFS_BADOP,
	.vop_reclaim =		devfs_vop_reclaim,
	.vop_setattr =		devfs_vop_setattr,
	.vop_strategy =		devfs_spec_strategy,
	.vop_write =		devfs_spec_write,
	.vop_ioctl =		devfs_spec_ioctl
};
/*
 * devfs file pointer operations.  All fileops are MPSAFE.
 */
struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;

struct fileops devfs_dev_fileops = {
	.fo_read =	devfs_fo_read,
	.fo_write =	devfs_fo_write,
	.fo_ioctl =	devfs_fo_ioctl,
	.fo_kqfilter =	devfs_fo_kqfilter,
	.fo_stat =	devfs_fo_stat,
	.fo_close =	devfs_fo_close,
	.fo_shutdown =	nofo_shutdown
};
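
/*
 * Note: devfs_spec_open() re-points the file pointer's ops vector at this
 * table (ap->a_fp->f_ops = &devfs_dev_fileops), so once a device node has
 * been opened, descriptor-level I/O is handled by the devfs_fo_*() routines
 * below; the read/write/ioctl paths then talk to the device directly
 * instead of going back through the VOP table.
 */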
/*
 * These two functions are possibly temporary hacks for devices (aka
 * the pty code) which want to control the node attributes themselves.
 *
 * XXX we may ultimately desire to simply remove the uid/gid/mode
 * from the node entirely.
 *
 * MPSAFE - sorta.  Theoretically the overwrite can compete since they
 *	    are loading from the same fields.
 */
static __inline void
node_sync_dev_get(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		node->uid = dev->si_uid;
		node->gid = dev->si_gid;
		node->mode = dev->si_perms;
	}
}

static __inline void
node_sync_dev_set(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		dev->si_uid = node->uid;
		dev->si_gid = node->gid;
		dev->si_perms = node->mode;
	}
}
/*
 * generic entry point for unsupported operations
 */
static int
devfs_vop_badop(struct vop_generic_args *ap)
{
	return (EIO);
}
static int
devfs_vop_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);

	return error;
}
static int
devfs_vop_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);
	return 0;
}
static int
devfs_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp = ap->a_vp;
	int locked;

	/*
	 * Check if it is locked already.  If not, we acquire the devfs lock.
	 */
	if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.  Interlocked by devfs_lock.
	 */
	if ((node = DEVFS_NODE(vp)) != NULL) {
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}

	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev needs to be properly released using v_release_rdev
	 * Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	v_release_rdev(vp);
	return 0;
}
static int
devfs_vop_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	off_t *cookies = NULL;
	off_t saveoff;
	int cookie_index = 0;
	int ncookies = -1;
	int error2;
	int error;
	int r;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
	if (error)
		return (error);

	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
	}

	nanotime(&dnode->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make
		 * sure that the target isn't hidden.  If it is, we don't
		 * show the link in the directory listing.
		 */
		if ((node->node_type == Nlink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);
		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
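
/*
 * devfs_vop_nresolve() looks the requested name up in the directory node's
 * child list and, for Nlink alias nodes, follows link_target until a
 * non-link node is reached (giving up if the chase gets suspiciously deep).
 * On success the resolved vnode is attached to the namecache entry,
 * otherwise a negative entry is set.
 */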
static int
devfs_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth = 0;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		while ((found->node_type == Nlink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW,
					    "Recursive link or depth >= 8");
				error = ENOENT;
				goto out;
			}
			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}
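
/*
 * devfs_vop_nlookupdotdot() resolves ".." by allocating a vnode for the
 * parent devfs node, if one exists.
 */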
static int
devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}
/*
 * getattr() - Does not need a lock since the vp is refd
 */
static int
devfs_vop_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct partinfo pinfo;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	/*
	 * XXX This is a temporary hack to prevent crashes when the device is
	 * being destroyed (and so the underlying node will be gone) while
	 * a userland program is blocked in a read().
	 */
	node_sync_dev_get(node);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = 0;

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	if ((node->node_type == Ndev) && node->d_dev)  {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_bytes = vap->va_size = node->symlink_namelen;
	}

	/*
	 * For a disk-type device, va_size is the size of the underlying
	 * device, so that lseek() works properly.
	 */
	if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) {
		bzero(&pinfo, sizeof(pinfo));
		error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo,
				   0, proc0.p_ucred, NULL, NULL);
		if ((error == 0) && (pinfo.media_blksize != 0)) {
			vap->va_size = pinfo.media_size;
		} else {
			vap->va_size = 0;
			error = 0;
		}
	}

	return (error);
}
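
/*
 * devfs_vop_setattr() applies chown/chmod style changes to the devfs node
 * under the devfs lock and then pushes the new uid/gid/mode back into the
 * underlying cdev via node_sync_dev_set().
 */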
static int
devfs_vop_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap;
	uid_t cur_uid;
	gid_t cur_gid;
	mode_t cur_mode;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vap = ap->a_vap;

	if ((vap->va_uid != (uid_t)VNOVAL) || (vap->va_gid != (gid_t)VNOVAL)) {
		cur_uid = node->uid;
		cur_gid = node->gid;
		cur_mode = node->mode;
		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred, &cur_uid, &cur_gid, &cur_mode);
		if (error)
			goto out;

		if (node->uid != cur_uid || node->gid != cur_gid) {
			node->uid = cur_uid;
			node->gid = cur_gid;
			node->mode = cur_mode;
		}
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		cur_mode = node->mode;
		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 node->uid, node->gid, &cur_mode);
		if (error == 0 && node->mode != cur_mode) {
			node->mode = cur_mode;
		}
	}

out:
	node_sync_dev_set(node);
	nanotime(&node->ctime);
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}
static int
devfs_vop_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_SHARED);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}
static int
devfs_vop_print(struct vop_print_args *ap)
{
	return (0);
}
static int
devfs_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		return ENOTDIR;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Ndir,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	node = DEVFS_NODE(*ap->a_vpp);
	node->flags |= DEVFS_USER_CREATED;
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);

	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
static int
devfs_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		return ENOTDIR;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Nlink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);

	node = DEVFS_NODE(*ap->a_vpp);
	node->flags |= DEVFS_USER_CREATED;
	node->symlink_namelen = targetlen;
	node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
	memcpy(node->symlink_name, ap->a_target, targetlen);
	node->symlink_name[targetlen] = '\0';
	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);

	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
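
/*
 * devfs_vop_nrmdir() removes a directory node, but only if it was created
 * from userland (DEVFS_USER_CREATED), really is a directory, and is empty.
 */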
static int
devfs_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created dirs
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type != Ndir) {
			error = ENOTDIR;
			goto out;
		} else if (node->nchildren > 2) {
			error = ENOTEMPTY;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_unlink(ap->a_nch);
out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}
static int
devfs_vop_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type == Ndir) {
			error = EISDIR;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_unlink(ap->a_nch);
out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}
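
/*
 * devfs_spec_open() opens the underlying device.  For cloning devices a
 * new device node and vnode may be created and substituted for the one the
 * caller passed in.  It also enforces the securelevel and mounted-R/W
 * restrictions on opening disk devices for writing and, on success, switches
 * the file pointer over to devfs_dev_fileops.
 */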
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error;
	int exists;

	if (node->d_dev == NULL)
		return ENXIO;
	if (!devfs_node_is_accessible(node))
		return ENOENT;

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	/*
	 * Simple devices that don't care.  Retain the shared lock.
	 */
	if (dev_dflags(dev) & D_QUICK) {
		vn_unlock(vp);
		error = dev_dopen(dev, ap->a_mode, S_IFCHR,
				  ap->a_cred, ap->a_fp);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		if (error)
			return error;
		vop_stdopen(ap);
		goto skip;
	}

	vn_lock(vp, LK_UPGRADE | LK_RETRY);
	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name,
				   node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
				DEVFS_MNTDATA(vp->v_mount)->root_node,
				ndev, &exists, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;

				devfs_debug(DEVFS_DEBUG_DEBUG,
				    "parent here is: %s, node is: |%s|\n",
				    ((node->parent->node_type == Nroot) ?
				    "ROOT!" : node->parent->d_dir.d_name),
				    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
				    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we
				 * cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);

		/*
		 * Synchronize devfs here to make sure that, if the cloned
		 * device creates other device nodes in addition to the
		 * cloned one, all of them are created by the time we return
		 * from opening the cloned one.
		 */
		if (ndev)
			devfs_config();
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 *
	 * NOTE: Shared vnode lock probably held, but its ok as long
	 *	 as assignments are consistent.
	 */
	if (!dev->si_iosize_max)
		/* XXX: old DFLTPHYS == 64KB dependency */
		dev->si_iosize_max = min(MAXPHYS, 64*1024);

	if (dev_dflags(dev) & D_TTY)
		vsetflags(vp, VISTTY);

	/*
	 * Open the underlying device
	 */
	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred, ap->a_fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * This checks if the disk device is going to be opened for writing.
	 * It will be only allowed in the cases where securelevel permits it
	 * and it's not mounted R/W.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {
		/* Very secure mode. No open for writing allowed */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow to open for writing.
		 * In the case it's mounted read-only but securelevel
		 * is >= 1, then do not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	/*
	 * NOTE: vnode is still locked shared.  t_stop assignment should
	 *	 remain consistent so we should be ok.
	 */
	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp = dev->si_tty;

			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}

	/*
	 * NOTE: vnode is still locked shared.  assignments should
	 *	 remain consistent so we should be ok.  However,
	 *	 upgrade to exclusive if we need a VM object.
	 */
	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1);
	}

	vop_stdopen(ap);
	if (node)
		nanotime(&node->atime);

	/*
	 * If we replaced the vp the vop_stdopen() call will have loaded
	 * it into fp->f_data and vref()d the vp, giving us two refs.  So
	 * instead of just unlocking it here we have to vput() it.
	 */
	if (orig_vp)
		vput(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY) {
		if (node->flags & DEVFS_INVISIBLE)
			node->flags &= ~DEVFS_INVISIBLE;
	}

skip:
	if (ap->a_fp) {
		KKASSERT(ap->a_fp->f_type == DTYPE_VNODE);
		KKASSERT((ap->a_fp->f_flag & FMASK) == (ap->a_mode & FMASK));
		ap->a_fp->f_ops = &devfs_dev_fileops;
		KKASSERT(ap->a_fp->f_data == (void *)vp);
	}
	return 0;
}
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node;
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;
	int opencount;

	/*
	 * Devices flagged D_QUICK require no special handling.
	 */
	if (dev && dev_dflags(dev) & D_QUICK) {
		opencount = vp->v_opencount;
		if (opencount <= 1)
			opencount = count_dev(dev);	/* XXX NOT SMP SAFE */
		if (((vp->v_flag & VRECLAIMED) ||
		    (dev_dflags(dev) & D_TRACKCLOSE) ||
		    (opencount == 1))) {
			vn_unlock(vp);
			error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp);
			vn_lock(vp, LK_SHARED | LK_RETRY);
		}
		goto skip;
	}

	/*
	 * We do special tests on the opencount so unfortunately we need
	 * an exclusive lock.
	 */
	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (dev)
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called on %s! \n",
			    dev->si_name);
	else
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called, null vode!\n");

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 *
	 * XXX opencount is not SMP safe.  The vnode is locked but there
	 *     may be multiple vnodes referencing the same device.
	 */
	if (dev) {
		/*
		 * NOTE: Try to avoid global tokens when testing opencount
		 * XXX hack, fixme. needs a struct lock and opencount in
		 * struct cdev itself.
		 */
		reference_dev(dev);
		opencount = vp->v_opencount;
		if (opencount <= 1)
			opencount = count_dev(dev);	/* XXX NOT SMP SAFE */
	} else {
		opencount = 0;
	}

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke
	 * works properly.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (opencount == 1))) {
		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed.
		 */
		node = DEVFS_NODE(ap->a_vp);
		if (node && (node->flags & DEVFS_PTY))
			node->flags |= DEVFS_INVISIBLE;

		/*
		 * Unlock around dev_dclose(), unless the vnode is
		 * undergoing a vgone/reclaim (during umount).
		 */
		if ((vp->v_flag & VRECLAIMED) == 0 && vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		} else {
			needrelock = 0;
		}

		/*
		 * WARNING!  If the device destroys itself the devfs node
		 *	     can disappear here.
		 *
		 * WARNING!  vn_lock() will fail if the vp is in a VRECLAIM,
		 *	     which can occur during umount.
		 */
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp);
		/* node is now stale */

		if (needrelock) {
			if (vn_lock(vp, LK_EXCLUSIVE |
				    LK_RETRY |
				    LK_FAILRECLAIM) != 0) {
				panic("devfs_spec_close: vnode %p "
				      "unexpectedly could not be relocked",
				      vp);
			}
		}
	} else {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");
		error = 0;
	}

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
skip:
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);
}
static int
devfs_fo_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag, fp);
	devfs_clear_cdevpriv(fp);

	return (error);
}
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 */
static int
devfs_fo_read(struct file *fp, struct uio *uio,
	      struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag, fp);

	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
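
/*
 * Device-optimized file table vnode write routine.  Mirrors devfs_fo_read()
 * but also honors the append- and sync-related flags before handing the
 * uio to dev_dwrite().
 */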
static int
devfs_fo_write(struct file *fp, struct uio *uio,
	       struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	vp = (struct vnode *)fp->f_data;

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag, fp);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
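
/*
 * Device-optimized stat: the result is built from VOP_GETATTR() output,
 * with the device's own last read/write timestamps overriding the
 * filesystem times for character and block devices.
 */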
static int
devfs_fo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	struct vattr vattr;
	struct vattr *vap;
	u_short mode;
	cdev_t dev;
	int error;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	error = vn_stat(vp, sb, cred);

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;

	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_bytes;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independantly of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
					(dev->si_lastread - time_uptime);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
					(dev->si_lastwrite - time_uptime);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	return (0);
}
static int
devfs_fo_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp;
	int error;
	cdev_t dev;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	error = dev_dkqfilter(dev, kn, fp);
done:
	return (error);
}
static int
devfs_fo_ioctl(struct file *fp, u_long com, caddr_t data,
	       struct ucred *ucred, struct sysmsg *msg)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct vnode *ovp;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;
	cdev_t dev;
	int error;

	vp = ((struct vnode *)fp->f_data);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;		/* device was revoked */

	node = DEVFS_NODE(vp);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_fo_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg, fp);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_fo_ioctl() finished! \n");
	return (error);
}
static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
	return (error);
}
static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	cdev_t dev = vp->v_rdev;
	int error;

	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag, NULL);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}
/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	cdev_t dev = vp->v_rdev;
	int error;

	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag, NULL);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}
/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred, struct sysmsg *msg)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag,
			   ap->a_cred, ap->a_sysmsg, NULL));
}
/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked (EBADF) */
	node = DEVFS_NODE(vp);

	if (node)
		nanotime(&node->atime);

	return (dev_dkqfilter(dev, ap->a_kn, NULL));
}
/*
 * Convert a vnode strategy call into a device strategy call.  Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp = ap->a_vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	nbp->b_vp = vp;
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_flags = B_PAGING | B_KVABIO | (bp->b_flags & B_BNOCLIP);
	nbp->b_cpumask = bp->b_cpumask;
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n",
		    chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}
/*
 * Chunked up transfer completion routine - chain transfers until done
 *
 * NOTE: MPSAFE callback.
 */
static void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propogate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
		return;
	}

	/*
	 * Fall through to here on termination.  biodone(bp) and
	 * clean up and free nbp.
	 */
	biodone(bio);
	BUF_UNLOCK(nbp);
	kfree(nbp, M_DEVBUF);
}
/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * Must be a synchronous operation
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((ap->a_vp->v_rdev->si_flags & SI_CANFREE) == 0)
		return (0);
	bp = getpbuf(NULL);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	biowait(&bp->b_bio1, "TRIM");
	relpbuf(bp, NULL);
	return (0);
}
/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}
/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}
/*
 * NOTE: MPSAFE callback.
 */
static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}
/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error = 0;
	int blksiz;
	int gotreqpage = 0;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;

	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = roundup2(ap->a_count, blksiz);

	bp = getpbuf_kva(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter_noinval(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_flags |= B_KVABIO;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bsetrunningbufspace(bp, size);

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	vn_strategy(ap->a_vp, &bp->b_bio1);

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garabge.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count) {
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	}
	pmap_qremove_noinval(kva, pcount);

	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->flags & PG_REFERENCED) {
					vm_page_activate(m);
				} else {
					vm_page_deactivate(m);
				}
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}

	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "  size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
			    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "  nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
			    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}

	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}
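
/*
 * sequential_heuristic() returns a hint, pre-shifted by IO_SEQSHIFT, which
 * the read/write paths above OR into the ioflag they pass to the device.
 * The more sequential the access pattern looks, the larger the hint.
 */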
static __inline int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + MAXBSIZE - 1) / MAXBSIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)