/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/devfs.h>
#include <sys/pioctl.h>
#include <machine/limits.h>
#include <vm/vm_page2.h>
#include <sys/sysref2.h>
MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_badop

static int devfs_badop(struct vop_generic_args *);
static int devfs_access(struct vop_access_args *);
static int devfs_inactive(struct vop_inactive_args *);
static int devfs_reclaim(struct vop_reclaim_args *);
static int devfs_readdir(struct vop_readdir_args *);
static int devfs_getattr(struct vop_getattr_args *);
static int devfs_setattr(struct vop_setattr_args *);
static int devfs_readlink(struct vop_readlink_args *);
static int devfs_print(struct vop_print_args *);

static int devfs_nresolve(struct vop_nresolve_args *);
static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_nsymlink(struct vop_nsymlink_args *);
static int devfs_nremove(struct vop_nremove_args *);

static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_poll(struct vop_poll_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);

static int devfs_specf_close(struct file *);
static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
static int devfs_specf_kqfilter(struct file *, struct knote *);
static int devfs_specf_poll(struct file *, int, struct ucred *);
static int devfs_specf_ioctl(struct file *, u_long, caddr_t,
			     struct ucred *, struct sysmsg *);
static __inline int sequential_heuristic(struct uio *, struct file *);

extern struct lock devfs_lock;
/*
 * devfs vnode operations for regular files
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		DEVFS_BADOP,
	.vop_bmap =		DEVFS_BADOP,
	.vop_close =		vop_stdclose,
	.vop_getattr =		devfs_getattr,
	.vop_inactive =		devfs_inactive,
	.vop_ncreate =		DEVFS_BADOP,
	.vop_nresolve =		devfs_nresolve,
	.vop_nlookupdotdot =	devfs_nlookupdotdot,
	.vop_nlink =		DEVFS_BADOP,
	.vop_nmkdir =		DEVFS_BADOP,
	.vop_nmknod =		DEVFS_BADOP,
	.vop_nremove =		devfs_nremove,
	.vop_nrename =		DEVFS_BADOP,
	.vop_nrmdir =		DEVFS_BADOP,
	.vop_nsymlink =		devfs_nsymlink,
	.vop_open =		vop_stdopen,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_read =		DEVFS_BADOP,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_write =		DEVFS_BADOP,
	.vop_ioctl =		DEVFS_BADOP
};
/*
 * devfs vnode operations for character devices
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		devfs_spec_advlock,
	.vop_bmap =		devfs_spec_bmap,
	.vop_close =		devfs_spec_close,
	.vop_freeblks =		devfs_spec_freeblks,
	.vop_fsync =		devfs_spec_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_getpages =		devfs_spec_getpages,
	.vop_inactive =		devfs_inactive,
	.vop_open =		devfs_spec_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_poll =		devfs_spec_poll,
	.vop_kqfilter =		devfs_spec_kqfilter,
	.vop_read =		devfs_spec_read,
	.vop_readdir =		DEVFS_BADOP,
	.vop_readlink =		DEVFS_BADOP,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_strategy =		devfs_spec_strategy,
	.vop_write =		devfs_spec_write,
	.vop_ioctl =		devfs_spec_ioctl
};
struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;
struct fileops devfs_dev_fileops = {
	.fo_read =	devfs_specf_read,
	.fo_write =	devfs_specf_write,
	.fo_ioctl =	devfs_specf_ioctl,
	.fo_poll =	devfs_specf_poll,
	.fo_kqfilter =	devfs_specf_kqfilter,
	.fo_stat =	devfs_specf_stat,
	.fo_close =	devfs_specf_close,
	.fo_shutdown =	nofo_shutdown
};
/*
 * These two functions are possibly temporary hacks for
 * devices (aka the pty code) which want to control the
 * node attributes themselves.
 *
 * XXX we may ultimately desire to simply remove the uid/gid/mode
 * from the node entirely.
 */
void
node_sync_dev_get(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		node->uid = dev->si_uid;
		node->gid = dev->si_gid;
		node->mode = dev->si_perms;
	}
}
void
node_sync_dev_set(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		dev->si_uid = node->uid;
		dev->si_gid = node->gid;
		dev->si_perms = node->mode;
	}
}
/*
 * generic entry point for unsupported operations
 */
static int
devfs_badop(struct vop_generic_args *ap)
{
	return (EIO);
}
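/*
 * vop_access: check the caller's credentials against the node's
 * uid/gid/mode, pulling the current ownership from the backing cdev first.
 */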
static int
devfs_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);
	return error;
}
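/*
 * vop_inactive: recycle the vnode once the node is gone or no longer
 * linked into the devfs topology.
 */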
static int
devfs_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);
	return 0;
}
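/*
 * vop_reclaim: disassociate the vnode from its devfs_node and release
 * the rdev reference.
 */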
static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	int locked;

	/*
	 * Check if it is locked already.  If not, we acquire the devfs lock.
	 */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	vp = ap->a_vp;
	if ((node = DEVFS_NODE(vp)) != NULL) {
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}

	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev needs to be properly released using v_release_rdev
	 * Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	v_release_rdev(vp);
	return 0;
}
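/*
 * vop_readdir: emit "." and "..", then walk the directory node's children,
 * skipping hidden/invisible nodes and link nodes whose target is hidden.
 */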
static int
devfs_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);

	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	nanotime(&dnode->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make sure
		 * that the target isn't hidden.  If it is, we don't show
		 * the link in the directory listing.
		 */
		if ((node->node_type == Plink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);
		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
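/*
 * vop_nresolve: look up a name in a devfs directory, following link nodes
 * (up to a limited depth) and associating the namecache entry with the
 * resulting vnode.
 */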
static int
devfs_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		depth = 0;
		while ((found->node_type == Plink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW, "Recursive link or depth >= 8");
				break;
			}
			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}
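/*
 * vop_nlookupdotdot: return a vnode for the parent directory, if any.
 */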
static int
devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}
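/*
 * vop_getattr: fill in a vattr from the devfs_node, using the backing
 * cdev's ownership and minor number where applicable.
 */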
static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;	/* XXX: what should this be? */
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = sizeof(struct devfs_node);

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1;	/* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	if ((node->node_type == Pdev) && node->d_dev) {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_size = node->symlink_namelen;
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}
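/*
 * vop_setattr: apply chown/chgrp/chmod to the node (with the usual
 * privilege checks) and push the result back to the backing cdev.
 */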
static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vap = ap->a_vap;

	if (vap->va_uid != (uid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->uid = vap->va_uid;
	}

	if (vap->va_gid != (uid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->gid = vap->va_gid;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != node->uid) {
			error = priv_check(curthread, PRIV_VFS_ADMIN);
			if (error)
				goto out;
		}
		node->mode = vap->va_mode;
	}

out:
	node_sync_dev_set(node);
	nanotime(&node->ctime);
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}
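/*
 * vop_readlink: copy the stored symlink target out to the caller.
 */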
static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}
static int
devfs_print(struct vop_print_args *ap)
{
	return (0);
}
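/*
 * vop_nsymlink: create a user symlink node under a devfs directory and
 * remember its target string.
 */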
static int
devfs_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		return ENOTDIR;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
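/*
 * vop_nremove: remove a directory entry; only user-created nodes
 * (e.g. symlinks) may be removed.
 */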
static int
devfs_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		}
		if (node->v_node)
			cache_inval_vp(node->v_node, CINV_DESTROY);
		devfs_unlinkp(node);
		error = 0;
		break;
	}

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);

out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}
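/*
 * vop_open for devices: clone the device if it supports cloning (replacing
 * the vnode with one for the new node), then call the driver's d_open and
 * hook the file up to the device fileops.
 */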
static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name, node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
			    DEVFS_MNTDATA(vp->v_mount)->root_node,
			    ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;

				devfs_debug(DEVFS_DEBUG_DEBUG,
				    "parent here is: %s, node is: |%s|\n",
				    ((node->parent->node_type == Proot) ?
				    "ROOT!" : node->parent->d_dir.d_name),
				    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
				    "test: %s\n",
				    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;

	if (dev_dflags(dev) & D_TTY)
		vp->v_flag |= VISTTY;

	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;

			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}

	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX));
	}

	if (node)
		nanotime(&node->atime);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		ap->a_fp->f_type = DTYPE_VNODE;
		ap->a_fp->f_flag = ap->a_mode & FMASK;
		ap->a_fp->f_ops = &devfs_dev_fileops;
		ap->a_fp->f_data = vp;
	}

	return 0;
}
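/*
 * vop_close for devices: detect the last close (or a forced/tracked close)
 * and call the driver's d_close, handling controlling-tty and pty
 * visibility side effects.
 */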
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_close() called on %s! \n",
		    (dev ? dev->si_name : "?"));

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Unlock around dev_dclose()
		 */
		needrelock = 0;
		if (vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);

		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed.
		 */
		if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
			node->flags |= DEVFS_INVISIBLE;

		if (needrelock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return (error);
}
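/*
 * fileops close: invalidate the fileops and close the underlying vnode.
 */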
static int
devfs_specf_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag);

	return (error);
}
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto done;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	return (error);
}
static int
devfs_specf_write(struct file *fp, struct uio *uio,
		  struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);
	vp = (struct vnode *)fp->f_data;

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	return (error);
}
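/*
 * fileops stat: build a struct stat from VOP_GETATTR() output, letting a
 * character/block device override the access/modify timestamps.
 */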
static int
devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	struct vattr vattr;
	struct vattr *vap;
	u_short mode;
	cdev_t dev;
	int error;

	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	if (error)
		return (error);

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;

	return (0);
}
static int
devfs_specf_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp;
	int error;
	cdev_t dev;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	error = dev_dkqfilter(dev, kn);
done:
	return (error);
}
static int
devfs_specf_poll(struct file *fp, int events, struct ucred *cred)
{
	struct devfs_node *node;
	struct vnode *vp;
	int error;
	cdev_t dev;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = 0;
		goto done;
	}
	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL) {
		error = 0;
		goto done;
	}

	error = dev_dpoll(dev, events);

	if (node)
		nanotime(&node->atime);
done:
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data,
		  struct ucred *ucred, struct sysmsg *msg)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct vnode *ovp;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;
	cdev_t dev;
	int error;

	vp = ((struct vnode *)fp->f_data);
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;		/* device was revoked */
		goto out;
	}
	node = DEVFS_NODE(vp);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_specf_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished! \n");
	return (error);
}
static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
	return (error);
}
static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}
/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}
/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred, struct sysmsg *msg)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag,
			   ap->a_cred, ap->a_sysmsg));
}
/*
 * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
 */
static int
devfs_spec_poll(struct vop_poll_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

	if (node)
		nanotime(&node->atime);

	return (dev_dpoll(dev, ap->a_events));
}
/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

	if (node)
		nanotime(&node->atime);

	return (dev_dkqfilter(dev, ap->a_kn));
}
/*
 * Convert a vnode strategy call into a device strategy call.  Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		if (DEVFS_NODE(vp)) {
			nanotime(&DEVFS_NODE(vp)->atime);
			nanotime(&DEVFS_NODE(vp)->mtime);
		}
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n",
		    chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}
	return (0);
}
/*
 * Chunked up transfer completion routine - chain transfers until done
 */
static void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
	}
}
/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	return (0);
}
/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}
/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}
static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}
/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bp->b_runningbufspace = size;
	runningbufspace += bp->b_runningbufspace;

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_WANTED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "               size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
			    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "               nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
			    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}