/* sys/vfs/devfs/devfs_vnops.c */
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <sys/stat.h>
#include <sys/reg.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/tty.h>
#include <sys/devfs.h>
#include <sys/pioctl.h>

#include <machine/limits.h>
#include <vm/vm_page2.h>
#include <sys/buf2.h>
#include <sys/sysref2.h>
MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_badop

static int devfs_badop(struct vop_generic_args *);
static int devfs_access(struct vop_access_args *);
static int devfs_inactive(struct vop_inactive_args *);
static int devfs_reclaim(struct vop_reclaim_args *);
static int devfs_readdir(struct vop_readdir_args *);
static int devfs_getattr(struct vop_getattr_args *);
static int devfs_setattr(struct vop_setattr_args *);
static int devfs_readlink(struct vop_readlink_args *);
static int devfs_print(struct vop_print_args *);

static int devfs_nresolve(struct vop_nresolve_args *);
static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_nsymlink(struct vop_nsymlink_args *);
static int devfs_nremove(struct vop_nremove_args *);

static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_poll(struct vop_poll_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);

static int devfs_specf_close(struct file *);
static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
static int devfs_specf_kqfilter(struct file *, struct knote *);
static int devfs_specf_poll(struct file *, int, struct ucred *);
static int devfs_specf_ioctl(struct file *, u_long, caddr_t,
		struct ucred *, struct sysmsg *);
static __inline int sequential_heuristic(struct uio *, struct file *);

extern struct lock devfs_lock;
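
/*
 * devfs_lock is declared in the devfs core and serializes access to the
 * devfs node topology.  The vnode operations below take it around any
 * walk or mutation of the node tree; devfs_reclaim() additionally checks
 * whether the current thread already holds it, since reclaim can be
 * entered with the lock held.
 */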
/*
 * devfs vnode operations for regular files
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default = vop_defaultop,
	.vop_access = devfs_access,
	.vop_advlock = DEVFS_BADOP,
	.vop_bmap = DEVFS_BADOP,
	.vop_close = vop_stdclose,
	.vop_getattr = devfs_getattr,
	.vop_inactive = devfs_inactive,
	.vop_ncreate = DEVFS_BADOP,
	.vop_nresolve = devfs_nresolve,
	.vop_nlookupdotdot = devfs_nlookupdotdot,
	.vop_nlink = DEVFS_BADOP,
	.vop_nmkdir = DEVFS_BADOP,
	.vop_nmknod = DEVFS_BADOP,
	.vop_nremove = devfs_nremove,
	.vop_nrename = DEVFS_BADOP,
	.vop_nrmdir = DEVFS_BADOP,
	.vop_nsymlink = devfs_nsymlink,
	.vop_open = vop_stdopen,
	.vop_pathconf = vop_stdpathconf,
	.vop_print = devfs_print,
	.vop_read = DEVFS_BADOP,
	.vop_readdir = devfs_readdir,
	.vop_readlink = devfs_readlink,
	.vop_reclaim = devfs_reclaim,
	.vop_setattr = devfs_setattr,
	.vop_write = DEVFS_BADOP,
	.vop_ioctl = DEVFS_BADOP
};
/*
 * devfs vnode operations for character devices
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default = vop_defaultop,
	.vop_access = devfs_access,
	.vop_advlock = devfs_spec_advlock,
	.vop_bmap = devfs_spec_bmap,
	.vop_close = devfs_spec_close,
	.vop_freeblks = devfs_spec_freeblks,
	.vop_fsync = devfs_spec_fsync,
	.vop_getattr = devfs_getattr,
	.vop_getpages = devfs_spec_getpages,
	.vop_inactive = devfs_inactive,
	.vop_open = devfs_spec_open,
	.vop_pathconf = vop_stdpathconf,
	.vop_print = devfs_print,
	.vop_poll = devfs_spec_poll,
	.vop_kqfilter = devfs_spec_kqfilter,
	.vop_read = devfs_spec_read,
	.vop_readdir = DEVFS_BADOP,
	.vop_readlink = DEVFS_BADOP,
	.vop_reclaim = devfs_reclaim,
	.vop_setattr = devfs_setattr,
	.vop_strategy = devfs_spec_strategy,
	.vop_write = devfs_spec_write,
	.vop_ioctl = devfs_spec_ioctl
};

struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;

struct fileops devfs_dev_fileops = {
	.fo_read = devfs_specf_read,
	.fo_write = devfs_specf_write,
	.fo_ioctl = devfs_specf_ioctl,
	.fo_poll = devfs_specf_poll,
	.fo_kqfilter = devfs_specf_kqfilter,
	.fo_stat = devfs_specf_stat,
	.fo_close = devfs_specf_close,
	.fo_shutdown = nofo_shutdown
};
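
/*
 * Once a device is opened through devfs_spec_open(), the file pointer is
 * switched over to these fileops, so subsequent read/write/ioctl/poll
 * calls go through the devfs_specf_*() wrappers and talk to the device
 * directly rather than going back through the VOP table.
 */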
/*
 * These two functions are possibly temporary hacks for
 * devices (aka the pty code) which want to control the
 * node attributes themselves.
 *
 * XXX we may ultimately desire to simply remove the uid/gid/mode
 * from the node entirely.
 */
static __inline void
node_sync_dev_get(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		node->uid = dev->si_uid;
		node->gid = dev->si_gid;
		node->mode = dev->si_perms;
	}
}

static __inline void
node_sync_dev_set(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		dev->si_uid = node->uid;
		dev->si_gid = node->gid;
		dev->si_perms = node->mode;
	}
}
/*
 * generic entry point for unsupported operations
 */
static int
devfs_badop(struct vop_generic_args *ap)
{
	return (EIO);
}


static int
devfs_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);

	return error;
}

static int
devfs_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);
	return 0;
}

static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	int locked;

	/*
	 * Check if it is locked already; if not, acquire the devfs lock.
	 */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	vp = ap->a_vp;
	if ((node = DEVFS_NODE(vp)) != NULL) {
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}

	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev needs to be properly released using v_release_rdev.
	 * Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	v_release_rdev(vp);
	return 0;
}
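
/*
 * Note that the node itself is only freed here when it has already been
 * unlinked from the topology (DEVFS_NODE_LINKED clear); otherwise only
 * the vnode association is severed and the node lives on until it is
 * unlinked elsewhere.
 */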

static int
devfs_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);

	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	nanotime(&dnode->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make
		 * sure that the target isn't hidden.  If it is, we don't
		 * show the link in the directory listing.
		 */
		if ((node->node_type == Plink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);

		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
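
/*
 * Directory offsets double as restart cookies: "." and ".." occupy
 * offsets 0 and 1, and every other entry is identified by its node
 * cookie, so a partially consumed readdir can resume by skipping nodes
 * whose cookie is below the saved offset.
 */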

static int
devfs_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		depth = 0;
		while ((found->node_type == Plink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW,
					    "Recursive link or depth >= 8");
				break;
			}

			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	KKASSERT(vp);
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}
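
/*
 * Link chasing above is bounded to 8 hops to guard against symlink loops
 * inside devfs; deeper or circular chains simply resolve to the last
 * node reached when the limit is hit.
 */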

static int
devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}

static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;	/* XXX: what should this be? */
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = sizeof(struct devfs_node);

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	if ((node->node_type == Pdev) && node->d_dev) {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_size = node->symlink_namelen;
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}

static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vap = ap->a_vap;

	if (vap->va_uid != (uid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->uid = vap->va_uid;
	}

	if (vap->va_gid != (gid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->gid = vap->va_gid;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != node->uid) {
			error = priv_check(curthread, PRIV_VFS_ADMIN);
			if (error)
				goto out;
		}
		node->mode = vap->va_mode;
	}

out:
	node_sync_dev_set(node);
	nanotime(&node->ctime);
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}

static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}


static int
devfs_print(struct vop_print_args *ap)
{
	return (0);
}

static int
devfs_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}

static int
devfs_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);

out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}

static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name, node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
			    DEVFS_MNTDATA(vp->v_mount)->root_node,
			    ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "parent here is: %s, node is: |%s|\n",
					    ((node->parent->node_type == Proot) ?
					     "ROOT!" : node->parent->d_dir.d_name),
					    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "test: %s\n",
					    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;

	if (dev_dflags(dev) & D_TTY)
		vp->v_flag |= VISTTY;

	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}

	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX));
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	if (orig_vp)
		vn_unlock(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		ap->a_fp->f_type = DTYPE_VNODE;
		ap->a_fp->f_flag = ap->a_mode & FMASK;
		ap->a_fp->f_ops = &devfs_dev_fileops;
		ap->a_fp->f_data = vp;
	}

	return 0;
}
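
/*
 * For cloning devices (e.g. ptys), devfs_spec_open() above may swap the
 * vnode: devfs_clone() hands back a per-open cdev, a fresh device node
 * is created for it under the mount's root node, and ap->a_vp is
 * repointed at the new vnode while orig_vp remembers the vnode the
 * caller passed in so it can be restored if the open fails.
 */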

static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_close() called on %s! \n",
		    dev->si_name);

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Unlock around dev_dclose()
		 */
		needrelock = 0;
		if (vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);

		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed
		 */
		if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
			node->flags |= DEVFS_INVISIBLE;

		if (needrelock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);
}
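
/*
 * The v_opencount checks above mean dev_dclose() is only invoked for a
 * forced reclaim, for devices that request D_TRACKCLOSE, or on the last
 * close of the vnode; intermediate closes just drop the vnode-level open
 * count via vop_stdclose().
 */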

static int
devfs_specf_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag);
	rel_mplock();

	return (error);
}


/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto done;
	}

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

static int
devfs_specf_write(struct file *fp, struct uio *uio,
		  struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

static int
devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	if (error) {
		rel_mplock();
		return (error);
	}

	struct vattr vattr;
	struct vattr *vap;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error) {
		rel_mplock();
		return (error);
	}

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;

	rel_mplock();
	return (0);
}

static int
devfs_specf_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp;
	int error;
	cdev_t dev;

	get_mplock();

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	error = dev_dkqfilter(dev, kn);

	release_dev(dev);

done:
	rel_mplock();
	return (error);
}


static int
devfs_specf_poll(struct file *fp, int events, struct ucred *cred)
{
	struct devfs_node *node;
	struct vnode *vp;
	int error;
	cdev_t dev;

	get_mplock();

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);
	error = dev_dpoll(dev, events);

	release_dev(dev);

#if 0
	if (node)
		nanotime(&node->atime);
#endif
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data,
		  struct ucred *ucred, struct sysmsg *msg)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	get_mplock();
	vp = ((struct vnode *)fp->f_data);
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;		/* device was revoked */
		goto out;
	}

	node = DEVFS_NODE(vp);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_specf_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	reference_dev(dev);
	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg);
	release_dev(dev);
#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	rel_mplock();
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished! \n");
	return (error);
}

static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
	return (error);
}

static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}

/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}

/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred, struct sysmsg *msg)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag,
			   ap->a_cred, ap->a_sysmsg));
}

/*
 * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
 */
/* ARGSUSED */
static int
devfs_spec_poll(struct vop_poll_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dpoll(dev, ap->a_events));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
/* ARGSUSED */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dkqfilter(dev, ap->a_kn));
}

/*
 * Convert a vnode strategy call into a device strategy call.  Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	buf_dep_init(nbp);
	BUF_LOCKINIT(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n",
		    chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}
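
/*
 * Illustration (hypothetical numbers): with si_iosize_max = 128KB and a
 * physical block size of 512 bytes, chunksize rounds to 128KB and a 1MB
 * buffer is carved into 8 chunks.  The first chunk is issued above and
 * devfs_spec_strategy_done() keeps the chain going until the whole range
 * has been transferred, or an error/short transfer terminates it.
 */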

/*
 * Chunked up transfer completion routine - chain transfers until done
 */
static
void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
	}
}
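
/*
 * Five outcomes are handled above: an I/O error, a short transfer
 * reported via b_resid, a short transfer expressed by a truncated
 * b_bcount, a normal end of chain, and the continuation case which
 * re-aims the cloned buffer at the next chunk and re-issues it.
 */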

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	return (0);
}

/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}

/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
/* ARGSUSED */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}

static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}

/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bp->b_runningbufspace = size;
	if (size) {
		runningbufspace += bp->b_runningbufspace;
		++runningbufcount;
	}

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		} else {
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_WANTED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "  size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
			    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "  nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
			    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
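
/*
 * Example (assuming the typical BKVASIZE of 16KB): a read that continues
 * at f_nextoff with uio_resid = 64KB adds 4 to f_seqcount; f_seqcount is
 * capped at IO_SEQMAX and the hint returned to the ioflag word is
 * f_seqcount << IO_SEQSHIFT.
 */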