/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elischer,
 *							All rights reserved.
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/tree.h>
#include <sys/syslink_rpc.h>
#include <sys/dsched.h>
#include <sys/devfs.h>

#include <machine/stdarg.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

/*
 * system link descriptors identify the command in the
 * arguments structure.
 */
#define DDESCNAME(name) __CONCAT(__CONCAT(dev_,name),_desc)

#define DEVOP_DESC_INIT(name)						\
	    struct syslink_desc DDESCNAME(name) = {			\
		__offsetof(struct dev_ops, __CONCAT(d_, name)),	\
	    #name }

DEVOP_DESC_INIT(default);
DEVOP_DESC_INIT(open);
DEVOP_DESC_INIT(close);
DEVOP_DESC_INIT(read);
DEVOP_DESC_INIT(write);
DEVOP_DESC_INIT(ioctl);
DEVOP_DESC_INIT(dump);
DEVOP_DESC_INIT(psize);
DEVOP_DESC_INIT(mmap);
DEVOP_DESC_INIT(mmap_single);
DEVOP_DESC_INIT(strategy);
DEVOP_DESC_INIT(kqfilter);
DEVOP_DESC_INIT(revoke);
DEVOP_DESC_INIT(clone);
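
/*
 * Illustrative note (not part of the original source): DEVOP_DESC_INIT(open)
 * above expands to roughly
 *
 *	struct syslink_desc dev_open_desc = {
 *		__offsetof(struct dev_ops, d_open), "open"
 *	};
 *
 * pairing the byte offset of the d_open method within struct dev_ops with
 * its name.  dev_doperate() below uses sd_offset from these descriptors to
 * locate the matching function pointer in any dev_ops structure.
 */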

struct dev_ops dead_dev_ops;

static d_open_t		noopen;
static d_close_t	noclose;
static d_read_t		noread;
static d_write_t	nowrite;
static d_ioctl_t	noioctl;
static d_mmap_t		nommap;
static d_mmap_single_t	nommap_single;
static d_strategy_t	nostrategy;
static d_dump_t		nodump;
static d_psize_t	nopsize;
static d_kqfilter_t	nokqfilter;
static d_clone_t	noclone;
static d_revoke_t	norevoke;

struct dev_ops default_dev_ops = {
	{ "null" },
	.d_default = NULL,	/* must be NULL */
	.d_open = noopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_mmap = nommap,
	.d_mmap_single = nommap_single,
	.d_strategy = nostrategy,
	.d_dump = nodump,
	.d_psize = nopsize,
	.d_kqfilter = nokqfilter,
	.d_revoke = norevoke,
	.d_clone = noclone
};

static __inline
int
dev_needmplock(cdev_t dev)
{
	return((dev->si_ops->head.flags & D_MPSAFE) == 0);
}

static __inline
int
dev_nokvabio(cdev_t dev)
{
	return((dev->si_ops->head.flags & D_KVABIO) == 0);
}
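
/*
 * Illustrative sketch (not part of the original source; mydev_ops and
 * mydev_open are hypothetical): a driver that sets D_MPSAFE in its
 * dev_ops head bypasses the MP lock in the dispatch wrappers below:
 *
 *	static struct dev_ops mydev_ops = {
 *		{ "mydev", 0, D_MPSAFE },
 *		.d_open = mydev_open,
 *	};
 *
 * For such a device dev_needmplock() returns 0, so the get_mplock()/
 * rel_mplock() pairs in the dev_d*() wrappers are skipped entirely.
 */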

/************************************************************************
 *			GENERAL DEVICE API FUNCTIONS			*
 ************************************************************************
 *
 * The MPSAFEness of these depends on dev->si_ops->head.flags
 */

int
dev_dopen(cdev_t dev, int oflags, int devtype, struct ucred *cred,
	  struct file *fp)
{
	struct dev_open_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_open_desc;
	ap.a_head.a_dev = dev;
	ap.a_oflags = oflags;
	ap.a_devtype = devtype;
	ap.a_cred = cred;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_open(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}

int
dev_dclose(cdev_t dev, int fflag, int devtype, struct file *fp)
{
	struct dev_close_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_close_desc;
	ap.a_head.a_dev = dev;
	ap.a_fflag = fflag;
	ap.a_devtype = devtype;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_close(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}

int
dev_dread(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
{
	struct dev_read_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_read_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_read(&ap);
	if (needmplock)
		rel_mplock();
	if (error == 0)
		dev->si_lastread = time_uptime;
	return (error);
}

int
dev_dwrite(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
{
	struct dev_write_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	dev->si_lastwrite = time_uptime;
	ap.a_head.a_desc = &dev_write_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_write(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}

int
dev_dioctl(cdev_t dev, u_long cmd, caddr_t data, int fflag, struct ucred *cred,
	   struct sysmsg *msg, struct file *fp)
{
	struct dev_ioctl_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_ioctl_desc;
	ap.a_head.a_dev = dev;
	ap.a_cmd = cmd;
	ap.a_data = data;
	ap.a_fflag = fflag;
	ap.a_cred = cred;
	ap.a_sysmsg = msg;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_ioctl(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}

int64_t
dev_dmmap(cdev_t dev, vm_offset_t offset, int nprot, struct file *fp)
{
	struct dev_mmap_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_mmap_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_nprot = nprot;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_mmap(&ap);
	if (needmplock)
		rel_mplock();
	if (error == 0)
		return(ap.a_result);
	return(-1);
}

int
dev_dmmap_single(cdev_t dev, vm_ooffset_t *offset, vm_size_t size,
		 struct vm_object **object, int nprot, struct file *fp)
{
	struct dev_mmap_single_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_mmap_single_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_size = size;
	ap.a_object = object;
	ap.a_nprot = nprot;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_mmap_single(&ap);
	if (needmplock)
		rel_mplock();
	return(error);
}

int
dev_dclone(cdev_t dev)
{
	struct dev_clone_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_clone_desc;
	ap.a_head.a_dev = dev;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_clone(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}

int
dev_drevoke(cdev_t dev)
{
	struct dev_revoke_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_revoke_desc;
	ap.a_head.a_dev = dev;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_revoke(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}

/*
 * Core device strategy call, used to issue I/O on a device.  There are
 * two versions, a non-chained version and a chained version.  The chained
 * version reuses a BIO set up by vn_strategy().  The only difference is
 * that, for now, we do not push a new tracking structure when chaining
 * from vn_strategy.  XXX this will ultimately have to change.
 */
void
dev_dstrategy(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	struct bio_track *track;
	struct buf *bp = bio->bio_buf;
	int needmplock = dev_needmplock(dev);

	/*
	 * If the device does not support KVABIO and the buffer is using
	 * KVABIO, we must synchronize b_data to all cpus before dispatching.
	 */
	if (dev_nokvabio(dev) && (bp->b_flags & B_KVABIO))
		bkvasync_all(bp);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track == NULL);
	KKASSERT(bp->b_cmd != BUF_CMD_DONE);
	if (bp->b_cmd == BUF_CMD_READ)
		track = &dev->si_track_read;
	else
		track = &dev->si_track_write;
	bio_track_ref(track);
	bio->bio_track = track;
	dsched_buf_enter(bp);	/* might stack */

	KKASSERT((bio->bio_flags & BIO_DONE) == 0);
	if (needmplock)
		get_mplock();
	(void)dev->si_ops->d_strategy(&ap);
	if (needmplock)
		rel_mplock();
}

void
dev_dstrategy_chain(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	struct buf *bp = bio->bio_buf;
	int needmplock = dev_needmplock(dev);

	/*
	 * If the device does not support KVABIO and the buffer is using
	 * KVABIO, we must synchronize b_data to all cpus before dispatching.
	 */
	if (dev_nokvabio(dev) && (bp->b_flags & B_KVABIO))
		bkvasync_all(bp);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track != NULL);
	KKASSERT((bio->bio_flags & BIO_DONE) == 0);
	if (needmplock)
		get_mplock();
	(void)dev->si_ops->d_strategy(&ap);
	if (needmplock)
		rel_mplock();
}

/*
 * note: the disk layer is expected to set count, blkno, and secsize before
 * forwarding the message.
 */
int
dev_ddump(cdev_t dev, void *virtual, vm_offset_t physical, off_t offset,
	  size_t length)
{
	struct dev_dump_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_dump_desc;
	ap.a_head.a_dev = dev;
	ap.a_count = 0;
	ap.a_blkno = 0;
	ap.a_secsize = 0;
	ap.a_virtual = virtual;
	ap.a_physical = physical;
	ap.a_offset = offset;
	ap.a_length = length;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_dump(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}

int64_t
dev_dpsize(cdev_t dev)
{
	struct dev_psize_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_psize_desc;
	ap.a_head.a_dev = dev;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_psize(&ap);
	if (needmplock)
		rel_mplock();

	if (error == 0)
		return (ap.a_result);
	return(-1);
}

/*
 * Pass-thru to the device kqfilter.
 *
 * NOTE: We explicitly preset a_result to 0 so d_kqfilter() functions
 *	 which return 0 do not have to bother setting a_result.
 */
int
dev_dkqfilter(cdev_t dev, struct knote *kn, struct file *fp)
{
	struct dev_kqfilter_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_kqfilter_desc;
	ap.a_head.a_dev = dev;
	ap.a_kn = kn;
	ap.a_result = 0;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_kqfilter(&ap);
	if (needmplock)
		rel_mplock();

	if (error == 0)
		return(ap.a_result);
	return(ENODEV);
}

/************************************************************************
 *			DEVICE HELPER FUNCTIONS				*
 ************************************************************************/

int
dev_drefs(cdev_t dev)
{
	return(dev->si_sysref.refcnt);
}

const char *
dev_dname(cdev_t dev)
{
	return(dev->si_ops->head.name);
}

int
dev_dflags(cdev_t dev)
{
	return(dev->si_ops->head.flags);
}

int
dev_dmaj(cdev_t dev)
{
	return(dev->si_ops->head.maj);
}

/*
 * Used when forwarding a request through layers.  The caller adjusts
 * ap->a_head.a_dev and then calls this function.
 */
int
dev_doperate(struct dev_generic_args *ap)
{
	int (*func)(struct dev_generic_args *);
	int needmplock = dev_needmplock(ap->a_dev);
	int error;

	func = *(void **)((char *)ap->a_dev->si_ops + ap->a_desc->sd_offset);

	if (needmplock)
		get_mplock();
	error = func(ap);
	if (needmplock)
		rel_mplock();

	return (error);
}
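
/*
 * Illustrative sketch of the forwarding pattern described above (not part
 * of the original source; mylayer_open and lower_dev are hypothetical).
 * A layering driver repoints a_dev at the underlying device and reissues
 * the operation generically:
 *
 *	static int
 *	mylayer_open(struct dev_open_args *ap)
 *	{
 *		ap->a_head.a_dev = lower_dev;
 *		return (dev_doperate(&ap->a_head));
 *	}
 */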

/*
 * Used by the console intercept code only.  Issue an operation through
 * a foreign ops structure allowing the ops structure associated
 * with the device to remain intact.
 */
int
dev_doperate_ops(struct dev_ops *ops, struct dev_generic_args *ap)
{
	int (*func)(struct dev_generic_args *);
	int needmplock = ((ops->head.flags & D_MPSAFE) == 0);
	int error;

	func = *(void **)((char *)ops + ap->a_desc->sd_offset);

	if (needmplock)
		get_mplock();
	error = func(ap);
	if (needmplock)
		rel_mplock();

	return (error);
}

/*
 * Convert a template dev_ops into the real thing by filling in
 * uninitialized fields.
 */
void
compile_dev_ops(struct dev_ops *ops)
{
	int offset;

	for (offset = offsetof(struct dev_ops, dev_ops_first_field);
	     offset <= offsetof(struct dev_ops, dev_ops_last_field);
	     offset += sizeof(void *)
	) {
		void **func_p = (void **)((char *)ops + offset);
		void **def_p = (void **)((char *)&default_dev_ops + offset);
		if (*func_p == NULL) {
			if (ops->d_default)
				*func_p = ops->d_default;
			else
				*func_p = *def_p;
		}
	}
}
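
/*
 * Illustrative example (not part of the original source; the mydev_*
 * names are hypothetical).  A driver template typically supplies only
 * the entry points it implements:
 *
 *	static struct dev_ops mydev_ops = {
 *		{ "mydev" },
 *		.d_open = mydev_open,
 *		.d_read = mydev_read
 *	};
 *
 * compile_dev_ops(&mydev_ops) then fills every remaining method pointer
 * from ops->d_default if set, otherwise from default_dev_ops, so the
 * dispatch wrappers above never have to test for NULL methods.
 */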

/************************************************************************
 *			MAJOR/MINOR SPACE FUNCTION			*
 ************************************************************************/

/*
 * This makes a dev_ops entry visible to userland (e.g. /dev/<blah>).
 *
 * Disk devices typically register their major, e.g. 'ad0', and then call
 * into the disk label management code which overloads its own onto e.g. 'ad0'
 * to support all the various slice and partition combinations.
 *
 * The mask/match supplied in this call are a full 32 bits and the same
 * mask and match must be specified in a later dev_ops_remove() call to
 * match this add.  However, the match value for the minor number should never
 * have any bits set in the major number's bit range (8-15).  The mask value
 * may be conveniently specified as -1 without creating any major number
 * interference.
 */

static int
rb_dev_ops_compare(struct dev_ops_maj *a, struct dev_ops_maj *b)
{
	if (a->maj < b->maj)
		return(-1);
	else if (a->maj > b->maj)
		return(1);
	return(0);
}

RB_GENERATE2(dev_ops_rb_tree, dev_ops_maj, rbnode, rb_dev_ops_compare,
	     int, maj);

struct dev_ops_rb_tree dev_ops_rbhead = RB_INITIALIZER(dev_ops_rbhead);

int
dev_ops_remove_all(struct dev_ops *ops)
{
	return devfs_destroy_dev_by_ops(ops, -1);
}

int
dev_ops_remove_minor(struct dev_ops *ops, int minor)
{
	return devfs_destroy_dev_by_ops(ops, minor);
}

struct dev_ops *
dev_ops_intercept(cdev_t dev, struct dev_ops *iops)
{
	struct dev_ops *oops = dev->si_ops;

	compile_dev_ops(iops);
	iops->head.maj = oops->head.maj;
	iops->head.data = oops->head.data;
	iops->head.flags = oops->head.flags;
	dev->si_ops = iops;
	dev->si_flags |= SI_INTERCEPTED;

	return (oops);
}
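
/*
 * Illustrative pairing (not part of the original source; intercept_ops is
 * hypothetical), as used by the console intercept code: save the ops
 * pointer returned by dev_ops_intercept() and hand it back to
 * dev_ops_restore() when the intercept is finished:
 *
 *	struct dev_ops *saved_ops;
 *
 *	saved_ops = dev_ops_intercept(dev, &intercept_ops);
 *	(operate on dev through intercept_ops...)
 *	dev_ops_restore(dev, saved_ops);
 */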

void
dev_ops_restore(cdev_t dev, struct dev_ops *oops)
{
	struct dev_ops *iops = dev->si_ops;

	dev->si_ops = oops;
	dev->si_flags &= ~SI_INTERCEPTED;
	iops->head.maj = 0;
	iops->head.data = NULL;
	iops->head.flags = 0;
}

/************************************************************************
 *			DEFAULT DEV OPS FUNCTIONS			*
 ************************************************************************/

/*
 * Unsupported devswitch functions (e.g. for writing to read-only device).
 * XXX may belong elsewhere.
 */
static int
norevoke(struct dev_revoke_args *ap)
{
	return (0);
}

static int
noclone(struct dev_clone_args *ap)
{
	return (0);	/* allow the clone */
}

static int
noopen(struct dev_open_args *ap)
{
	return (ENODEV);
}

static int
noclose(struct dev_close_args *ap)
{
	return (0);
}

static int
noread(struct dev_read_args *ap)
{
	return (ENODEV);
}

static int
nowrite(struct dev_write_args *ap)
{
	return (ENODEV);
}

static int
noioctl(struct dev_ioctl_args *ap)
{
	return (ENODEV);
}

static int
nokqfilter(struct dev_kqfilter_args *ap)
{
	return (ENODEV);
}

static int
nommap(struct dev_mmap_args *ap)
{
	return (ENODEV);
}

static int
nommap_single(struct dev_mmap_single_args *ap)
{
	return (ENODEV);
}

static int
nostrategy(struct dev_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;

	bio->bio_buf->b_flags |= B_ERROR;
	bio->bio_buf->b_error = EOPNOTSUPP;
	biodone(bio);
	return (0);
}

static int
nopsize(struct dev_psize_args *ap)
{
	ap->a_result = 0;
	return (0);
}

static int
nodump(struct dev_dump_args *ap)
{
	return (ENODEV);
}

/*
 * XXX this is probably bogus.  Any device that uses it isn't checking the
 * minor number.
 */
int
nullopen(struct dev_open_args *ap)
{
	return (0);
}

int
nullclose(struct dev_close_args *ap)
{
	return (0);
}