/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com> All rights reserved.
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Terrence R. Lambert
 * cdevsw from kern/kern_conf.c Copyright (c) 1995 Julian R. Elischer,
 *							All rights reserved.
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/tree.h>
#include <sys/syslink_rpc.h>
#include <sys/dsched.h>
#include <sys/devfs.h>

#include <machine/stdarg.h>

#include <sys/mplock2.h>
/*
 * system link descriptors identify the command in the
 * arguments structure.
 */
#define DDESCNAME(name) __CONCAT(__CONCAT(dev_,name),_desc)

#define DEVOP_DESC_INIT(name)						\
	    struct syslink_desc DDESCNAME(name) = {			\
		__offsetof(struct dev_ops, __CONCAT(d_, name)),	\
	    #name }
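
/*
 * Illustrative note (not from the original source): assuming struct
 * syslink_desc lays out as { sd_offset, sd_string }, an invocation such as
 *
 *	DEVOP_DESC_INIT(open);
 *
 * expands to roughly:
 *
 *	struct syslink_desc dev_open_desc = {
 *		__offsetof(struct dev_ops, d_open), "open" };
 *
 * dev_doperate() below uses sd_offset to locate the matching function
 * pointer inside a struct dev_ops.
 */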
DEVOP_DESC_INIT(default);
DEVOP_DESC_INIT(open);
DEVOP_DESC_INIT(close);
DEVOP_DESC_INIT(read);
DEVOP_DESC_INIT(write);
DEVOP_DESC_INIT(ioctl);
DEVOP_DESC_INIT(dump);
DEVOP_DESC_INIT(psize);
DEVOP_DESC_INIT(mmap);
DEVOP_DESC_INIT(mmap_single);
DEVOP_DESC_INIT(strategy);
DEVOP_DESC_INIT(kqfilter);
DEVOP_DESC_INIT(revoke);
DEVOP_DESC_INIT(clone);
struct dev_ops dead_dev_ops;

static d_open_t		noopen;
static d_close_t	noclose;
static d_read_t		noread;
static d_write_t	nowrite;
static d_ioctl_t	noioctl;
static d_mmap_t		nommap;
static d_mmap_single_t	nommap_single;
static d_strategy_t	nostrategy;
static d_dump_t		nodump;
static d_psize_t	nopsize;
static d_kqfilter_t	nokqfilter;
static d_clone_t	noclone;
static d_revoke_t	norevoke;
struct dev_ops default_dev_ops = {
	{ "null" },
	.d_default = NULL,	/* must be NULL */
	.d_open = noopen,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_mmap = nommap,
	.d_mmap_single = nommap_single,
	.d_strategy = nostrategy,
	.d_dump = nodump,
	.d_psize = nopsize,
	.d_kqfilter = nokqfilter,
	.d_revoke = norevoke,
	.d_clone = noclone
};
static __inline
int
dev_needmplock(cdev_t dev)
{
	return((dev->si_ops->head.flags & D_MPSAFE) == 0);
}

static __inline
int
dev_nokvabio(cdev_t dev)
{
	return((dev->si_ops->head.flags & D_KVABIO) == 0);
}
/************************************************************************
 *	GENERAL DEVICE API FUNCTIONS					*
 ************************************************************************
 *
 * The MPSAFEness of these depends on dev->si_ops->head.flags
 */
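
/*
 * Illustrative sketch (not from the original source): each wrapper below
 * brackets the driver call with the MP lock only when the driver did not
 * declare D_MPSAFE, following the pattern
 *
 *	if (needmplock)
 *		get_mplock();
 *	error = dev->si_ops->d_xxx(&ap);
 *	if (needmplock)
 *		rel_mplock();
 *
 * where needmplock comes from dev_needmplock() above.
 */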
int
dev_dopen(cdev_t dev, int oflags, int devtype, struct ucred *cred,
	  struct file **fpp, struct vnode *vp)
{
	struct dev_open_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_open_desc;
	ap.a_head.a_dev = dev;
	ap.a_oflags = oflags;
	ap.a_devtype = devtype;
	ap.a_cred = cred;
	ap.a_fpp = fpp;
	(*ap.a_fpp)->f_data = vp;

	/*
	 * vref(vp) is being done in vop_stdopen()
	 *
	 * If a non-null vp is passed-in, the caller must also issue a
	 * matching vop_stdopen().
	 *
	 * NOTE: d_open() may replace *ap.a_fpp
	 */
	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_open(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
int
dev_dclose(cdev_t dev, int fflag, int devtype, struct file *fp)
{
	struct dev_close_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_close_desc;
	ap.a_head.a_dev = dev;
	ap.a_fflag = fflag;
	ap.a_devtype = devtype;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_close(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
int
dev_dread(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
{
	struct dev_read_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_read_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_read(&ap);
	if (needmplock)
		rel_mplock();
	if (error == 0)
		dev->si_lastread = time_uptime;
	return (error);
}
int
dev_dwrite(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
{
	struct dev_write_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	dev->si_lastwrite = time_uptime;
	ap.a_head.a_desc = &dev_write_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_write(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
int
dev_dioctl(cdev_t dev, u_long cmd, caddr_t data, int fflag, struct ucred *cred,
	   struct sysmsg *msg, struct file *fp)
{
	struct dev_ioctl_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_ioctl_desc;
	ap.a_head.a_dev = dev;
	ap.a_cmd = cmd;
	ap.a_data = data;
	ap.a_fflag = fflag;
	ap.a_cred = cred;
	ap.a_sysmsg = msg;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_ioctl(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
int64_t
dev_dmmap(cdev_t dev, vm_offset_t offset, int nprot, struct file *fp)
{
	struct dev_mmap_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_mmap_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_nprot = nprot;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_mmap(&ap);
	if (needmplock)
		rel_mplock();

	if (error == 0)
		return(ap.a_result);
	return(-1);
}
int
dev_dmmap_single(cdev_t dev, vm_ooffset_t *offset, vm_size_t size,
		 struct vm_object **object, int nprot, struct file *fp)
{
	struct dev_mmap_single_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_mmap_single_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_size = size;
	ap.a_object = object;
	ap.a_nprot = nprot;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_mmap_single(&ap);
	if (needmplock)
		rel_mplock();
	return(error);
}
int
dev_dclone(cdev_t dev)
{
	struct dev_clone_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_clone_desc;
	ap.a_head.a_dev = dev;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_clone(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
int
dev_drevoke(cdev_t dev)
{
	struct dev_revoke_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_revoke_desc;
	ap.a_head.a_dev = dev;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_revoke(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
/*
 * Core device strategy call, used to issue I/O on a device.  There are
 * two versions, a non-chained version and a chained version.  The chained
 * version reuses a BIO set up by vn_strategy().  The only difference is
 * that, for now, we do not push a new tracking structure when chaining
 * from vn_strategy.  XXX this will ultimately have to change.
 */
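
/*
 * Illustrative sketch (not from the original source): a caller of the
 * non-chained version sets up a fresh BIO itself, e.g.
 *
 *	bio->bio_done = my_iodone;	// hypothetical completion hook
 *	bio->bio_offset = byte_offset;	// hypothetical byte offset
 *	dev_dstrategy(dev, bio);	// installs a bio_track on the BIO
 *
 * whereas dev_dstrategy_chain() is used when vn_strategy() has already
 * associated a bio_track with the BIO, so no new track is pushed.
 */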
void
dev_dstrategy(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	struct bio_track *track;
	struct buf *bp = bio->bio_buf;
	int needmplock = dev_needmplock(dev);

	/*
	 * If the device does not support KVABIO and the buffer is using
	 * KVABIO, we must synchronize b_data to all cpus before dispatching.
	 */
	if (dev_nokvabio(dev) && (bp->b_flags & B_KVABIO))
		bkvasync_all(bp);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track == NULL);
	KKASSERT(bp->b_cmd != BUF_CMD_DONE);
	if (bp->b_cmd == BUF_CMD_READ)
		track = &dev->si_track_read;
	else
		track = &dev->si_track_write;
	bio_track_ref(track);
	bio->bio_track = track;
	dsched_buf_enter(bp);	/* might stack */

	KKASSERT((bio->bio_flags & BIO_DONE) == 0);
	if (needmplock)
		get_mplock();
	(void)dev->si_ops->d_strategy(&ap);
	if (needmplock)
		rel_mplock();
}
void
dev_dstrategy_chain(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	struct buf *bp = bio->bio_buf;
	int needmplock = dev_needmplock(dev);

	/*
	 * If the device does not support KVABIO and the buffer is using
	 * KVABIO, we must synchronize b_data to all cpus before dispatching.
	 */
	if (dev_nokvabio(dev) && (bp->b_flags & B_KVABIO))
		bkvasync_all(bp);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track != NULL);
	KKASSERT((bio->bio_flags & BIO_DONE) == 0);
	if (needmplock)
		get_mplock();
	(void)dev->si_ops->d_strategy(&ap);
	if (needmplock)
		rel_mplock();
}
/*
 * note: the disk layer is expected to set count, blkno, and secsize before
 * forwarding the message.
 */
int
dev_ddump(cdev_t dev, void *virtual, vm_offset_t physical, off_t offset,
	  size_t length)
{
	struct dev_dump_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_dump_desc;
	ap.a_head.a_dev = dev;
	ap.a_count = 0;
	ap.a_blkno = 0;
	ap.a_secsize = 0;
	ap.a_virtual = virtual;
	ap.a_physical = physical;
	ap.a_offset = offset;
	ap.a_length = length;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_dump(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
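
/*
 * Illustrative sketch (not from the original source): per the note above,
 * a disk layer forwarding a dump request is expected to fill in the
 * count/blkno/secsize fields (assumed here to be ap->a_count, ap->a_blkno
 * and ap->a_secsize) before passing the message on to the underlying
 * device's d_dump entry point.
 */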
int64_t
dev_dpsize(cdev_t dev)
{
	struct dev_psize_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_psize_desc;
	ap.a_head.a_dev = dev;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_psize(&ap);
	if (needmplock)
		rel_mplock();

	if (error == 0)
		return (ap.a_result);
	return(-1);
}
/*
 * Pass-thru to the device kqfilter.
 *
 * NOTE: We explicitly preset a_result to 0 so d_kqfilter() functions
 *	 which return 0 do not have to bother setting a_result.
 */
int
dev_dkqfilter(cdev_t dev, struct knote *kn, struct file *fp)
{
	struct dev_kqfilter_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_kqfilter_desc;
	ap.a_head.a_dev = dev;
	ap.a_kn = kn;
	ap.a_result = 0;
	ap.a_fp = fp;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_kqfilter(&ap);
	if (needmplock)
		rel_mplock();

	if (error == 0)
		error = ap.a_result;
	return (error);
}
/************************************************************************
 *	DEVICE HELPER FUNCTIONS						*
 ************************************************************************/
int
dev_drefs(cdev_t dev)
{
	return(dev->si_sysref.refcnt);
}
const char *
dev_dname(cdev_t dev)
{
	return(dev->si_ops->head.name);
}
int
dev_dflags(cdev_t dev)
{
	return(dev->si_ops->head.flags);
}
int
dev_dmaj(cdev_t dev)
{
	return(dev->si_ops->head.maj);
}
/*
 * Used when forwarding a request through layers.  The caller adjusts
 * ap->a_head.a_dev and then calls this function.
 */
int
dev_doperate(struct dev_generic_args *ap)
{
	int (*func)(struct dev_generic_args *);
	int needmplock = dev_needmplock(ap->a_dev);
	int error;

	func = *(void **)((char *)ap->a_dev->si_ops + ap->a_desc->sd_offset);

	if (needmplock)
		get_mplock();
	error = func(ap);
	if (needmplock)
		rel_mplock();

	return (error);
}
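
/*
 * Illustrative sketch (not from the original source): a layered driver that
 * wants to forward an operation downward can retarget the request and
 * re-dispatch it, e.g.
 *
 *	static int
 *	mylayer_forward(struct dev_generic_args *ap, cdev_t lower_dev)
 *	{
 *		ap->a_dev = lower_dev;		// hypothetical lower device
 *		return (dev_doperate(ap));	// re-dispatch via sd_offset
 *	}
 */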
/*
 * Used by the console intercept code only.  Issue an operation through
 * a foreign ops structure allowing the ops structure associated
 * with the device to remain intact.
 */
int
dev_doperate_ops(struct dev_ops *ops, struct dev_generic_args *ap)
{
	int (*func)(struct dev_generic_args *);
	int needmplock = ((ops->head.flags & D_MPSAFE) == 0);
	int error;

	func = *(void **)((char *)ops + ap->a_desc->sd_offset);

	if (needmplock)
		get_mplock();
	error = func(ap);
	if (needmplock)
		rel_mplock();

	return (error);
}
/*
 * Convert a template dev_ops into the real thing by filling in
 * uninitialized fields.
 */
void
compile_dev_ops(struct dev_ops *ops)
{
	int offset;

	for (offset = offsetof(struct dev_ops, dev_ops_first_field);
	     offset <= offsetof(struct dev_ops, dev_ops_last_field);
	     offset += sizeof(void *)
	) {
		void **func_p = (void **)((char *)ops + offset);
		void **def_p = (void **)((char *)&default_dev_ops + offset);
		if (*func_p == NULL) {
			if (ops->d_default)
				*func_p = ops->d_default;
			else
				*func_p = *def_p;
		}
	}
}
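
/*
 * Illustrative sketch (not from the original source): a driver normally
 * supplies only the entry points it implements and lets compile_dev_ops()
 * fill the rest from d_default or from default_dev_ops, e.g.
 *
 *	static struct dev_ops mydriver_ops = {	// hypothetical driver
 *		{ "mydriver", 0, 0 },
 *		.d_open  = mydriver_open,
 *		.d_close = mydriver_close,
 *		.d_read  = mydriver_read,
 *		// remaining fields left NULL; compile_dev_ops() fills them in
 *	};
 */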
/************************************************************************
 *	MAJOR/MINOR SPACE FUNCTION					*
 ************************************************************************/
/*
 * This makes a dev_ops entry visible to userland (e.g /dev/<blah>).
 *
 * Disk devices typically register their major, e.g. 'ad0', and then call
 * into the disk label management code which overloads its own onto e.g. 'ad0'
 * to support all the various slice and partition combinations.
 *
 * The mask/match supplied in this call are a full 32 bits and the same
 * mask and match must be specified in a later dev_ops_remove() call to
 * match this add.  However, the match value for the minor number should never
 * have any bits set in the major number's bit range (8-15).  The mask value
 * may be conveniently specified as -1 without creating any major number
 * conflicts.
 */
static int
rb_dev_ops_compare(struct dev_ops_maj *a, struct dev_ops_maj *b)
{
	if (a->maj < b->maj)
		return (-1);
	else if (a->maj > b->maj)
		return (1);
	return (0);
}

RB_GENERATE2(dev_ops_rb_tree, dev_ops_maj, rbnode, rb_dev_ops_compare, int, maj);
struct dev_ops_rb_tree dev_ops_rbhead = RB_INITIALIZER(dev_ops_rbhead);
int
dev_ops_remove_all(struct dev_ops *ops)
{
	return devfs_destroy_dev_by_ops(ops, -1);
}
int
dev_ops_remove_minor(struct dev_ops *ops, int minor)
{
	return devfs_destroy_dev_by_ops(ops, minor);
}
struct dev_ops *
dev_ops_intercept(cdev_t dev, struct dev_ops *iops)
{
	struct dev_ops *oops = dev->si_ops;

	compile_dev_ops(iops);
	iops->head.maj = oops->head.maj;
	iops->head.data = oops->head.data;
	iops->head.flags = oops->head.flags;
	dev->si_ops = iops;
	dev->si_flags |= SI_INTERCEPTED;

	return (oops);
}
void
dev_ops_restore(cdev_t dev, struct dev_ops *oops)
{
	struct dev_ops *iops = dev->si_ops;

	dev->si_ops = oops;
	dev->si_flags &= ~SI_INTERCEPTED;
	iops->head.maj = 0;
	iops->head.data = NULL;
	iops->head.flags = 0;
}
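
/*
 * Illustrative sketch (not from the original source): the intercept API is
 * intended as a paired save/restore, e.g. by the console intercept code:
 *
 *	struct dev_ops *saved_ops;
 *
 *	saved_ops = dev_ops_intercept(dev, &my_icept_ops); // hypothetical ops
 *	// ... operations now route through my_icept_ops ...
 *	dev_ops_restore(dev, saved_ops);
 */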
/************************************************************************
 *	DEFAULT DEV OPS FUNCTIONS					*
 ************************************************************************/
/*
 * Unsupported devswitch functions (e.g. for writing to read-only device).
 * XXX may belong elsewhere.
 */
static int
norevoke(struct dev_revoke_args *ap)
{
	return (0);
}

static int
noclone(struct dev_clone_args *ap)
{
	return (0);	/* allow the clone */
}
static int
noopen(struct dev_open_args *ap)
{
	return (ENODEV);
}

static int
noclose(struct dev_close_args *ap)
{
	return (0);
}

static int
noread(struct dev_read_args *ap)
{
	return (ENODEV);
}

static int
nowrite(struct dev_write_args *ap)
{
	return (ENODEV);
}

static int
noioctl(struct dev_ioctl_args *ap)
{
	return (ENODEV);
}

static int
nokqfilter(struct dev_kqfilter_args *ap)
{
	return (ENODEV);
}

static int
nommap(struct dev_mmap_args *ap)
{
	return (ENODEV);
}

static int
nommap_single(struct dev_mmap_single_args *ap)
{
	return (ENODEV);
}
static int
nostrategy(struct dev_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;

	bio->bio_buf->b_flags |= B_ERROR;
	bio->bio_buf->b_error = EOPNOTSUPP;
	biodone(bio);
	return (0);
}
static int
nopsize(struct dev_psize_args *ap)
{
	ap->a_result = 0;
	return (0);
}
static int
nodump(struct dev_dump_args *ap)
{
	return (ENODEV);
}
/*
 * XXX this is probably bogus.  Any device that uses it isn't checking the
 * minor number.
 */
int
nullopen(struct dev_open_args *ap)
{
	return (0);
}

int
nullclose(struct dev_close_args *ap)
{
	return (0);
}