2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * ----------------------------------------------------------------------------
35 * "THE BEER-WARE LICENSE" (Revision 42):
36 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
37 * can do whatever you want with this stuff. If we meet some day, and you think
38 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
39 * ----------------------------------------------------------------------------
41 * Copyright (c) 1982, 1986, 1988, 1993
42 * The Regents of the University of California. All rights reserved.
43 * (c) UNIX System Laboratories, Inc.
44 * All or some portions of this file are derived from material licensed
45 * to the University of California by American Telephone and Telegraph
46 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47 * the permission of UNIX System Laboratories, Inc.
49 * Redistribution and use in source and binary forms, with or without
50 * modification, are permitted provided that the following conditions
52 * 1. Redistributions of source code must retain the above copyright
53 * notice, this list of conditions and the following disclaimer.
54 * 2. Redistributions in binary form must reproduce the above copyright
55 * notice, this list of conditions and the following disclaimer in the
56 * documentation and/or other materials provided with the distribution.
57 * 3. All advertising materials mentioning features or use of this software
58 * must display the following acknowledgement:
59 * This product includes software developed by the University of
60 * California, Berkeley and its contributors.
61 * 4. Neither the name of the University nor the names of its contributors
62 * may be used to endorse or promote products derived from this software
63 * without specific prior written permission.
65 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
77 * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94
78 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
79 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
80 * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
87 #include <sys/sysctl.h>
90 #include <sys/disklabel.h>
91 #include <sys/disklabel32.h>
92 #include <sys/disklabel64.h>
93 #include <sys/diskslice.h>
94 #include <sys/diskmbr.h>
96 #include <sys/malloc.h>
97 #include <sys/sysctl.h>
98 #include <machine/md_var.h>
99 #include <sys/ctype.h>
100 #include <sys/syslog.h>
101 #include <sys/device.h>
102 #include <sys/msgport.h>
103 #include <sys/msgport2.h>
104 #include <sys/buf2.h>
105 #include <vfs/devfs/devfs.h>
106 #include <sys/thread.h>
107 #include <sys/thread2.h>
109 #include <sys/queue.h>
110 #include <sys/lock.h>
112 static MALLOC_DEFINE(M_DISK
, "disk", "disk data");
114 static void disk_msg_autofree_reply(lwkt_port_t
, lwkt_msg_t
);
115 static void disk_msg_core(void *);
116 static int disk_probe_slice(struct disk
*dp
, cdev_t dev
, int slice
, int reprobe
);
117 static void disk_probe(struct disk
*dp
, int reprobe
);
118 static void _setdiskinfo(struct disk
*disk
, struct disk_info
*info
);
120 static d_open_t diskopen
;
121 static d_close_t diskclose
;
122 static d_ioctl_t diskioctl
;
123 static d_strategy_t diskstrategy
;
124 static d_psize_t diskpsize
;
125 static d_clone_t diskclone
;
126 static d_dump_t diskdump
;
128 static LIST_HEAD(, disk
) disklist
= LIST_HEAD_INITIALIZER(&disklist
);
129 static struct lwkt_token disklist_token
;
131 static struct dev_ops disk_ops
= {
132 { "disk", 0, D_DISK
},
134 .d_close
= diskclose
,
136 .d_write
= physwrite
,
137 .d_ioctl
= diskioctl
,
138 .d_strategy
= diskstrategy
,
140 .d_psize
= diskpsize
,
144 static struct objcache
*disk_msg_cache
;
146 struct objcache_malloc_args disk_msg_malloc_args
= {
147 sizeof(struct disk_msg
), M_DISK
};
149 static struct lwkt_port disk_dispose_port
;
150 static struct lwkt_port disk_msg_port
;
154 disk_probe_slice(struct disk
*dp
, cdev_t dev
, int slice
, int reprobe
)
156 struct disk_info
*info
= &dp
->d_info
;
157 struct diskslice
*sp
= &dp
->d_slice
->dss_slices
[slice
];
159 struct partinfo part
;
165 sno
= slice
? slice
- 1 : 0;
167 ops
= &disklabel32_ops
;
168 msg
= ops
->op_readdisklabel(dev
, sp
, &sp
->ds_label
, info
);
169 if (msg
&& !strcmp(msg
, "no disk label")) {
170 ops
= &disklabel64_ops
;
171 msg
= ops
->op_readdisklabel(dev
, sp
, &sp
->ds_label
, info
);
174 if (slice
!= WHOLE_DISK_SLICE
)
175 ops
->op_adjust_label_reserved(dp
->d_slice
, slice
, sp
);
180 for (i
= 0; i
< ops
->op_getnumparts(sp
->ds_label
); i
++) {
181 ops
->op_loadpartinfo(sp
->ds_label
, i
, &part
);
184 (ndev
= devfs_find_device_by_name("%s%c",
185 dev
->si_name
, 'a' + i
))
188 * Device already exists and
191 ndev
->si_flags
|= SI_REPROBE_TEST
;
193 ndev
= make_dev(&disk_ops
,
194 dkmakeminor(dkunit(dp
->d_cdev
),
196 UID_ROOT
, GID_OPERATOR
, 0640,
197 "%s%c", dev
->si_name
, 'a'+ i
);
199 if (dp
->d_info
.d_serialno
) {
202 dp
->d_info
.d_serialno
,
205 ndev
->si_flags
|= SI_REPROBE_TEST
;
209 } else if (info
->d_dsflags
& DSO_COMPATLABEL
) {
211 if (sp
->ds_size
>= 0x100000000ULL
)
212 ops
= &disklabel64_ops
;
214 ops
= &disklabel32_ops
;
215 sp
->ds_label
= ops
->op_clone_label(info
, sp
);
217 if (sp
->ds_type
== DOSPTYP_386BSD
/* XXX */) {
218 log(LOG_WARNING
, "%s: cannot find label (%s)\n",
224 sp
->ds_wlabel
= FALSE
;
227 return (msg
? EINVAL
: 0);
232 disk_probe(struct disk
*dp
, int reprobe
)
234 struct disk_info
*info
= &dp
->d_info
;
235 cdev_t dev
= dp
->d_cdev
;
238 struct diskslice
*sp
;
240 KKASSERT (info
->d_media_blksize
!= 0);
242 dp
->d_slice
= dsmakeslicestruct(BASE_SLICE
, info
);
244 error
= mbrinit(dev
, info
, &(dp
->d_slice
));
248 for (i
= 0; i
< dp
->d_slice
->dss_nslices
; i
++) {
250 * Ignore the whole-disk slice, it has already been created.
252 if (i
== WHOLE_DISK_SLICE
)
254 sp
= &dp
->d_slice
->dss_slices
[i
];
257 * Handle s0. s0 is a compatibility slice if there are no
258 * other slices and it has not otherwise been set up, else
261 if (i
== COMPATIBILITY_SLICE
) {
263 if (sp
->ds_type
== 0 &&
264 dp
->d_slice
->dss_nslices
== BASE_SLICE
) {
265 sp
->ds_size
= info
->d_media_blocks
;
274 * Ignore 0-length slices
276 if (sp
->ds_size
== 0)
280 (ndev
= devfs_find_device_by_name("%ss%d",
281 dev
->si_name
, sno
))) {
283 * Device already exists and is still valid
285 ndev
->si_flags
|= SI_REPROBE_TEST
;
288 * Else create new device
290 ndev
= make_dev(&disk_ops
,
291 dkmakewholeslice(dkunit(dev
), i
),
292 UID_ROOT
, GID_OPERATOR
, 0640,
293 "%ss%d", dev
->si_name
, sno
);
294 if (dp
->d_info
.d_serialno
) {
295 make_dev_alias(ndev
, "serno/%s.s%d",
296 dp
->d_info
.d_serialno
, sno
);
299 ndev
->si_flags
|= SI_REPROBE_TEST
;
304 * Probe appropriate slices for a disklabel
306 * XXX slice type 1 used by our gpt probe code.
307 * XXX slice type 0 used by mbr compat slice.
309 if (sp
->ds_type
== DOSPTYP_386BSD
|| sp
->ds_type
== 0 ||
311 if (dp
->d_slice
->dss_first_bsd_slice
== 0)
312 dp
->d_slice
->dss_first_bsd_slice
= i
;
313 disk_probe_slice(dp
, ndev
, i
, reprobe
);
320 disk_msg_core(void *arg
)
323 struct diskslice
*sp
;
328 lwkt_initport_thread(&disk_msg_port
, curthread
);
333 msg
= (disk_msg_t
)lwkt_waitport(&disk_msg_port
, 0);
335 switch (msg
->hdr
.u
.ms_result
) {
336 case DISK_DISK_PROBE
:
337 dp
= (struct disk
*)msg
->load
;
340 case DISK_DISK_DESTROY
:
341 dp
= (struct disk
*)msg
->load
;
342 devfs_destroy_subnames(dp
->d_cdev
->si_name
);
343 devfs_destroy_dev(dp
->d_cdev
);
344 lwkt_gettoken(&ilock
, &disklist_token
);
345 LIST_REMOVE(dp
, d_list
);
346 lwkt_reltoken(&ilock
);
347 if (dp
->d_info
.d_serialno
) {
348 kfree(dp
->d_info
.d_serialno
, M_TEMP
);
349 dp
->d_info
.d_serialno
= NULL
;
353 dp
= (struct disk
*)msg
->load
;
354 devfs_destroy_subnames(dp
->d_cdev
->si_name
);
356 case DISK_SLICE_REPROBE
:
357 dp
= (struct disk
*)msg
->load
;
358 sp
= (struct diskslice
*)msg
->load2
;
359 devfs_clr_subnames_flag(sp
->ds_dev
->si_name
,
361 devfs_debug(DEVFS_DEBUG_DEBUG
,
362 "DISK_SLICE_REPROBE: %s\n",
363 sp
->ds_dev
->si_name
);
364 disk_probe_slice(dp
, sp
->ds_dev
,
365 dkslice(sp
->ds_dev
), 1);
366 devfs_destroy_subnames_without_flag(
367 sp
->ds_dev
->si_name
, SI_REPROBE_TEST
);
369 case DISK_DISK_REPROBE
:
370 dp
= (struct disk
*)msg
->load
;
371 devfs_clr_subnames_flag(dp
->d_cdev
->si_name
, SI_REPROBE_TEST
);
372 devfs_debug(DEVFS_DEBUG_DEBUG
,
373 "DISK_DISK_REPROBE: %s\n",
374 dp
->d_cdev
->si_name
);
376 devfs_destroy_subnames_without_flag(
377 dp
->d_cdev
->si_name
, SI_REPROBE_TEST
);
382 devfs_debug(DEVFS_DEBUG_WARNING
,
383 "disk_msg_core: unknown message "
384 "received at core\n");
387 lwkt_replymsg((lwkt_msg_t
)msg
, 0);
394 * Acts as a message drain. Any message that is replied to here gets
395 * destroyed and the memory freed.
398 disk_msg_autofree_reply(lwkt_port_t port
, lwkt_msg_t msg
)
400 objcache_put(disk_msg_cache
, msg
);
405 disk_msg_send(uint32_t cmd
, void *load
, void *load2
)
408 lwkt_port_t port
= &disk_msg_port
;
410 disk_msg
= objcache_get(disk_msg_cache
, M_WAITOK
);
412 lwkt_initmsg(&disk_msg
->hdr
, &disk_dispose_port
, 0);
414 disk_msg
->hdr
.u
.ms_result
= cmd
;
415 disk_msg
->load
= load
;
416 disk_msg
->load2
= load2
;
418 lwkt_sendmsg(port
, (lwkt_msg_t
)disk_msg
);
422 disk_msg_send_sync(uint32_t cmd
, void *load
, void *load2
)
424 struct lwkt_port rep_port
;
425 disk_msg_t disk_msg
= objcache_get(disk_msg_cache
, M_WAITOK
);
426 disk_msg_t msg_incoming
;
427 lwkt_port_t port
= &disk_msg_port
;
429 lwkt_initport_thread(&rep_port
, curthread
);
430 lwkt_initmsg(&disk_msg
->hdr
, &rep_port
, 0);
432 disk_msg
->hdr
.u
.ms_result
= cmd
;
433 disk_msg
->load
= load
;
434 disk_msg
->load2
= load2
;
437 lwkt_sendmsg(port
, (lwkt_msg_t
)disk_msg
);
438 msg_incoming
= lwkt_waitport(&rep_port
, 0);
442 * Create a raw device for the dev_ops template (which is returned). Also
443 * create a slice and unit managed disk and overload the user visible
444 * device space with it.
446 * NOTE: The returned raw device is NOT a slice and unit managed device.
447 * It is an actual raw device representing the raw disk as specified by
448 * the passed dev_ops. The disk layer not only returns such a raw device,
449 * it also uses it internally when passing (modified) commands through.
452 disk_create(int unit
, struct disk
*dp
, struct dev_ops
*raw_ops
)
457 rawdev
= make_only_dev(raw_ops
, dkmakewholedisk(unit
),
458 UID_ROOT
, GID_OPERATOR
, 0640,
459 "%s%d", raw_ops
->head
.name
, unit
);
461 bzero(dp
, sizeof(*dp
));
463 dp
->d_rawdev
= rawdev
;
464 dp
->d_raw_ops
= raw_ops
;
465 dp
->d_dev_ops
= &disk_ops
;
466 dp
->d_cdev
= make_dev(&disk_ops
,
467 dkmakewholedisk(unit
),
468 UID_ROOT
, GID_OPERATOR
, 0640,
469 "%s%d", raw_ops
->head
.name
, unit
);
471 dp
->d_cdev
->si_disk
= dp
;
473 lwkt_gettoken(&ilock
, &disklist_token
);
474 LIST_INSERT_HEAD(&disklist
, dp
, d_list
);
475 lwkt_reltoken(&ilock
);
476 return (dp
->d_rawdev
);
481 _setdiskinfo(struct disk
*disk
, struct disk_info
*info
)
485 oldserialno
= disk
->d_info
.d_serialno
;
486 bcopy(info
, &disk
->d_info
, sizeof(disk
->d_info
));
487 info
= &disk
->d_info
;
490 * The serial number is duplicated so the caller can throw
493 if (info
->d_serialno
&& info
->d_serialno
[0]) {
494 info
->d_serialno
= kstrdup(info
->d_serialno
, M_TEMP
);
496 make_dev_alias(disk
->d_cdev
, "serno/%s",
500 info
->d_serialno
= NULL
;
503 kfree(oldserialno
, M_TEMP
);
506 * The caller may set d_media_size or d_media_blocks and we
507 * calculate the other.
509 KKASSERT(info
->d_media_size
== 0 || info
->d_media_blksize
== 0);
510 if (info
->d_media_size
== 0 && info
->d_media_blocks
) {
511 info
->d_media_size
= (u_int64_t
)info
->d_media_blocks
*
512 info
->d_media_blksize
;
513 } else if (info
->d_media_size
&& info
->d_media_blocks
== 0 &&
514 info
->d_media_blksize
) {
515 info
->d_media_blocks
= info
->d_media_size
/
516 info
->d_media_blksize
;
520 * The si_* fields for rawdev are not set until after the
521 * disk_create() call, so someone using the cooked version
522 * of the raw device (i.e. da0s0) will not get the right
523 * si_iosize_max unless we fix it up here.
525 if (disk
->d_cdev
&& disk
->d_rawdev
&&
526 disk
->d_cdev
->si_iosize_max
== 0) {
527 disk
->d_cdev
->si_iosize_max
= disk
->d_rawdev
->si_iosize_max
;
528 disk
->d_cdev
->si_bsize_phys
= disk
->d_rawdev
->si_bsize_phys
;
529 disk
->d_cdev
->si_bsize_best
= disk
->d_rawdev
->si_bsize_best
;
534 * Disk drivers must call this routine when media parameters are available
538 disk_setdiskinfo(struct disk
*disk
, struct disk_info
*info
)
540 _setdiskinfo(disk
, info
);
541 disk_msg_send(DISK_DISK_PROBE
, disk
, NULL
);
545 disk_setdiskinfo_sync(struct disk
*disk
, struct disk_info
*info
)
547 _setdiskinfo(disk
, info
);
548 disk_msg_send_sync(DISK_DISK_PROBE
, disk
, NULL
);
552 * This routine is called when an adapter detaches. The higher level
553 * managed disk device is destroyed while the lower level raw device is
557 disk_destroy(struct disk
*disk
)
559 disk_msg_send_sync(DISK_DISK_DESTROY
, disk
, NULL
);
564 disk_dumpcheck(cdev_t dev
, u_int64_t
*count
, u_int64_t
*blkno
, u_int
*secsize
)
566 struct partinfo pinfo
;
569 bzero(&pinfo
, sizeof(pinfo
));
570 error
= dev_dioctl(dev
, DIOCGPART
, (void *)&pinfo
, 0, proc0
.p_ucred
);
573 if (pinfo
.media_blksize
== 0)
575 *count
= (u_int64_t
)Maxmem
* PAGE_SIZE
/ pinfo
.media_blksize
;
576 if (dumplo64
< pinfo
.reserved_blocks
||
577 dumplo64
+ *count
> pinfo
.media_blocks
) {
580 *blkno
= dumplo64
+ pinfo
.media_offset
/ pinfo
.media_blksize
;
581 *secsize
= pinfo
.media_blksize
;
586 disk_unprobe(struct disk
*disk
)
591 disk_msg_send_sync(DISK_UNPROBE
, disk
, NULL
);
595 disk_invalidate (struct disk
*disk
)
598 dsgone(&disk
->d_slice
);
602 disk_enumerate(struct disk
*disk
)
607 lwkt_gettoken(&ilock
, &disklist_token
);
609 dp
= (LIST_FIRST(&disklist
));
611 dp
= (LIST_NEXT(disk
, d_list
));
612 lwkt_reltoken(&ilock
);
619 sysctl_disks(SYSCTL_HANDLER_ARGS
)
627 while ((disk
= disk_enumerate(disk
))) {
629 error
= SYSCTL_OUT(req
, " ", 1);
635 error
= SYSCTL_OUT(req
, disk
->d_rawdev
->si_name
,
636 strlen(disk
->d_rawdev
->si_name
));
640 error
= SYSCTL_OUT(req
, "", 1);
644 SYSCTL_PROC(_kern
, OID_AUTO
, disks
, CTLTYPE_STRING
| CTLFLAG_RD
, NULL
, 0,
645 sysctl_disks
, "A", "names of available disks");
648 * Open a disk device or partition.
652 diskopen(struct dev_open_args
*ap
)
654 cdev_t dev
= ap
->a_head
.a_dev
;
659 * dp can't be NULL here XXX.
661 * d_slice will be NULL if setdiskinfo() has not been called yet.
662 * setdiskinfo() is typically called whether the disk is present
663 * or not (e.g. CD), but the base disk device is created first
664 * and there may be a race.
667 if (dp
== NULL
|| dp
->d_slice
== NULL
)
672 * Deal with open races
674 while (dp
->d_flags
& DISKFLAG_LOCK
) {
675 dp
->d_flags
|= DISKFLAG_WANTED
;
676 error
= tsleep(dp
, PCATCH
, "diskopen", hz
);
680 dp
->d_flags
|= DISKFLAG_LOCK
;
683 * Open the underlying raw device.
685 if (!dsisopen(dp
->d_slice
)) {
687 if (!pdev
->si_iosize_max
)
688 pdev
->si_iosize_max
= dev
->si_iosize_max
;
690 error
= dev_dopen(dp
->d_rawdev
, ap
->a_oflags
,
691 ap
->a_devtype
, ap
->a_cred
);
695 * Inherit properties from the underlying device now that it is
703 error
= dsopen(dev
, ap
->a_devtype
, dp
->d_info
.d_dsflags
,
704 &dp
->d_slice
, &dp
->d_info
);
705 if (!dsisopen(dp
->d_slice
)) {
706 dev_dclose(dp
->d_rawdev
, ap
->a_oflags
, ap
->a_devtype
);
709 dp
->d_flags
&= ~DISKFLAG_LOCK
;
710 if (dp
->d_flags
& DISKFLAG_WANTED
) {
711 dp
->d_flags
&= ~DISKFLAG_WANTED
;
719 * Close a disk device or partition
723 diskclose(struct dev_close_args
*ap
)
725 cdev_t dev
= ap
->a_head
.a_dev
;
732 dsclose(dev
, ap
->a_devtype
, dp
->d_slice
);
733 if (!dsisopen(dp
->d_slice
)) {
734 error
= dev_dclose(dp
->d_rawdev
, ap
->a_fflag
, ap
->a_devtype
);
740 * First execute the ioctl on the disk device, and if it isn't supported
741 * try running it on the backing device.
745 diskioctl(struct dev_ioctl_args
*ap
)
747 cdev_t dev
= ap
->a_head
.a_dev
;
755 devfs_debug(DEVFS_DEBUG_DEBUG
,
756 "diskioctl: cmd is: %x (name: %s)\n",
757 ap
->a_cmd
, dev
->si_name
);
758 devfs_debug(DEVFS_DEBUG_DEBUG
,
759 "diskioctl: &dp->d_slice is: %x, %x\n",
760 &dp
->d_slice
, dp
->d_slice
);
762 error
= dsioctl(dev
, ap
->a_cmd
, ap
->a_data
, ap
->a_fflag
,
763 &dp
->d_slice
, &dp
->d_info
);
765 if (error
== ENOIOCTL
) {
766 error
= dev_dioctl(dp
->d_rawdev
, ap
->a_cmd
, ap
->a_data
,
767 ap
->a_fflag
, ap
->a_cred
);
773 * Execute strategy routine
777 diskstrategy(struct dev_strategy_args
*ap
)
779 cdev_t dev
= ap
->a_head
.a_dev
;
780 struct bio
*bio
= ap
->a_bio
;
787 bio
->bio_buf
->b_error
= ENXIO
;
788 bio
->bio_buf
->b_flags
|= B_ERROR
;
792 KKASSERT(dev
->si_disk
== dp
);
795 * The dscheck() function will also transform the slice relative
796 * block number i.e. bio->bio_offset into a block number that can be
797 * passed directly to the underlying raw device. If dscheck()
798 * returns NULL it will have handled the bio for us (e.g. EOF
799 * or error due to being beyond the device size).
801 if ((nbio
= dscheck(dev
, bio
, dp
->d_slice
)) != NULL
) {
802 dev_dstrategy(dp
->d_rawdev
, nbio
);
810 * Return the partition size in ?blocks?
814 diskpsize(struct dev_psize_args
*ap
)
816 cdev_t dev
= ap
->a_head
.a_dev
;
822 ap
->a_result
= dssize(dev
, &dp
->d_slice
);
827 * When new device entries are instantiated, make sure they inherit our
828 * si_disk structure and block and iosize limits from the raw device.
830 * This routine is always called synchronously in the context of the
833 * XXX The various io and block size constraints are not always initialized
834 * properly by devices.
838 diskclone(struct dev_clone_args
*ap
)
840 cdev_t dev
= ap
->a_head
.a_dev
;
844 KKASSERT(dp
!= NULL
);
846 dev
->si_iosize_max
= dp
->d_rawdev
->si_iosize_max
;
847 dev
->si_bsize_phys
= dp
->d_rawdev
->si_bsize_phys
;
848 dev
->si_bsize_best
= dp
->d_rawdev
->si_bsize_best
;
853 diskdump(struct dev_dump_args
*ap
)
855 cdev_t dev
= ap
->a_head
.a_dev
;
856 struct disk
*dp
= dev
->si_disk
;
859 error
= disk_dumpcheck(dev
, &ap
->a_count
, &ap
->a_blkno
, &ap
->a_secsize
);
861 ap
->a_head
.a_dev
= dp
->d_rawdev
;
862 error
= dev_doperate(&ap
->a_head
);
869 SYSCTL_INT(_debug_sizeof
, OID_AUTO
, diskslices
, CTLFLAG_RD
,
870 0, sizeof(struct diskslices
), "sizeof(struct diskslices)");
872 SYSCTL_INT(_debug_sizeof
, OID_AUTO
, disk
, CTLFLAG_RD
,
873 0, sizeof(struct disk
), "sizeof(struct disk)");
876 * How sorted do we want to be? The higher the number the harder we try
877 * to sort, but also the higher the risk of bio's getting starved do
878 * to insertions in front of them.
880 static int bioq_barrier
= 16;
881 SYSCTL_INT(_kern
, OID_AUTO
, bioq_barrier
, CTLFLAG_RW
, &bioq_barrier
, 0, "");
885 * Seek sort for disks.
887 * The bio_queue keep two queues, sorted in ascending block order. The first
888 * queue holds those requests which are positioned after the current block
889 * (in the first request); the second, which starts at queue->switch_point,
890 * holds requests which came in after their block number was passed. Thus
891 * we implement a one way scan, retracting after reaching the end of the drive
892 * to the first request on the second queue, at which time it becomes the
895 * A one-way scan is natural because of the way UNIX read-ahead blocks are
899 bioqdisksort(struct bio_queue_head
*bioq
, struct bio
*bio
)
905 be
= TAILQ_LAST(&bioq
->queue
, bio_queue
);
908 * If the queue is empty or we are an
909 * ordered transaction, then it's easy.
911 if ((bq
= bioq_first(bioq
)) == NULL
||
912 (bio
->bio_buf
->b_flags
& B_ORDERED
) != 0) {
913 bioq_insert_tail(bioq
, bio
);
918 * Avoid permanent request starvation by forcing the request to
919 * be ordered every 16 requests. Without this long sequential
920 * write pipelines can prevent requests later in the queue from
921 * getting serviced for many seconds.
923 if (++bioq
->order_count
>= bioq_barrier
) {
924 bioq_insert_tail_order(bioq
, bio
, 1);
928 if (bioq
->insert_point
!= NULL
) {
930 * A certain portion of the list is
931 * "locked" to preserve ordering, so
932 * we can only insert after the insert
935 bq
= bioq
->insert_point
;
938 * If we lie before the last removed (currently active)
939 * request, and are not inserting ourselves into the
940 * "locked" portion of the list, then we must add ourselves
941 * to the second request list.
943 if (bio
->bio_offset
< bioq
->last_offset
) {
944 bq
= bioq
->switch_point
;
947 * If we are starting a new secondary list,
951 bioq
->switch_point
= bio
;
952 bioq_insert_tail(bioq
, bio
);
957 * If we lie ahead of the current switch point,
958 * insert us before the switch point and move
961 if (bio
->bio_offset
< bq
->bio_offset
) {
962 bioq
->switch_point
= bio
;
963 TAILQ_INSERT_BEFORE(bq
, bio
, bio_act
);
967 if (bioq
->switch_point
!= NULL
)
968 be
= TAILQ_PREV(bioq
->switch_point
,
971 * If we lie between last_offset and bq,
974 if (bio
->bio_offset
< bq
->bio_offset
) {
975 TAILQ_INSERT_BEFORE(bq
, bio
, bio_act
);
982 * Request is at/after our current position in the list.
983 * Optimize for sequential I/O by seeing if we go at the tail.
985 if (bio
->bio_offset
> be
->bio_offset
) {
986 TAILQ_INSERT_AFTER(&bioq
->queue
, be
, bio
, bio_act
);
990 /* Otherwise, insertion sort */
991 while ((bn
= TAILQ_NEXT(bq
, bio_act
)) != NULL
) {
993 * We want to go after the current request if it is the end
994 * of the first request list, or if the next request is a
995 * larger cylinder than our request.
997 if (bn
== bioq
->switch_point
||
998 bio
->bio_offset
< bn
->bio_offset
) {
1003 TAILQ_INSERT_AFTER(&bioq
->queue
, bq
, bio
, bio_act
);
1007 * Disk error is the preface to plaintive error messages
1008 * about failing disk transfers. It prints messages of the form
1010 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
1012 * if the offset of the error in the transfer and a disk label
1013 * are both available. blkdone should be -1 if the position of the error
1014 * is unknown; the disklabel pointer may be null from drivers that have not
1015 * been converted to use them. The message is printed with kprintf
1016 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
1017 * The message should be completed (with at least a newline) with kprintf
1018 * or log(-1, ...), respectively. There is no trailing space.
1021 diskerr(struct bio
*bio
, cdev_t dev
, const char *what
, int pri
, int donecnt
)
1023 struct buf
*bp
= bio
->bio_buf
;
1037 kprintf("%s: %s %sing ", dev
->si_name
, what
, term
);
1038 kprintf("offset %012llx for %d",
1039 (long long)bio
->bio_offset
,
1043 kprintf(" (%d bytes completed)", donecnt
);
1047 * Locate a disk device
1050 disk_locate(const char *devname
)
1052 return devfs_find_device_by_name(devname
);
1056 disk_config(void *arg
)
1058 disk_msg_send_sync(DISK_SYNC
, NULL
, NULL
);
1064 struct thread
* td_core
;
1066 disk_msg_cache
= objcache_create("disk-msg-cache", 0, 0,
1068 objcache_malloc_alloc
,
1069 objcache_malloc_free
,
1070 &disk_msg_malloc_args
);
1072 lwkt_token_init(&disklist_token
);
1075 * Initialize the reply-only port which acts as a message drain
1077 lwkt_initport_replyonly(&disk_dispose_port
, disk_msg_autofree_reply
);
1079 lwkt_create(disk_msg_core
, /*args*/NULL
, &td_core
, NULL
,
1080 0, 0, "disk_msg_core");
1082 tsleep(td_core
, 0, "diskcore", 0);
1088 objcache_destroy(disk_msg_cache
);
SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);