/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * and Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/disklabel32.h>
#include <sys/disklabel64.h>
#include <sys/diskslice.h>
#include <sys/diskmbr.h>
#include <sys/disk.h>
#include <sys/kerneldump.h>
#include <sys/malloc.h>
#include <machine/md_var.h>
#include <sys/ctype.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/msgport.h>
#include <sys/devfs.h>
#include <sys/thread.h>
#include <sys/dsched.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/udev.h>
#include <sys/uuid.h>

#include <sys/buf2.h>
#include <sys/msgport2.h>
#include <sys/thread2.h>

static MALLOC_DEFINE(M_DISK, "disk", "disk data");
static int disk_debug_enable = 0;

static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void disk_msg_core(void *);
static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
static void disk_probe(struct disk *dp, int reprobe);
static void _setdiskinfo(struct disk *disk, struct disk_info *info);
static void bioqwritereorder(struct bio_queue_head *bioq);
static void disk_cleanserial(char *serno);
static int disk_debug(int, char *, ...) __printflike(2, 3);
static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp,
    struct dev_ops *raw_ops, int clone);

static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_dump_t diskdump;

static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
static struct lwkt_token disklist_token;
static struct lwkt_token ds_token;

static struct dev_ops disk1_ops = {
	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE | D_KVABIO },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
};

static struct dev_ops disk2_ops = {
	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE | D_KVABIO |
		     D_NOEMERGPGR },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
};

static struct objcache *disk_msg_cache;

struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg), M_DISK };

static struct lwkt_port disk_dispose_port;
static struct lwkt_port disk_msg_port;

static int
disk_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= disk_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}

static int
disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
	disklabel_ops_t ops;
	struct dev_ops *dops;
	struct partinfo part;
	const char *msg;
	char uuid_buf[128];
	cdev_t ndev;
	int sno;
	u_int i;

	disk_debug(2, "disk_probe_slice (begin): %s (%s)\n",
		   dev->si_name, dp->d_cdev->si_name);

	sno = slice ? slice - 1 : 0;
	dops = (dp->d_rawdev->si_ops->head.flags & D_NOEMERGPGR) ?
		&disk2_ops : &disk1_ops;

	ops = &disklabel32_ops;
	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	if (msg && !strcmp(msg, "no disk label")) {
		ops = &disklabel64_ops;
		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	}

	if (msg == NULL) {
		if (slice != WHOLE_DISK_SLICE)
			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
		else
			sp->ds_reserved = 0;

		sp->ds_ops = ops;
		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
			ops->op_loadpartinfo(sp->ds_label, i, &part);
			if (part.fstype) {
				if (reprobe &&
				    (ndev = devfs_find_device_by_name("%s%c",
						dev->si_name, 'a' + i))) {
					/*
					 * Device already exists and
					 * is still valid.
					 */
					ndev->si_flags |= SI_REPROBE_TEST;

					/*
					 * Destroy old UUID alias
					 */
					destroy_dev_alias(ndev, "part-by-uuid/*");

					/* Create UUID alias */
					if (!kuuid_is_nil(&part.storage_uuid)) {
						snprintf_uuid(uuid_buf,
						    sizeof(uuid_buf),
						    &part.storage_uuid);
						make_dev_alias(ndev,
						    "part-by-uuid/%s",
						    uuid_buf);
						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
					}
				} else {
					ndev = make_dev_covering(dops,
						dp->d_rawdev->si_ops,
						dkmakeminor(dkunit(dp->d_cdev),
							    slice, i),
						UID_ROOT, GID_OPERATOR, 0640,
						"%s%c", dev->si_name, 'a'+ i);
					ndev->si_parent = dev;
					ndev->si_iosize_max = dev->si_iosize_max;
					ndev->si_disk = dp;
					udev_dict_set_cstr(ndev, "subsystem", "disk");
					/* Inherit parent's disk type */
					if (dp->d_disktype) {
						udev_dict_set_cstr(ndev, "disk-type",
						    __DECONST(char *, dp->d_disktype));
					}

					/* Create serno alias */
					if (dp->d_info.d_serialno) {
						make_dev_alias(ndev,
						    "serno/%s.s%d%c",
						    dp->d_info.d_serialno,
						    sno, 'a' + i);
					}

					/* Create UUID alias */
					if (!kuuid_is_nil(&part.storage_uuid)) {
						snprintf_uuid(uuid_buf,
						    sizeof(uuid_buf),
						    &part.storage_uuid);
						make_dev_alias(ndev,
						    "part-by-uuid/%s",
						    uuid_buf);
						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
					}
					ndev->si_flags |= SI_REPROBE_TEST;
				}
			}
		}
	} else if (info->d_dsflags & DSO_COMPATLABEL) {
		msg = NULL;
		if (sp->ds_size >= 0x100000000ULL)
			ops = &disklabel64_ops;
		else
			ops = &disklabel32_ops;
		sp->ds_label = ops->op_clone_label(info, sp);
	} else {
		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD ||
		    sp->ds_type == DOSPTYP_DFLYBSD) {
			log(LOG_WARNING, "%s: cannot find label (%s)\n",
			    dev->si_name, msg);
		}

		if (sp->ds_label.opaque != NULL && sp->ds_ops != NULL) {
			/* Clear out old label - it's not around anymore */
			disk_debug(2,
			    "disk_probe_slice: clear out old disklabel on %s\n",
			    dev->si_name);

			sp->ds_ops->op_freedisklabel(&sp->ds_label);
			sp->ds_ops = NULL;
		}
	}

	if (msg == NULL) {
		sp->ds_wlabel = FALSE;
	}

	return (msg ? EINVAL : 0);
}

/*
 * This routine is only called for newly minted drives or to reprobe
 * a drive with no open slices.  disk_probe_slice() is called directly
 * when reprobing partition changes within slices.
 */
static void
disk_probe(struct disk *dp, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i, sno;
	struct diskslices *osp;
	struct diskslice *sp;
	struct dev_ops *dops;
	char uuid_buf[128];

	/*
	 * d_media_blksize can be 0 for non-disk storage devices such
	 * as audio CDs.
	 */
	if (info->d_media_blksize == 0)
		return;

	osp = dp->d_slice;
	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);

	error = mbrinit(dev, info, &(dp->d_slice));
	if (error) {
		dsgone(&osp);
		return;
	}

	dops = (dp->d_rawdev->si_ops->head.flags & D_NOEMERGPGR) ?
		&disk2_ops : &disk1_ops;

	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
		/*
		 * Ignore the whole-disk slice, it has already been created.
		 */
		if (i == WHOLE_DISK_SLICE)
			continue;

#if 1
		/*
		 * Ignore the compatibility slice s0 if it's a device mapper
		 * volume.
		 */
		if ((i == COMPATIBILITY_SLICE) &&
		    (info->d_dsflags & DSO_DEVICEMAPPER))
			continue;
#endif

		sp = &dp->d_slice->dss_slices[i];

		/*
		 * Handle s0.  s0 is a compatibility slice if there are no
		 * other slices and it has not otherwise been set up, else
		 * we ignore it.
		 */
		if (i == COMPATIBILITY_SLICE) {
			sno = 0;
			if (sp->ds_type == 0 &&
			    dp->d_slice->dss_nslices == BASE_SLICE) {
				sp->ds_size = info->d_media_blocks;
				sp->ds_reserved = 0;
			}
		} else {
			sno = i - 1;
			sp->ds_reserved = 0;
		}

		/*
		 * Ignore 0-length slices
		 */
		if (sp->ds_size == 0)
			continue;

		if (reprobe &&
		    (ndev = devfs_find_device_by_name("%ss%d",
						      dev->si_name, sno))) {
			/*
			 * Device already exists and is still valid
			 */
			ndev->si_flags |= SI_REPROBE_TEST;

			/*
			 * Destroy old UUID alias
			 */
			destroy_dev_alias(ndev, "slice-by-uuid/*");

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
				    &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
				    uuid_buf);
			}
		} else {
			/*
			 * Else create new device
			 */
			ndev = make_dev_covering(dops, dp->d_rawdev->si_ops,
					dkmakewholeslice(dkunit(dev), i),
					UID_ROOT, GID_OPERATOR, 0640,
					(info->d_dsflags & DSO_DEVICEMAPPER)?
					"%s.s%d" : "%ss%d", dev->si_name, sno);
			ndev->si_parent = dev;
			ndev->si_iosize_max = dev->si_iosize_max;
			udev_dict_set_cstr(ndev, "subsystem", "disk");
			/* Inherit parent's disk type */
			if (dp->d_disktype) {
				udev_dict_set_cstr(ndev, "disk-type",
				    __DECONST(char *, dp->d_disktype));
			}

			/* Create serno alias */
			if (dp->d_info.d_serialno) {
				make_dev_alias(ndev, "serno/%s.s%d",
					       dp->d_info.d_serialno, sno);
			}

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
				    &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
				    uuid_buf);
			}

			ndev->si_disk = dp;
			ndev->si_flags |= SI_REPROBE_TEST;
		}
		sp->ds_dev = ndev;

		/*
		 * Probe appropriate slices for a disklabel
		 *
		 * XXX slice type 1 used by our gpt probe code.
		 * XXX slice type 0 used by mbr compat slice.
		 */
		if (sp->ds_type == DOSPTYP_386BSD ||
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD ||
		    sp->ds_type == DOSPTYP_DFLYBSD ||
		    sp->ds_type == 0 ||
		    sp->ds_type == 1) {
			if (dp->d_slice->dss_first_bsd_slice == 0)
				dp->d_slice->dss_first_bsd_slice = i;
			disk_probe_slice(dp, ndev, i, reprobe);
		}
	}
	dsgone(&osp);
	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
}

static void
disk_msg_core(void *arg)
{
	struct disk *dp;
	struct diskslice *sp;
	disk_msg_t msg;
	int run;

	lwkt_gettoken(&disklist_token);
	lwkt_initport_thread(&disk_msg_port, curthread);
	wakeup(curthread);	/* synchronous startup */
	lwkt_reltoken(&disklist_token);

	lwkt_gettoken(&ds_token);
	run = 1;

	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);

		switch (msg->hdr.u.ms_result) {
		case DISK_DISK_PROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				   "DISK_DISK_PROBE: %s\n",
				   dp->d_cdev->si_name);
			disk_iocom_update(dp);
			disk_probe(dp, 0);
			break;
		case DISK_DISK_DESTROY:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				   "DISK_DISK_DESTROY: %s\n",
				   dp->d_cdev->si_name);
			disk_iocom_uninit(dp);

			/*
			 * Interlock against struct disk enumerations.
			 * Wait for enumerations to complete then remove
			 * the dp from the list before tearing it down.
			 * This avoids numerous races.
			 */
			lwkt_gettoken(&disklist_token);
			while (dp->d_refs)
				tsleep(&dp->d_refs, 0, "diskdel", hz / 10);
			LIST_REMOVE(dp, d_list);

			dsched_disk_destroy(dp);
			devfs_destroy_related(dp->d_cdev);
			destroy_dev(dp->d_cdev);
			destroy_only_dev(dp->d_rawdev);

			lwkt_reltoken(&disklist_token);

			if (dp->d_info.d_serialno) {
				kfree(dp->d_info.d_serialno, M_TEMP);
				dp->d_info.d_serialno = NULL;
			}
			break;
		case DISK_UNPROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				   "DISK_DISK_UNPROBE: %s\n",
				   dp->d_cdev->si_name);
			devfs_destroy_related(dp->d_cdev);
			break;
		case DISK_SLICE_REPROBE:
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			devfs_clr_related_flag(sp->ds_dev,
					       SI_REPROBE_TEST);
			disk_debug(1,
				   "DISK_SLICE_REPROBE: %s\n",
				   sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev,
					 dkslice(sp->ds_dev), 1);
			devfs_destroy_related_without_flag(
					sp->ds_dev, SI_REPROBE_TEST);
			break;
		case DISK_DISK_REPROBE:
			dp = (struct disk *)msg->load;
			devfs_clr_related_flag(dp->d_cdev, SI_REPROBE_TEST);
			disk_debug(1,
				   "DISK_DISK_REPROBE: %s\n",
				   dp->d_cdev->si_name);
			disk_probe(dp, 1);
			devfs_destroy_related_without_flag(
					dp->d_cdev, SI_REPROBE_TEST);
			break;
		case DISK_SYNC:
			disk_debug(1, "DISK_SYNC\n");
			break;
		default:
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "disk_msg_core: unknown message "
				    "received at core\n");
			break;
		}
		lwkt_replymsg(&msg->hdr, 0);
	}
	lwkt_reltoken(&ds_token);
	lwkt_exit();
}

/*
 * Acts as a message drain. Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}

void
disk_msg_send(uint32_t cmd, void *load, void *load2)
{
	disk_msg_t disk_msg;
	lwkt_port_t port = &disk_msg_port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);

	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;
	KKASSERT(port);
	lwkt_sendmsg(port, &disk_msg->hdr);
}

void
disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
{
	struct lwkt_port rep_port;
	disk_msg_t disk_msg;
	lwkt_port_t port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
	port = &disk_msg_port;

	/* XXX could probably use curthread's built-in msgport */
	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;

	lwkt_domsg(port, &disk_msg->hdr, 0);
	objcache_put(disk_msg_cache, disk_msg);
}

/*
 * Create a raw device for the dev_ops template (which is returned).  Also
 * create a slice and unit managed disk and overload the user visible
 * device space with it.
 *
 * NOTE: The returned raw device is NOT a slice and unit managed device.
 * It is an actual raw device representing the raw disk as specified by
 * the passed dev_ops.  The disk layer not only returns such a raw device,
 * it also uses it internally when passing (modified) commands through.
 */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	return _disk_create_named(NULL, unit, dp, raw_ops, 0);
}

cdev_t
disk_create_clone(int unit, struct disk *dp,
		  struct dev_ops *raw_ops)
{
	return _disk_create_named(NULL, unit, dp, raw_ops, 1);
}

cdev_t
disk_create_named(const char *name, int unit, struct disk *dp,
		  struct dev_ops *raw_ops)
{
	return _disk_create_named(name, unit, dp, raw_ops, 0);
}

cdev_t
disk_create_named_clone(const char *name, int unit, struct disk *dp,
			struct dev_ops *raw_ops)
{
	return _disk_create_named(name, unit, dp, raw_ops, 1);
}

static cdev_t
_disk_create_named(const char *name, int unit, struct disk *dp,
		   struct dev_ops *raw_ops, int clone)
{
	cdev_t rawdev;
	struct dev_ops *dops;

	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);

	if (name) {
		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
		    UID_ROOT, GID_OPERATOR, 0640, "%s", name);
	} else {
		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
		    UID_ROOT, GID_OPERATOR, 0640,
		    "%s%d", raw_ops->head.name, unit);
	}

	bzero(dp, sizeof(*dp));

	dops = (raw_ops->head.flags & D_NOEMERGPGR) ? &disk2_ops : &disk1_ops;

	dp->d_rawdev = rawdev;
	dp->d_raw_ops = raw_ops;
	dp->d_dev_ops = dops;

	if (name) {
		if (clone) {
			dp->d_cdev = make_only_dev_covering(
					dops, dp->d_rawdev->si_ops,
					dkmakewholedisk(unit),
					UID_ROOT, GID_OPERATOR, 0640,
					"%s", name);
		} else {
			dp->d_cdev = make_dev_covering(
					dops, dp->d_rawdev->si_ops,
					dkmakewholedisk(unit),
					UID_ROOT, GID_OPERATOR, 0640,
					"%s", name);
		}
	} else {
		if (clone) {
			dp->d_cdev = make_only_dev_covering(
					dops, dp->d_rawdev->si_ops,
					dkmakewholedisk(unit),
					UID_ROOT, GID_OPERATOR, 0640,
					"%s%d", raw_ops->head.name, unit);
		} else {
			dp->d_cdev = make_dev_covering(
					dops, dp->d_rawdev->si_ops,
					dkmakewholedisk(unit),
					UID_ROOT, GID_OPERATOR, 0640,
					"%s%d", raw_ops->head.name, unit);
		}
	}

	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
	dp->d_cdev->si_disk = dp;

	if (name)
		dsched_disk_create(dp, name, unit);
	else
		dsched_disk_create(dp, raw_ops->head.name, unit);

	lwkt_gettoken(&disklist_token);
	LIST_INSERT_HEAD(&disklist, dp, d_list);
	lwkt_reltoken(&disklist_token);

	disk_iocom_init(dp);

	disk_debug(1, "disk_create (end): %s%d\n",
		   (name != NULL)?(name):(raw_ops->head.name), unit);

	return (dp->d_rawdev);
}

int
disk_setdisktype(struct disk *disk, const char *type)
{
	int error;

	KKASSERT(disk != NULL);

	disk->d_disktype = type;
	error = udev_dict_set_cstr(disk->d_cdev, "disk-type",
	    __DECONST(char *, type));
	return error;
}

int
disk_getopencount(struct disk *disk)
{
	return disk->d_opencount;
}

static void
_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	char *oldserialno;

	oldserialno = disk->d_info.d_serialno;
	bcopy(info, &disk->d_info, sizeof(disk->d_info));
	info = &disk->d_info;

	disk_debug(1, "_setdiskinfo: %s\n", disk->d_cdev->si_name);

	/*
	 * The serial number is duplicated so the caller can throw
	 * their copy away.
	 */
	if (info->d_serialno && info->d_serialno[0] &&
	    (info->d_serialno[0] != ' ' || strlen(info->d_serialno) > 1)) {
		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
		disk_cleanserial(info->d_serialno);
		if (disk->d_cdev) {
			make_dev_alias(disk->d_cdev, "serno/%s",
				       info->d_serialno);
		}
	} else {
		info->d_serialno = NULL;
	}
	if (oldserialno)
		kfree(oldserialno, M_TEMP);

	dsched_disk_update(disk, info);

	/*
	 * The caller may set d_media_size or d_media_blocks and we
	 * calculate the other.
	 */
	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
	if (info->d_media_size == 0 && info->d_media_blocks) {
		info->d_media_size = (u_int64_t)info->d_media_blocks *
				     info->d_media_blksize;
	} else if (info->d_media_size && info->d_media_blocks == 0 &&
		   info->d_media_blksize) {
		info->d_media_blocks = info->d_media_size /
				       info->d_media_blksize;
	}

	/*
	 * The si_* fields for rawdev are not set until after the
	 * disk_create() call, so someone using the cooked version
	 * of the raw device (i.e. da0s0) will not get the right
	 * si_iosize_max unless we fix it up here.
	 */
	if (disk->d_cdev && disk->d_rawdev &&
	    disk->d_cdev->si_iosize_max == 0) {
		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
	}

	/* Add the serial number to the udev_dictionary */
	if (info->d_serialno)
		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
}

/*
 * Disk drivers must call this routine when media parameters are available
 * or have changed.
 */
void
disk_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo: sent probe for %s\n",
		   disk->d_cdev->si_name);
}

void
disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo_sync: sent probe for %s\n",
		   disk->d_cdev->si_name);
}

/*
 * This routine is called when an adapter detaches.  The higher level
 * managed disk device is destroyed while the lower level raw device is
 * released.
 */
void
disk_destroy(struct disk *disk)
{
	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
	return;
}

int
disk_dumpcheck(cdev_t dev, u_int64_t *size,
	       u_int64_t *blkno, u_int32_t *secsize)
{
	struct partinfo pinfo;
	int error;

	if (size)
		*size = 0;	/* avoid gcc warnings */
	if (secsize)
		*secsize = 512;	/* avoid gcc warnings */
	bzero(&pinfo, sizeof(pinfo));

	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
			   proc0.p_ucred, NULL, NULL);
	if (error)
		return (error);

	if (pinfo.media_blksize == 0)
		return (ENXIO);

	if (blkno) /* XXX: make sure this reserved stuff is right */
		*blkno = pinfo.reserved_blocks +
			 pinfo.media_offset / pinfo.media_blksize;
	if (secsize)
		*secsize = pinfo.media_blksize;
	if (size)
		*size = (pinfo.media_blocks - pinfo.reserved_blocks);

	return (0);
}

int
disk_dumpconf(cdev_t dev, u_int onoff)
{
	struct dumperinfo di;
	u_int64_t size, blkno;
	u_int32_t secsize;
	int error;

	if (!onoff)
		return set_dumper(NULL);

	error = disk_dumpcheck(dev, &size, &blkno, &secsize);

	if (error)
		return ENXIO;

	bzero(&di, sizeof(struct dumperinfo));
	di.dumper = diskdump;
	di.priv = dev;
	di.blocksize = secsize;
	di.maxiosize = dev->si_iosize_max;
	di.mediaoffset = blkno * DEV_BSIZE;
	di.mediasize = size * DEV_BSIZE;

	return set_dumper(&di);
}

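/*
 * Illustrative sketch (assumed userland code, not part of this file):
 * disk_dumpconf() is normally reached via the DIOCGKERNELDUMP ioctl
 * handled in diskioctl() below, e.g. from a dumpon(8)-like utility.
 * The device path is hypothetical:
 *
 *	u_int flag = 1;				(1 = enable, 0 = disable)
 *	int fd = open("/dev/da0s1b", O_RDONLY);
 *	ioctl(fd, DIOCGKERNELDUMP, &flag);
 */
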
void
disk_unprobe(struct disk *disk)
{
	if (disk == NULL)
		return;

	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
}

void
disk_invalidate (struct disk *disk)
{
	dsgone(&disk->d_slice);
}

/*
 * Enumerate disks, pass a marker and an initial NULL dp to initialize,
 * then loop with the previously returned dp.
 *
 * The returned dp will be referenced, preventing its destruction.  When
 * you pass the returned dp back into the loop the ref is dropped.
 *
 * WARNING: If terminating your loop early you must call
 *	    disk_enumerate_stop().
 */
struct disk *
disk_enumerate(struct disk *marker, struct disk *dp)
{
	lwkt_gettoken(&disklist_token);
	if (dp) {
		--dp->d_refs;
		dp = LIST_NEXT(marker, d_list);
		LIST_REMOVE(marker, d_list);
	} else {
		bzero(marker, sizeof(*marker));
		marker->d_flags = DISKFLAG_MARKER;
		dp = LIST_FIRST(&disklist);
	}
	while (dp) {
		if ((dp->d_flags & DISKFLAG_MARKER) == 0)
			break;
		dp = LIST_NEXT(dp, d_list);
	}
	if (dp) {
		++dp->d_refs;
		LIST_INSERT_AFTER(dp, marker, d_list);
	}
	lwkt_reltoken(&disklist_token);
	return (dp);
}

/*
 * Terminate an enumeration early.  Do not call this function if the
 * enumeration ended normally.  dp can be NULL, indicating that you
 * wish to retain the ref count on dp.
 *
 * This function removes the marker.
 */
void
disk_enumerate_stop(struct disk *marker, struct disk *dp)
{
	lwkt_gettoken(&disklist_token);
	LIST_REMOVE(marker, d_list);
	if (dp)
		--dp->d_refs;
	lwkt_reltoken(&disklist_token);
}

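/*
 * Illustrative sketch of the enumeration API above (assumed caller code,
 * not part of this file).  The predicate is hypothetical; the pattern is
 * the marker/ref protocol documented in the comments:
 *
 *	struct disk marker;
 *	struct disk *dp;
 *
 *	dp = NULL;
 *	while ((dp = disk_enumerate(&marker, dp)) != NULL) {
 *		if (is_interesting(dp)) {		(hypothetical test)
 *			disk_enumerate_stop(&marker, dp);
 *			break;				(dp stays referenced)
 *		}
 *	}
 *
 * sysctl_disks() below is the in-tree user of this pattern.
 */
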
static
int
sysctl_disks(SYSCTL_HANDLER_ARGS)
{
	struct disk marker;
	struct disk *dp;
	int error, first;

	first = 1;
	error = 0;
	dp = NULL;

	while ((dp = disk_enumerate(&marker, dp))) {
		if (!first) {
			error = SYSCTL_OUT(req, " ", 1);
			if (error) {
				disk_enumerate_stop(&marker, dp);
				break;
			}
		} else {
			first = 0;
		}
		error = SYSCTL_OUT(req, dp->d_rawdev->si_name,
				   strlen(dp->d_rawdev->si_name));
		if (error) {
			disk_enumerate_stop(&marker, dp);
			break;
		}
	}
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
	    sysctl_disks, "A", "names of available disks");

/*
 * Open a disk device or partition.
 */
static
int
diskopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	/*
	 * dp can't be NULL here XXX.
	 *
	 * d_slice will be NULL if setdiskinfo() has not been called yet.
	 * setdiskinfo() is typically called whether the disk is present
	 * or not (e.g. CD), but the base disk device is created first
	 * and there may be a race.
	 */
	dp = dev->si_disk;
	if (dp == NULL || dp->d_slice == NULL)
		return (ENXIO);
	error = 0;

	/*
	 * Deal with open races
	 */
	lwkt_gettoken(&ds_token);
	while (dp->d_flags & DISKFLAG_LOCK) {
		dp->d_flags |= DISKFLAG_WANTED;
		error = tsleep(dp, PCATCH, "diskopen", hz);
		if (error) {
			lwkt_reltoken(&ds_token);
			return (error);
		}
	}
	dp->d_flags |= DISKFLAG_LOCK;

	/*
	 * Open the underlying raw device.
	 */
	if (!dsisopen(dp->d_slice)) {
#if 0
		if (!pdev->si_iosize_max)
			pdev->si_iosize_max = dev->si_iosize_max;
#endif
		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
				  ap->a_devtype, ap->a_cred, NULL);
	}

	if (error)
		goto out;
	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
		       &dp->d_slice, &dp->d_info);
	if (!dsisopen(dp->d_slice)) {
		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype, NULL);
	}
out:
	dp->d_flags &= ~DISKFLAG_LOCK;
	if (dp->d_flags & DISKFLAG_WANTED) {
		dp->d_flags &= ~DISKFLAG_WANTED;
		wakeup(dp);
	}
	lwkt_reltoken(&ds_token);

	KKASSERT(dp->d_opencount >= 0);
	/* If the open was successful, bump open count */
	if (error == 0)
		atomic_add_int(&dp->d_opencount, 1);

	return(error);
}

/*
 * Close a disk device or partition
 */
static
int
diskclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;
	int lcount;

	error = 0;
	dp = dev->si_disk;

	/*
	 * The cdev_t represents the disk/slice/part.  The shared
	 * dp structure governs all cdevs associated with the disk.
	 *
	 * As a safety, only close the underlying raw device on the last
	 * close of the disk device, and only if our tracking of the
	 * slices/partitions also indicates nothing is open.
	 */
	KKASSERT(dp->d_opencount >= 1);
	lcount = atomic_fetchadd_int(&dp->d_opencount, -1);

	lwkt_gettoken(&ds_token);
	dsclose(dev, ap->a_devtype, dp->d_slice);
	if (lcount <= 1 && !dsisopen(dp->d_slice)) {
		error = dev_dclose(dp->d_rawdev, ap->a_fflag,
				   ap->a_devtype, NULL);
	}
	lwkt_reltoken(&ds_token);

	return (error);
}

/*
 * First execute the ioctl on the disk device, and if it isn't supported
 * try running it on the backing device.
 */
static
int
diskioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;
	u_int u;

	dp = dev->si_disk;
	if (dp == NULL)
		return (ENXIO);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "diskioctl: cmd is: %lx (name: %s)\n",
		    ap->a_cmd, dev->si_name);
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "diskioctl: &dp->d_slice is: %p, %p\n",
		    &dp->d_slice, dp->d_slice);

	if (ap->a_cmd == DIOCGKERNELDUMP) {
		u = *(u_int *)ap->a_data;
		return disk_dumpconf(dev, u);
	}

	if (ap->a_cmd == DIOCRECLUSTER && dev == dp->d_cdev) {
		error = disk_iocom_ioctl(dp, ap->a_cmd, ap->a_data);
		return error;
	}

	if (&dp->d_slice == NULL || dp->d_slice == NULL ||
	    ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) &&
	     dkslice(dev) == WHOLE_DISK_SLICE)) {
		error = ENOIOCTL;
	} else {
		lwkt_gettoken(&ds_token);
		error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
				&dp->d_slice, &dp->d_info);
		lwkt_reltoken(&ds_token);
	}

	if (error == ENOIOCTL) {
		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
				   ap->a_fflag, ap->a_cred, NULL, NULL);
	}
	return (error);
}

/*
 * Execute strategy routine
 *
 * WARNING! We are using the KVABIO API and must not access memory
 *	    through bp->b_data without first calling bkvasync(bp).
 */
static
int
diskstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct disk *dp;

	dp = dev->si_disk;

	if (dp == NULL) {
		bio->bio_buf->b_error = ENXIO;
		bio->bio_buf->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}
	KKASSERT(dev->si_disk == dp);

	/*
	 * The dscheck() function will also transform the slice relative
	 * block number i.e. bio->bio_offset into a block number that can be
	 * passed directly to the underlying raw device.  If dscheck()
	 * returns NULL it will have handled the bio for us (e.g. EOF
	 * or error due to being beyond the device size).
	 */
	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
		dev_dstrategy(dp->d_rawdev, nbio);
	} else {
		biodone(bio);
	}
	return(0);
}

/*
 * Return the partition size in ?blocks?
 */
static
int
diskpsize(struct dev_psize_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;

	dp = dev->si_disk;
	if (dp == NULL)
		return(ENODEV);

	ap->a_result = dssize(dev, &dp->d_slice);

	if ((ap->a_result == -1) &&
	    (dp->d_info.d_dsflags & DSO_RAWPSIZE)) {
		ap->a_head.a_dev = dp->d_rawdev;
		return dev_doperate(&ap->a_head);
	}
	return(0);
}

static int
diskdump(struct dev_dump_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp = dev->si_disk;
	u_int64_t size, offset;
	int error;

	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
	/* XXX: this should probably go in disk_dumpcheck somehow */
	if (ap->a_length != 0) {
		size *= DEV_BSIZE;
		offset = ap->a_blkno * DEV_BSIZE;
		if ((ap->a_offset < offset) ||
		    (ap->a_offset + ap->a_length - offset > size)) {
			kprintf("Attempt to write outside dump "
				"device boundaries.\n");
			error = ENOSPC;
		}
	}

	if (error == 0) {
		ap->a_head.a_dev = dp->d_rawdev;
		error = dev_doperate(&ap->a_head);
	}

	return(error);
}

SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
	   0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
	   0, sizeof(struct disk), "sizeof(struct disk)");

/*
 * Reorder interval for burst write allowance and minor write
 * allowance.
 *
 * We always want to trickle some writes in to make use of the
 * disk's zone cache.  Bursting occurs on a longer interval and only
 * when runningbufspace is well over the hirunningspace limit.
 */
int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
int bioq_reorder_minor_interval = 5;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");

int bioq_reorder_burst_bytes = 3000000;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
int bioq_reorder_minor_bytes = 262144;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");

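/*
 * Worked example (editorial note) with the defaults above: every 5th
 * queued read (bioq_reorder_minor_interval) lets roughly 256KB of linear
 * writes (bioq_reorder_minor_bytes) bleed through ahead of further reads;
 * once the reorder counter reaches 60 (bioq_reorder_burst_interval) and
 * runningbufspace is severe, a non-linear burst of up to ~3MB of writes
 * (bioq_reorder_burst_bytes) is released instead.  See bioqdisksort() and
 * bioqwritereorder() below for the exact conditions.
 */
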
/*
 * Order I/Os.  Generally speaking this code is designed to make better
 * use of drive zone caches.  A drive zone cache can typically track linear
 * reads or writes for around 16 zones simultaneously.
 *
 * Read prioritization issues:  It is possible for hundreds of megabytes worth
 * of writes to be queued asynchronously.  This creates a huge bottleneck
 * for reads which reduce read bandwidth to a trickle.
 *
 * To solve this problem we generally reorder reads before writes.
 *
 * However, a large number of random reads can also starve writes and
 * make poor use of the drive zone cache so we allow writes to trickle
 * in every N reads.
 */
void
bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
{
#if 0
	/*
	 * The BIO wants to be ordered.  Adding to the tail also
	 * causes transition to be set to NULL, forcing the ordering
	 * of all prior I/O's.
	 */
	if (bio->bio_buf->b_flags & B_ORDERED) {
		bioq_insert_tail(bioq, bio);
		return;
	}
#endif

	switch(bio->bio_buf->b_cmd) {
	case BUF_CMD_READ:
		if (bioq->transition) {
			/*
			 * Insert before the first write.  Bleedover writes
			 * based on reorder intervals to prevent starvation.
			 */
			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
			++bioq->reorder;
			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
				bioqwritereorder(bioq);
				if (bioq->reorder >=
				    bioq_reorder_burst_interval) {
					bioq->reorder = 0;
				}
			}
		} else {
			/*
			 * No writes queued (or ordering was forced),
			 * insert at tail.
			 */
			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		}
		break;
	case BUF_CMD_WRITE:
		/*
		 * Writes are always appended.  If no writes were previously
		 * queued or an ordered tail insertion occurred the transition
		 * field will be NULL.
		 */
		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		if (bioq->transition == NULL)
			bioq->transition = bio;
		break;
	default:
		/*
		 * All other request types are forced to be ordered.
		 */
		bioq_insert_tail(bioq, bio);
		break;
	}
}

/*
 * Move the read-write transition point to prevent reads from
 * completely starving our writes.  This brings a number of writes into
 * the fold every N reads.
 *
 * We bring a few linear writes into the fold on a minor interval
 * and we bring a non-linear burst of writes into the fold on a major
 * interval.  Bursting only occurs if runningbufspace is really high
 * (typically from syncs, fsyncs, or HAMMER flushes).
 */
static
void
bioqwritereorder(struct bio_queue_head *bioq)
{
	struct bio *bio;
	off_t next_offset;
	size_t left;
	size_t n;
	int check_off;

	if (bioq->reorder < bioq_reorder_burst_interval ||
	    !buf_runningbufspace_severe()) {
		left = (size_t)bioq_reorder_minor_bytes;
		check_off = 1;
	} else {
		left = (size_t)bioq_reorder_burst_bytes;
		check_off = 0;
	}

	next_offset = bioq->transition->bio_offset;
	while ((bio = bioq->transition) != NULL &&
	       (check_off == 0 || next_offset == bio->bio_offset)) {
		n = bio->bio_buf->b_bcount;
		next_offset = bio->bio_offset + n;
		bioq->transition = TAILQ_NEXT(bio, bio_act);
		if (left < n)
			break;
		left -= n;
	}
}

/*
 * Bounds checking against the media size, used for the raw partition.
 * secsize, mediasize and b_blkno must all be the same units.
 * Possibly this has to be DEV_BSIZE (512).
 */
int
bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
{
	struct buf *bp = bio->bio_buf;
	int64_t sz;

	sz = howmany(bp->b_bcount, secsize);

	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
		sz = mediasize - bio->bio_offset/DEV_BSIZE;
		if (sz == 0) {
			/* If exactly at end of disk, return EOF. */
			bp->b_resid = bp->b_bcount;
			return 0;
		}
		if (sz < 0) {
			/* If past end of disk, return EINVAL. */
			bp->b_error = EINVAL;
			return 0;
		}
		/* Otherwise, truncate request. */
		bp->b_bcount = sz * secsize;
	}

	return 1;
}

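/*
 * Illustrative sketch (assumed driver code, not part of this file): a raw
 * strategy routine for a hypothetical "foo" device would typically call
 * bounds_check_with_mediasize() before queuing the transfer, e.g.
 *
 *	if (bounds_check_with_mediasize(bio, DEV_BSIZE,
 *	    sc->sc_mediasize / DEV_BSIZE) == 0) {
 *		biodone(bio);		(EOF or error recorded in the buf)
 *		return (0);
 *	}
 *
 * A zero return means the bio was handled (EOF or EINVAL recorded); a
 * non-zero return means the request, possibly truncated, may proceed.
 */
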
/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form
 *
 *	hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
 *
 * if the offset of the error in the transfer and a disk label
 * are both available.  blkdone should be -1 if the position of the error
 * is unknown; the disklabel pointer may be null from drivers that have not
 * been converted to use them.  The message is printed with kprintf
 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
 * The message should be completed (with at least a newline) with kprintf
 * or log(-1, ...), respectively.  There is no trailing space.
 */
void
diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
{
	struct buf *bp = bio->bio_buf;
	const char *term;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		term = "read";
		break;
	case BUF_CMD_WRITE:
		term = "write";
		break;
	default:
		term = "access";
		break;
	}
	kprintf("%s: %s %sing ", dev->si_name, what, term);
	kprintf("offset %012llx for %d",
		(long long)bio->bio_offset,
		bp->b_bcount);
	if (donecnt)
		kprintf(" (%d bytes completed)", donecnt);
}

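/*
 * Illustrative sketch (assumed caller code, not part of this file): a
 * driver reporting a failed transfer completes the message itself, as
 * described in the comment above, e.g.
 *
 *	diskerr(bio, dev, "hard error", LOG_PRINTF, 0);
 *	kprintf("\n");
 */
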
/*
 * Locate a disk device
 */
cdev_t
disk_locate(const char *devname)
{
	return devfs_find_device_by_name("%s", devname);
}

void
disk_config(void *arg)
{
	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
}

static void
disk_init(void)
{
	struct thread *td_core;

	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
					 NULL, NULL, NULL,
					 objcache_malloc_alloc,
					 objcache_malloc_free,
					 &disk_msg_malloc_args);

	lwkt_token_init(&disklist_token, "disks");
	lwkt_token_init(&ds_token, "ds");

	/*
	 * Initialize the reply-only port which acts as a message drain
	 */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	lwkt_gettoken(&disklist_token);
	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
		    0, -1, "disk_msg_core");
	tsleep(td_core, 0, "diskcore", 0);
	lwkt_reltoken(&disklist_token);
}

static void
disk_uninit(void)
{
	objcache_destroy(disk_msg_cache);
}

/*
 * Clean out illegal characters in serial numbers.
 */
static void
disk_cleanserial(char *serno)
{
	char c;

	while ((c = *serno) != 0) {
		if (c >= 'a' && c <= 'z')
			;
		else if (c >= 'A' && c <= 'Z')
			;
		else if (c >= '0' && c <= '9')
			;
		else if (c == '-' || c == '@' || c == '+' || c == '.')
			;
		else
			c = '_';
		*serno++= c;
	}
}

TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
	   0, "Enable subr_disk debugging");

SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);