/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>
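/*
 * Per-consumer softc.  sc_mtx protects the bookkeeping below: sc_open
 * tracks the sum of the access counts granted through g_dev_open() and
 * released through g_dev_close(), while sc_active counts bios that have
 * been handed to g_io_request() but not yet completed by g_dev_done().
 * Together they let the last closer and the device-destruction path wait
 * for, or detect, an idle device.
 */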
struct g_dev_softc {
	struct mtx	 sc_mtx;
	struct cdev	*sc_dev;
	struct cdev	*sc_alias;
	int		 sc_open;
	u_int		 sc_active;
};
static d_open_t		g_dev_open;
static d_close_t	g_dev_close;
static d_strategy_t	g_dev_strategy;
static d_ioctl_t	g_dev_ioctl;

static struct cdevsw g_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	g_dev_open,
	.d_close =	g_dev_close,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	g_dev_ioctl,
	.d_strategy =	g_dev_strategy,
	.d_name =	"g_dev",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};
static g_init_t g_dev_init;
static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;

static struct g_class g_dev_class	= {
	.name = "DEV",
	.version = G_VERSION,
	.taste = g_dev_taste,
	.init = g_dev_init,
	.fini = g_dev_fini,
	.orphan = g_dev_orphan,
	.attrchanged = g_dev_attrchanged
};
/*
 * We target 262144 (8 x 32768) sectors by default as this significantly
 * increases the throughput on commonly used SSD's with a marginal
 * increase in non-interruptible request latency.
 */
static uint64_t g_dev_del_max_sectors = 262144;
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider. Larger requests are chunked "
    "so they can be interrupted. (0 = disable chunking)");
static char *dumpdev = NULL;

static void
g_dev_init(struct g_class *mp)
{

	dumpdev = kern_getenv("dumpdev");
}

static void
g_dev_fini(struct g_class *mp)
{

	freeenv(dumpdev);
	dumpdev = NULL;
}
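/*
 * Configure the kernel dump device.  A NULL dev clears the current dumper;
 * otherwise the "GEOM::kerneldump" attribute is queried on the consumer
 * attached to the cdev and the result is handed to set_dumper().  On
 * success the cdev is marked SI_DUMPDEV.
 */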
static int
g_dev_setdumpdev(struct cdev *dev, struct thread *td)
{
	struct g_kerneldump kd;
	struct g_consumer *cp;
	int error, len;

	if (dev == NULL)
		return (set_dumper(NULL, NULL, td));

	cp = dev->si_drv2;
	len = sizeof(kd);
	kd.offset = 0;
	kd.length = OFF_MAX;
	error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
	if (error == 0) {
		error = set_dumper(&kd.di, devtoname(dev), td);
		if (error == 0)
			dev->si_flags |= SI_DUMPDEV;
	}
	return (error);
}
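/*
 * If the "dumpdev" environment variable names this device (either by its
 * plain device name or with a "/dev/" prefix), take a temporary read
 * reference on the consumer and arm the device as the kernel dump target.
 */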
static int
init_dumpdev(struct cdev *dev)
{
	struct g_consumer *cp;
	const char *devprefix = "/dev/", *devname;
	int error;
	size_t len;

	if (dumpdev == NULL)
		return (0);

	len = strlen(devprefix);
	devname = devtoname(dev);
	if (strcmp(devname, dumpdev) != 0 &&
	   (strncmp(dumpdev, devprefix, len) != 0 ||
	    strcmp(devname, dumpdev + len) != 0))
		return (0);

	cp = (struct g_consumer *)dev->si_drv2;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);

	error = g_dev_setdumpdev(dev, curthread);
	if (error == 0) {
		freeenv(dumpdev);
		dumpdev = NULL;
	}

	(void)g_access(cp, -1, 0, 0);
	return (error);
}
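/*
 * Tear down the geom for a destroyed cdev: notify devctl listeners, drop
 * any access counts still held on the consumer, detach and destroy the
 * consumer and its geom, and free the softc.  Runs from the GEOM event
 * queue with the topology lock held.
 */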
static void
g_dev_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_dev_softc *sc;
	char buf[SPECNAMELEN + 6];

	g_topology_assert();
	cp = arg;
	gp = cp->geom;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name);
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "DESTROY", buf, M_WAITOK);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	mtx_destroy(&sc->sc_mtx);
	g_free(sc);
}
void
g_dev_print(void)
{
	struct g_geom *gp;
	char const *p = "";

	LIST_FOREACH(gp, &g_dev_class.geom, geom) {
		printf("%s%s", p, gp->name);
		p = " ";
	}
	printf("\n");
}
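/*
 * Query the "GEOM::physpath" attribute and keep a /dev alias for the
 * device node in sync with it: create or update the alias when a
 * non-empty physical path is reported, and destroy a stale alias when
 * the attribute is no longer available.
 */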
static void
g_dev_set_physpath(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	char *physpath;
	int error, physpath_len;

	if (g_access(cp, 1, 0, 0) != 0)
		return;

	sc = cp->private;
	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK | M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	g_access(cp, -1, 0, 0);
	if (error == 0 && strlen(physpath) != 0) {
		struct cdev *dev, *old_alias_dev;
		struct cdev **alias_devp;

		dev = sc->sc_dev;
		old_alias_dev = sc->sc_alias;
		alias_devp = (struct cdev **)&sc->sc_alias;
		make_dev_physpath_alias(MAKEDEV_WAITOK, alias_devp, dev,
		    old_alias_dev, physpath);
	} else if (sc->sc_alias) {
		destroy_dev((struct cdev *)sc->sc_alias);
		sc->sc_alias = NULL;
	}
	g_free(physpath);
}
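/*
 * Broadcast a MEDIACHANGE notification for the device node, and for its
 * physical-path alias if one exists, so devd(8) and other devctl
 * consumers can react to media arrival or removal.
 */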
static void
g_dev_set_media(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];

	sc = cp->private;
	dev = sc->sc_dev;
	snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
	devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
	devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK);
	dev = sc->sc_alias;
	if (dev != NULL) {
		snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
		devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
		devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK);
	}
}
static void
g_dev_attrchanged(struct g_consumer *cp, const char *attr)
{

	if (strcmp(attr, "GEOM::media") == 0) {
		g_dev_set_media(cp);
		return;
	}

	if (strcmp(attr, "GEOM::physpath") == 0) {
		g_dev_set_physpath(cp);
		return;
	}
}
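/*
 * Map a cdev back to the GEOM provider behind it.  Returns NULL when the
 * cdev does not belong to this class.
 */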
struct g_provider *
g_dev_getprovider(struct cdev *dev)
{
	struct g_consumer *cp;

	g_topology_assert();
	if (dev == NULL)
		return (NULL);
	if (dev->si_devsw != &g_dev_cdevsw)
		return (NULL);
	cp = dev->si_drv2;
	return (cp->provider);
}
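/*
 * Taste callback: for every provider that appears, create a matching geom
 * and consumer, attach to the provider, and create the /dev node through
 * which the provider is exposed to userland.  Also wires up the dump
 * device if the "dumpdev" tunable points at this node, publishes the
 * physical-path alias, and announces the new device via devctl.
 */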
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geomf(mp, "%s", pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
	cp = g_new_consumer(gp);
	cp->private = sc;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev,
	    &g_dev_cdevsw, NULL, UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name);
	if (error != 0) {
		printf("%s: make_dev_p() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		return (NULL);
	}
	dev->si_flags |= SI_UNMAPPED;
	sc->sc_dev = dev;

	dev->si_iosize_max = MAXPHYS;
	dev->si_drv2 = cp;
	error = init_dumpdev(dev);
	if (error != 0)
		printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);

	g_dev_attrchanged(cp, "GEOM::physpath");
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK);

	return (gp);
}
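/*
 * devfs open entry point.  Translate the file flags into GEOM access
 * deltas (read/write/exclusive), refuse writes at securelevel >= 2, and
 * on success fold the deltas into sc_open under sc_mtx so that close and
 * the destruction path can tell when the device has gone idle.
 */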
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return (ENXIO);		/* g_dev_taste() not done yet */
	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif

	/*
	 * This happens on attempt to open a device node with O_EXEC.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	if (error == 0) {
		sc = cp->private;
		mtx_lock(&sc->sc_mtx);
		if (sc->sc_open == 0 && sc->sc_active != 0)
			wakeup(&sc->sc_active);
		sc->sc_open += r + w + e;
		mtx_unlock(&sc->sc_mtx);
	}
	return (error);
}
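/*
 * devfs close entry point.  Apply the negative access deltas, but first
 * update sc_open and, if this was the last close, wait for any bios still
 * in flight so the access counts are not dropped under an active request.
 */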
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	if (cp == NULL)
		return (ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif

	/*
	 * The vgonel(9) - caused by e.g. a forced unmount of devfs - calls
	 * VOP_CLOSE(9) on the devfs vnode without any FREAD or FWRITE flags,
	 * which would result in zero deltas, which in turn would cause
	 * a panic in g_access(9).
	 *
	 * Note that we cannot zero the counters (i.e. do "r = cp->acr"
	 * etc.) instead, because the consumer might be opened in another
	 * devfs instance.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	sc = cp->private;
	mtx_lock(&sc->sc_mtx);
	sc->sc_open += r + w + e;
	while (sc->sc_open == 0 && sc->sc_active != 0)
		msleep(&sc->sc_active, &sc->sc_mtx, 0, "PRIBIO", 0);
	mtx_unlock(&sc->sc_mtx);
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	return (error);
}
/*
 * XXX: Until we have unmessed the ioctl situation, there is a race against
 * XXX: a concurrent orphanization.  We cannot close it by holding topology
 * XXX: since that would prevent us from doing our job, and stalling events
 * XXX: will break (actually: stall) the BSD disklabel hacks.
 */
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t offset, length, chunk;
	int i, error;

	cp = dev->si_drv2;
	pp = cp->provider;

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
	case DIOCSKERNELDUMP:
		if (*(u_int *)data == 0)
			error = g_dev_setdumpdev(NULL, td);
		else
			error = g_dev_setdumpdev(dev, td);
		break;
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % cp->provider->sectorsize) != 0 ||
		    (length % cp->provider->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		while (length > 0) {
			chunk = length;
			if (g_dev_del_max_sectors != 0 && chunk >
			    g_dev_del_max_sectors * cp->provider->sectorsize) {
				chunk = g_dev_del_max_sectors *
				    cp->provider->sectorsize;
			}
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size can be large, the service
			 * time can be long as well.  We make this ioctl
			 * interruptible by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		if (pp == NULL)
			return (ENOENT);
		strlcpy(data, pp->name, i);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = cp->provider->stripesize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = cp->provider->stripeoffset;
		break;
	case DIOCGPHYSPATH:
		error = g_io_getattr("GEOM::physpath", cp, &i, data);
		if (error == 0 && *(char *)data == '\0')
			error = ENOENT;
		break;
	case DIOCGATTR: {
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;

		if (arg->len > sizeof(arg->value)) {
			error = EINVAL;
			break;
		}
		error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
		break;
	}
	case DIOCZONECMD: {
		struct disk_zone_args *zone_args = (struct disk_zone_args *)data;
		struct disk_zone_rep_entry *new_entries, *old_entries;
		struct disk_zone_report *rep;
		size_t alloc_size;

		old_entries = NULL;
		new_entries = NULL;
		rep = NULL;
		alloc_size = 0;

		if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) {
			rep = &zone_args->zone_params.report;
			alloc_size = rep->entries_allocated *
			    sizeof(struct disk_zone_rep_entry);
			if (alloc_size != 0)
				new_entries = g_malloc(alloc_size,
				    M_WAITOK | M_ZERO);
			old_entries = rep->entries;
			rep->entries = new_entries;
		}
		error = g_io_zonecmd(zone_args, cp);
		if ((zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES)
		 && (alloc_size != 0)
		 && (error == 0)) {
			error = copyout(new_entries, old_entries, alloc_size);
		}
		if ((old_entries != NULL)
		 && (rep != NULL))
			rep->entries = old_entries;

		if (new_entries != NULL)
			g_free(new_entries);
		break;
	}
	default:
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd,
			    data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}
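/*
 * Userland reaches the DIOCGDELETE handler above through ioctl(2) with a
 * pair of byte values, { offset, length }, both multiples of the sector
 * size.  A minimal sketch (error handling omitted, device path is only an
 * example):
 *
 *	int fd = open("/dev/da0", O_RDWR);
 *	off_t args[2] = { 0, 1024 * 1024 };	(delete the first 1 MiB)
 *	ioctl(fd, DIOCGDELETE, args);
 *
 * Long ranges are split into chunks of at most
 * kern.geom.dev.delete_max_sectors sectors, and a pending signal stops
 * the loop between chunks.
 */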
static void
g_dev_done(struct bio *bp2)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	struct bio *bp;
	int destroy;

	cp = bp2->bio_from;
	sc = cp->private;
	bp = bp2->bio_parent;
	bp->bio_error = bp2->bio_error;
	bp->bio_completed = bp2->bio_completed;
	bp->bio_resid = bp->bio_length - bp2->bio_completed;
	if (bp2->bio_cmd == BIO_ZONE)
		bcopy(&bp2->bio_zone, &bp->bio_zone, sizeof(bp->bio_zone));

	if (bp2->bio_error != 0) {
		g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
		    bp2, bp2->bio_error);
		bp->bio_flags |= BIO_ERROR;
	} else {
		g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
		    bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed);
	}
	g_destroy_bio(bp2);
	destroy = 0;
	mtx_lock(&sc->sc_mtx);
	if ((--sc->sc_active) == 0) {
		if (sc->sc_open == 0)
			wakeup(&sc->sc_active);
		if (sc->sc_dev == NULL)
			destroy = 1;
	}
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_dev_destroy, cp, M_NOWAIT, NULL);
	biodone(bp);
}
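/*
 * devfs strategy entry point.  Validate the request, bump sc_active, clone
 * the bio, and hand the clone to the attached provider; g_dev_done() will
 * complete the original bio and drop sc_active when the clone returns.
 */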
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;
	struct g_dev_softc *sc;

	KASSERT(bp->bio_cmd == BIO_READ ||
	    bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE ||
	    bp->bio_cmd == BIO_FLUSH ||
	    bp->bio_cmd == BIO_ZONE,
	    ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	sc = cp->private;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));

	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}

	mtx_lock(&sc->sc_mtx);
	KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy"));
	sc->sc_active++;
	mtx_unlock(&sc->sc_mtx);

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: deadlock safely, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));
}
/*
 * g_dev_callback()
 *
 * Called by devfs when asynchronous device destruction is completed.
 * - Mark that we have no attached device any more.
 * - If there are no outstanding requests, schedule geom destruction.
 *   Otherwise destruction will be scheduled later by g_dev_done().
 */

static void
g_dev_callback(void *arg)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int destroy;

	cp = arg;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name);

	mtx_lock(&sc->sc_mtx);
	sc->sc_dev = NULL;
	sc->sc_alias = NULL;
	destroy = (sc->sc_active == 0);
	mtx_unlock(&sc->sc_mtx);
	if (destroy)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
}
/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Request asynchronous device destruction to prevent any more requests
 *   from coming in.  The provider is already marked with an error, so
 *   anything which comes in the interim will be returned immediately.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct cdev *dev;
	struct g_dev_softc *sc;

	g_topology_assert();
	sc = cp->private;
	dev = sc->sc_dev;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name);

	/* Reset any dump area set on this device. */
	if (dev->si_flags & SI_DUMPDEV)
		(void)set_dumper(NULL, NULL, curthread);

	/* Destroy the struct cdev so that we get no more requests. */
	destroy_dev_sched_cb(dev, g_dev_callback, cp);
}

DECLARE_GEOM_CLASS(g_dev_class, g_dev);