4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
27 * Copyright (c) 2018, Joyent, Inc.
30 #include <sys/scsi/scsi.h>
31 #include <sys/dktp/cm.h>
32 #include <sys/dktp/quetypes.h>
33 #include <sys/dktp/queue.h>
34 #include <sys/dktp/fctypes.h>
35 #include <sys/dktp/flowctrl.h>
36 #include <sys/dktp/cmdev.h>
38 #include <sys/dktp/tgdk.h>
39 #include <sys/dktp/dadk.h>
40 #include <sys/dktp/bbh.h>
41 #include <sys/dktp/altsctr.h>
42 #include <sys/dktp/cmdk.h>
47 #include <sys/dktp/dadkio.h>
48 #include <sys/aio_req.h>
59 static int cmdk_debug
= DIO
;
71 * NDKMAP is the base number for accessing the fdisk partitions.
72 * c?d?p0 --> cmdk@?,?:q
74 #define PARTITION0_INDEX (NDKMAP + 0)
76 #define DKTP_DATA (dkp->dk_tgobjp)->tg_data
77 #define DKTP_EXT (dkp->dk_tgobjp)->tg_ext
82 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
85 static kmutex_t cmdk_attach_mutex
;
86 static int cmdk_max_instance
= 0;
90 * There is only a single flag that is not mutex locked since
91 * the system is prevented from thread switching and cmdk_dump
92 * will only be called in a single threaded operation.
94 static int cmdk_indump
;
97 * Local Function Prototypes
99 static int cmdk_create_obj(dev_info_t
*dip
, struct cmdk
*dkp
);
100 static void cmdk_destroy_obj(dev_info_t
*dip
, struct cmdk
*dkp
);
101 static void cmdkmin(struct buf
*bp
);
102 static int cmdkrw(dev_t dev
, struct uio
*uio
, int flag
);
103 static int cmdkarw(dev_t dev
, struct aio_req
*aio
, int flag
);
106 * Bad Block Handling Functions Prototypes
108 static void cmdk_bbh_reopen(struct cmdk
*dkp
);
109 static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data
, struct buf
*bp
);
110 static bbh_cookie_t
cmdk_bbh_htoc(opaque_t bbh_data
, opaque_t handle
);
111 static void cmdk_bbh_freehandle(opaque_t bbh_data
, opaque_t handle
);
112 static void cmdk_bbh_close(struct cmdk
*dkp
);
113 static void cmdk_bbh_setalts_idx(struct cmdk
*dkp
);
114 static int cmdk_bbh_bsearch(struct alts_ent
*buf
, int cnt
, daddr32_t key
);
116 static struct bbh_objops cmdk_bbh_ops
= {
125 static int cmdkopen(dev_t
*dev_p
, int flag
, int otyp
, cred_t
*credp
);
126 static int cmdkclose(dev_t dev
, int flag
, int otyp
, cred_t
*credp
);
127 static int cmdkstrategy(struct buf
*bp
);
128 static int cmdkdump(dev_t dev
, caddr_t addr
, daddr_t blkno
, int nblk
);
129 static int cmdkioctl(dev_t
, int, intptr_t, int, cred_t
*, int *);
130 static int cmdkread(dev_t dev
, struct uio
*uio
, cred_t
*credp
);
131 static int cmdkwrite(dev_t dev
, struct uio
*uio
, cred_t
*credp
);
132 static int cmdk_prop_op(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
133 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
);
134 static int cmdkaread(dev_t dev
, struct aio_req
*aio
, cred_t
*credp
);
135 static int cmdkawrite(dev_t dev
, struct aio_req
*aio
, cred_t
*credp
);
138 * Device driver ops vector
141 static struct cb_ops cmdk_cb_ops
= {
143 cmdkclose
, /* close */
144 cmdkstrategy
, /* strategy */
148 cmdkwrite
, /* write */
149 cmdkioctl
, /* ioctl */
154 cmdk_prop_op
, /* cb_prop_op */
156 D_64BIT
| D_MP
| D_NEW
, /* Driver comaptibility flag */
158 cmdkaread
, /* async read */
159 cmdkawrite
/* async write */
162 static int cmdkinfo(dev_info_t
*dip
, ddi_info_cmd_t infocmd
, void *arg
,
164 static int cmdkprobe(dev_info_t
*dip
);
165 static int cmdkattach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
);
166 static int cmdkdetach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
);
168 static void cmdk_setup_pm(dev_info_t
*dip
, struct cmdk
*dkp
);
169 static int cmdkresume(dev_info_t
*dip
);
170 static int cmdksuspend(dev_info_t
*dip
);
171 static int cmdkpower(dev_info_t
*dip
, int component
, int level
);
173 struct dev_ops cmdk_ops
= {
174 DEVO_REV
, /* devo_rev, */
177 nulldev
, /* identify */
178 cmdkprobe
, /* probe */
179 cmdkattach
, /* attach */
180 cmdkdetach
, /* detach */
182 &cmdk_cb_ops
, /* driver operations */
183 NULL
, /* bus operations */
184 cmdkpower
, /* power */
185 ddi_quiesce_not_needed
, /* quiesce */
189 * This is the loadable module wrapper.
191 #include <sys/modctl.h>
193 static struct modldrv modldrv
= {
194 &mod_driverops
, /* Type of module. This one is a driver */
195 "Common Direct Access Disk",
196 &cmdk_ops
, /* driver ops */
199 static struct modlinkage modlinkage
= {
200 MODREV_1
, (void *)&modldrv
, NULL
203 /* Function prototypes for cmlb callbacks */
205 static int cmdk_lb_rdwr(dev_info_t
*dip
, uchar_t cmd
, void *bufaddr
,
206 diskaddr_t start
, size_t length
, void *tg_cookie
);
208 static int cmdk_lb_getinfo(dev_info_t
*dip
, int cmd
, void *arg
,
211 static void cmdk_devid_setup(struct cmdk
*dkp
);
212 static int cmdk_devid_modser(struct cmdk
*dkp
);
213 static int cmdk_get_modser(struct cmdk
*dkp
, int ioccmd
, char *buf
, int len
);
214 static int cmdk_devid_fabricate(struct cmdk
*dkp
);
215 static int cmdk_devid_read(struct cmdk
*dkp
);
217 static cmlb_tg_ops_t cmdk_lb_ops
= {
224 cmdk_isopen(struct cmdk
*dkp
, dev_t dev
)
229 ASSERT(MUTEX_HELD((&dkp
->dk_mutex
)));
231 part
= CMDKPART(dev
);
234 /* account for close */
235 if (dkp
->dk_open_lyr
[part
] != 0)
237 for (otyp
= 0; otyp
< OTYPCNT
; otyp
++)
238 if (dkp
->dk_open_reg
[otyp
] & partbit
)
248 if (rval
= ddi_soft_state_init(&cmdk_state
, sizeof (struct cmdk
), 7))
251 mutex_init(&cmdk_attach_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
252 if ((rval
= mod_install(&modlinkage
)) != 0) {
253 mutex_destroy(&cmdk_attach_mutex
);
254 ddi_soft_state_fini(&cmdk_state
);
266 _info(struct modinfo
*modinfop
)
268 return (mod_info(&modlinkage
, modinfop
));
272 * Autoconfiguration Routines
275 cmdkprobe(dev_info_t
*dip
)
281 instance
= ddi_get_instance(dip
);
283 if (ddi_get_soft_state(cmdk_state
, instance
))
284 return (DDI_PROBE_PARTIAL
);
286 if (ddi_soft_state_zalloc(cmdk_state
, instance
) != DDI_SUCCESS
)
287 return (DDI_PROBE_PARTIAL
);
289 if ((dkp
= ddi_get_soft_state(cmdk_state
, instance
)) == NULL
)
290 return (DDI_PROBE_PARTIAL
);
292 mutex_init(&dkp
->dk_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
293 rw_init(&dkp
->dk_bbh_mutex
, NULL
, RW_DRIVER
, NULL
);
295 mutex_enter(&dkp
->dk_mutex
);
297 dkp
->dk_dev
= makedevice(ddi_driver_major(dip
),
298 ddi_get_instance(dip
) << CMDK_UNITSHF
);
300 /* linkage to dadk and strategy */
301 if (cmdk_create_obj(dip
, dkp
) != DDI_SUCCESS
) {
302 mutex_exit(&dkp
->dk_mutex
);
303 mutex_destroy(&dkp
->dk_mutex
);
304 rw_destroy(&dkp
->dk_bbh_mutex
);
305 ddi_soft_state_free(cmdk_state
, instance
);
306 return (DDI_PROBE_PARTIAL
);
309 status
= dadk_probe(DKTP_DATA
, KM_NOSLEEP
);
310 if (status
!= DDI_PROBE_SUCCESS
) {
311 cmdk_destroy_obj(dip
, dkp
); /* dadk/strategy linkage */
312 mutex_exit(&dkp
->dk_mutex
);
313 mutex_destroy(&dkp
->dk_mutex
);
314 rw_destroy(&dkp
->dk_bbh_mutex
);
315 ddi_soft_state_free(cmdk_state
, instance
);
319 mutex_exit(&dkp
->dk_mutex
);
321 if (cmdk_debug
& DENT
)
322 PRF("cmdkprobe: instance= %d name= `%s`\n",
323 instance
, ddi_get_name_addr(dip
));
329 cmdkattach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
339 return (cmdkresume(dip
));
341 return (DDI_FAILURE
);
344 instance
= ddi_get_instance(dip
);
345 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
346 return (DDI_FAILURE
);
348 dkp
->dk_pm_level
= CMDK_SPINDLE_UNINIT
;
349 mutex_init(&dkp
->dk_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
351 mutex_enter(&dkp
->dk_mutex
);
353 /* dadk_attach is an empty function that only returns SUCCESS */
354 (void) dadk_attach(DKTP_DATA
);
356 node_type
= (DKTP_EXT
->tg_nodetype
);
359 * this open allows cmlb to read the device
360 * and determine the label types
361 * so that cmlb can create minor nodes for device
364 /* open the target disk */
365 if (dadk_open(DKTP_DATA
, 0) != DDI_SUCCESS
)
370 struct tgdk_geom phyg
;
371 (void) dadk_getphygeom(DKTP_DATA
, &phyg
);
372 if ((phyg
.g_cap
- 1) > DK_MAX_BLOCKS
) {
373 (void) dadk_close(DKTP_DATA
);
380 /* mark as having opened target */
381 dkp
->dk_flag
|= CMDK_TGDK_OPEN
;
383 cmlb_alloc_handle((cmlb_handle_t
*)&dkp
->dk_cmlbhandle
);
387 DTYPE_DIRECT
, /* device_type */
388 B_FALSE
, /* removable */
389 B_FALSE
, /* hot pluggable XXX */
391 CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT
, /* alter_behaviour */
396 /* Calling validate will create minor nodes according to disk label */
397 (void) cmlb_validate(dkp
->dk_cmlbhandle
, 0, 0);
399 /* set bbh (Bad Block Handling) */
400 cmdk_bbh_reopen(dkp
);
402 /* setup devid string */
403 cmdk_devid_setup(dkp
);
405 mutex_enter(&cmdk_attach_mutex
);
406 if (instance
> cmdk_max_instance
)
407 cmdk_max_instance
= instance
;
408 mutex_exit(&cmdk_attach_mutex
);
410 mutex_exit(&dkp
->dk_mutex
);
413 * Add a zero-length attribute to tell the world we support
414 * kernel ioctls (for layered drivers)
416 (void) ddi_prop_create(DDI_DEV_T_NONE
, dip
, DDI_PROP_CANSLEEP
,
417 DDI_KERNEL_IOCTL
, NULL
, 0);
421 * Initialize power management
423 mutex_init(&dkp
->dk_pm_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
424 cv_init(&dkp
->dk_suspend_cv
, NULL
, CV_DRIVER
, NULL
);
425 cmdk_setup_pm(dip
, dkp
);
427 return (DDI_SUCCESS
);
430 cmlb_free_handle(&dkp
->dk_cmlbhandle
);
431 (void) dadk_close(DKTP_DATA
);
433 cmdk_destroy_obj(dip
, dkp
);
434 rw_destroy(&dkp
->dk_bbh_mutex
);
435 mutex_exit(&dkp
->dk_mutex
);
436 mutex_destroy(&dkp
->dk_mutex
);
437 ddi_soft_state_free(cmdk_state
, instance
);
438 return (DDI_FAILURE
);
443 cmdkdetach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
451 /* return (DDI_FAILURE); */
454 return (cmdksuspend(dip
));
457 if (cmdk_debug
& DIO
) {
458 PRF("cmdkdetach: cmd = %d unknown\n", cmd
);
461 return (DDI_FAILURE
);
464 mutex_enter(&cmdk_attach_mutex
);
465 max_instance
= cmdk_max_instance
;
466 mutex_exit(&cmdk_attach_mutex
);
468 /* check if any instance of driver is open */
469 for (instance
= 0; instance
< max_instance
; instance
++) {
470 dkp
= ddi_get_soft_state(cmdk_state
, instance
);
473 if (dkp
->dk_flag
& CMDK_OPEN
)
474 return (DDI_FAILURE
);
477 instance
= ddi_get_instance(dip
);
478 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
479 return (DDI_SUCCESS
);
481 mutex_enter(&dkp
->dk_mutex
);
484 * The cmdk_part_info call at the end of cmdkattach may have
485 * caused cmdk_reopen to do a TGDK_OPEN, make sure we close on
486 * detach for case when cmdkopen/cmdkclose never occurs.
488 if (dkp
->dk_flag
& CMDK_TGDK_OPEN
) {
489 dkp
->dk_flag
&= ~CMDK_TGDK_OPEN
;
490 (void) dadk_close(DKTP_DATA
);
493 cmlb_detach(dkp
->dk_cmlbhandle
, 0);
494 cmlb_free_handle(&dkp
->dk_cmlbhandle
);
495 ddi_prop_remove_all(dip
);
497 cmdk_destroy_obj(dip
, dkp
); /* dadk/strategy linkage */
500 * free the devid structure if allocated before
503 ddi_devid_free(dkp
->dk_devid
);
504 dkp
->dk_devid
= NULL
;
507 mutex_exit(&dkp
->dk_mutex
);
508 mutex_destroy(&dkp
->dk_mutex
);
509 rw_destroy(&dkp
->dk_bbh_mutex
);
510 mutex_destroy(&dkp
->dk_pm_mutex
);
511 cv_destroy(&dkp
->dk_suspend_cv
);
512 ddi_soft_state_free(cmdk_state
, instance
);
514 return (DDI_SUCCESS
);
518 cmdkinfo(dev_info_t
*dip
, ddi_info_cmd_t infocmd
, void *arg
, void **result
)
520 dev_t dev
= (dev_t
)arg
;
525 if (cmdk_debug
& DENT
)
526 PRF("cmdkinfo: call\n");
528 instance
= CMDKUNIT(dev
);
531 case DDI_INFO_DEVT2DEVINFO
:
532 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
533 return (DDI_FAILURE
);
534 *result
= (void *) dkp
->dk_dip
;
536 case DDI_INFO_DEVT2INSTANCE
:
537 *result
= (void *)(intptr_t)instance
;
540 return (DDI_FAILURE
);
542 return (DDI_SUCCESS
);
546 * Initialize the power management components
549 cmdk_setup_pm(dev_info_t
*dip
, struct cmdk
*dkp
)
551 char *pm_comp
[] = { "NAME=cmdk", "0=off", "1=on", NULL
};
554 * Since the cmdk device does not the 'reg' property,
555 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
556 * The following code is to tell cpr that this device
557 * DOES need to be suspended and resumed.
559 (void) ddi_prop_update_string(DDI_DEV_T_NONE
, dip
,
560 "pm-hardware-state", "needs-suspend-resume");
562 if (ddi_prop_update_string_array(DDI_DEV_T_NONE
, dip
,
563 "pm-components", pm_comp
, 3) == DDI_PROP_SUCCESS
) {
564 if (pm_raise_power(dip
, 0, CMDK_SPINDLE_ON
) == DDI_SUCCESS
) {
565 mutex_enter(&dkp
->dk_pm_mutex
);
566 dkp
->dk_pm_level
= CMDK_SPINDLE_ON
;
567 dkp
->dk_pm_is_enabled
= 1;
568 mutex_exit(&dkp
->dk_pm_mutex
);
570 mutex_enter(&dkp
->dk_pm_mutex
);
571 dkp
->dk_pm_level
= CMDK_SPINDLE_OFF
;
572 dkp
->dk_pm_is_enabled
= 0;
573 mutex_exit(&dkp
->dk_pm_mutex
);
576 mutex_enter(&dkp
->dk_pm_mutex
);
577 dkp
->dk_pm_level
= CMDK_SPINDLE_UNINIT
;
578 dkp
->dk_pm_is_enabled
= 0;
579 mutex_exit(&dkp
->dk_pm_mutex
);
584 * suspend routine, it will be run when get the command
585 * DDI_SUSPEND at detach(9E) from system power management
588 cmdksuspend(dev_info_t
*dip
)
594 instance
= ddi_get_instance(dip
);
595 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
596 return (DDI_FAILURE
);
597 mutex_enter(&dkp
->dk_mutex
);
598 if (dkp
->dk_flag
& CMDK_SUSPEND
) {
599 mutex_exit(&dkp
->dk_mutex
);
600 return (DDI_SUCCESS
);
602 dkp
->dk_flag
|= CMDK_SUSPEND
;
604 /* need to wait a while */
605 while (dadk_getcmds(DKTP_DATA
) != 0) {
608 dkp
->dk_flag
&= ~CMDK_SUSPEND
;
609 cv_broadcast(&dkp
->dk_suspend_cv
);
610 mutex_exit(&dkp
->dk_mutex
);
611 return (DDI_FAILURE
);
615 mutex_exit(&dkp
->dk_mutex
);
616 return (DDI_SUCCESS
);
620 * resume routine, it will be run when get the command
621 * DDI_RESUME at attach(9E) from system power management
624 cmdkresume(dev_info_t
*dip
)
629 instance
= ddi_get_instance(dip
);
630 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
631 return (DDI_FAILURE
);
632 mutex_enter(&dkp
->dk_mutex
);
633 if (!(dkp
->dk_flag
& CMDK_SUSPEND
)) {
634 mutex_exit(&dkp
->dk_mutex
);
635 return (DDI_FAILURE
);
637 dkp
->dk_pm_level
= CMDK_SPINDLE_ON
;
638 dkp
->dk_flag
&= ~CMDK_SUSPEND
;
639 cv_broadcast(&dkp
->dk_suspend_cv
);
640 mutex_exit(&dkp
->dk_mutex
);
641 return (DDI_SUCCESS
);
646 * power management entry point, it was used to
647 * change power management component.
648 * Actually, the real hard drive suspend/resume
649 * was handled in ata, so this function is not
650 * doing any real work other than verifying that
654 cmdkpower(dev_info_t
*dip
, int component
, int level
)
659 instance
= ddi_get_instance(dip
);
660 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)) ||
661 component
!= 0 || level
> CMDK_SPINDLE_ON
||
662 level
< CMDK_SPINDLE_OFF
) {
663 return (DDI_FAILURE
);
666 mutex_enter(&dkp
->dk_pm_mutex
);
667 if (dkp
->dk_pm_is_enabled
&& dkp
->dk_pm_level
== level
) {
668 mutex_exit(&dkp
->dk_pm_mutex
);
669 return (DDI_SUCCESS
);
671 mutex_exit(&dkp
->dk_pm_mutex
);
673 if ((level
== CMDK_SPINDLE_OFF
) &&
674 (dadk_getcmds(DKTP_DATA
) != 0)) {
675 return (DDI_FAILURE
);
678 mutex_enter(&dkp
->dk_pm_mutex
);
679 dkp
->dk_pm_level
= level
;
680 mutex_exit(&dkp
->dk_pm_mutex
);
681 return (DDI_SUCCESS
);
685 cmdk_prop_op(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
, int mod_flags
,
686 char *name
, caddr_t valuep
, int *lengthp
)
691 if (cmdk_debug
& DENT
)
692 PRF("cmdk_prop_op: call\n");
695 dkp
= ddi_get_soft_state(cmdk_state
, ddi_get_instance(dip
));
697 return (ddi_prop_op(dev
, dip
, prop_op
, mod_flags
,
698 name
, valuep
, lengthp
));
700 return (cmlb_prop_op(dkp
->dk_cmlbhandle
,
701 dev
, dip
, prop_op
, mod_flags
, name
, valuep
, lengthp
,
702 CMDKPART(dev
), NULL
));
709 cmdkdump(dev_t dev
, caddr_t addr
, daddr_t blkno
, int nblk
)
713 diskaddr_t p_lblksrt
;
714 diskaddr_t p_lblkcnt
;
719 if (cmdk_debug
& DENT
)
720 PRF("cmdkdump: call\n");
722 instance
= CMDKUNIT(dev
);
723 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)) || (blkno
< 0))
737 if ((blkno
+nblk
) > p_lblkcnt
)
740 cmdk_indump
= 1; /* Tell disk targets we are panic dumpping */
743 bzero(bp
, sizeof (*bp
));
744 bp
->b_flags
= B_BUSY
;
745 bp
->b_un
.b_addr
= addr
;
746 bp
->b_bcount
= nblk
<< SCTRSHFT
;
747 SET_BP_SEC(bp
, ((ulong_t
)(p_lblksrt
+ blkno
)));
749 (void) dadk_dump(DKTP_DATA
, bp
);
750 return (bp
->b_error
);
754 * Copy in the dadkio_rwcmd according to the user's data model. If needed,
755 * convert it for our internal use.
758 rwcmd_copyin(struct dadkio_rwcmd
*rwcmdp
, caddr_t inaddr
, int flag
)
760 switch (ddi_model_convert_from(flag
)) {
761 case DDI_MODEL_ILP32
: {
762 struct dadkio_rwcmd32 cmd32
;
764 if (ddi_copyin(inaddr
, &cmd32
,
765 sizeof (struct dadkio_rwcmd32
), flag
)) {
769 rwcmdp
->cmd
= cmd32
.cmd
;
770 rwcmdp
->flags
= cmd32
.flags
;
771 rwcmdp
->blkaddr
= (blkaddr_t
)cmd32
.blkaddr
;
772 rwcmdp
->buflen
= cmd32
.buflen
;
773 rwcmdp
->bufaddr
= (caddr_t
)(intptr_t)cmd32
.bufaddr
;
775 * Note: we do not convert the 'status' field,
776 * as it should not contain valid data at this
779 bzero(&rwcmdp
->status
, sizeof (rwcmdp
->status
));
782 case DDI_MODEL_NONE
: {
783 if (ddi_copyin(inaddr
, rwcmdp
,
784 sizeof (struct dadkio_rwcmd
), flag
)) {
793 * If necessary, convert the internal rwcmdp and status to the appropriate
794 * data model and copy it out to the user.
797 rwcmd_copyout(struct dadkio_rwcmd
*rwcmdp
, caddr_t outaddr
, int flag
)
799 switch (ddi_model_convert_from(flag
)) {
800 case DDI_MODEL_ILP32
: {
801 struct dadkio_rwcmd32 cmd32
;
803 cmd32
.cmd
= rwcmdp
->cmd
;
804 cmd32
.flags
= rwcmdp
->flags
;
805 cmd32
.blkaddr
= rwcmdp
->blkaddr
;
806 cmd32
.buflen
= rwcmdp
->buflen
;
807 ASSERT64(((uintptr_t)rwcmdp
->bufaddr
>> 32) == 0);
808 cmd32
.bufaddr
= (caddr32_t
)(uintptr_t)rwcmdp
->bufaddr
;
810 cmd32
.status
.status
= rwcmdp
->status
.status
;
811 cmd32
.status
.resid
= rwcmdp
->status
.resid
;
812 cmd32
.status
.failed_blk_is_valid
=
813 rwcmdp
->status
.failed_blk_is_valid
;
814 cmd32
.status
.failed_blk
= rwcmdp
->status
.failed_blk
;
815 cmd32
.status
.fru_code_is_valid
=
816 rwcmdp
->status
.fru_code_is_valid
;
817 cmd32
.status
.fru_code
= rwcmdp
->status
.fru_code
;
819 bcopy(rwcmdp
->status
.add_error_info
,
820 cmd32
.status
.add_error_info
, DADKIO_ERROR_INFO_LEN
);
822 if (ddi_copyout(&cmd32
, outaddr
,
823 sizeof (struct dadkio_rwcmd32
), flag
))
827 case DDI_MODEL_NONE
: {
828 if (ddi_copyout(rwcmdp
, outaddr
,
829 sizeof (struct dadkio_rwcmd
), flag
))
840 cmdkioctl(dev_t dev
, int cmd
, intptr_t arg
, int flag
, cred_t
*credp
, int *rvalp
)
843 struct scsi_device
*devp
;
847 instance
= CMDKUNIT(dev
);
848 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
851 mutex_enter(&dkp
->dk_mutex
);
852 while (dkp
->dk_flag
& CMDK_SUSPEND
) {
853 cv_wait(&dkp
->dk_suspend_cv
, &dkp
->dk_mutex
);
855 mutex_exit(&dkp
->dk_mutex
);
857 bzero(data
, sizeof (data
));
861 case DKIOCGMEDIAINFO
: {
862 struct dk_minfo media_info
;
863 struct tgdk_geom phyg
;
865 /* dadk_getphygeom always returns success */
866 (void) dadk_getphygeom(DKTP_DATA
, &phyg
);
868 media_info
.dki_lbsize
= phyg
.g_secsiz
;
869 media_info
.dki_capacity
= phyg
.g_cap
;
870 media_info
.dki_media_type
= DK_FIXED_DISK
;
872 if (ddi_copyout(&media_info
, (void *)arg
,
873 sizeof (struct dk_minfo
), flag
)) {
881 struct dk_cinfo
*info
= (struct dk_cinfo
*)data
;
883 /* controller information */
884 info
->dki_ctype
= (DKTP_EXT
->tg_ctype
);
885 info
->dki_cnum
= ddi_get_instance(ddi_get_parent(dkp
->dk_dip
));
886 (void) strcpy(info
->dki_cname
,
887 ddi_get_name(ddi_get_parent(dkp
->dk_dip
)));
889 /* Unit Information */
890 info
->dki_unit
= ddi_get_instance(dkp
->dk_dip
);
891 devp
= ddi_get_driver_private(dkp
->dk_dip
);
892 info
->dki_slave
= (CMDEV_TARG(devp
)<<3) | CMDEV_LUN(devp
);
893 (void) strcpy(info
->dki_dname
, ddi_driver_name(dkp
->dk_dip
));
894 info
->dki_flags
= DKI_FMTVOL
;
895 info
->dki_partition
= CMDKPART(dev
);
897 info
->dki_maxtransfer
= maxphys
/ DEV_BSIZE
;
903 if (ddi_copyout(data
, (void *)arg
, sizeof (*info
), flag
))
912 diskaddr_t p_lblksrt
;
913 diskaddr_t p_lblkcnt
;
915 if (ddi_copyin((void *)arg
, &state
, sizeof (int), flag
))
918 /* dadk_check_media blocks until state changes */
919 if (rval
= dadk_check_media(DKTP_DATA
, &state
))
922 if (state
== DKIO_INSERTED
) {
924 if (cmlb_validate(dkp
->dk_cmlbhandle
, 0, 0) != 0)
927 if (cmlb_partinfo(dkp
->dk_cmlbhandle
, CMDKPART(dev
),
928 &p_lblkcnt
, &p_lblksrt
, NULL
, NULL
, 0))
935 if (ddi_copyout(&state
, (caddr_t
)arg
, sizeof (int), flag
))
942 * is media removable?
944 case DKIOCREMOVABLE
: {
947 i
= (DKTP_EXT
->tg_rmb
) ? 1 : 0;
949 if (ddi_copyout(&i
, (caddr_t
)arg
, sizeof (int), flag
))
957 * This is not an update mechanism to add bad blocks
958 * to the bad block structures stored on disk.
962 cmdk_bbh_reopen(dkp
);
966 case DKIOCG_VIRTGEOM
:
976 case DKIOCEXTPARTINFO
:
982 case DKIOCSETEXTPART
:
986 rc
= cmlb_ioctl(dkp
->dk_cmlbhandle
, dev
, cmd
, arg
, flag
,
988 if (cmd
== DKIOCSVTOC
|| cmd
== DKIOCSEXTVTOC
)
989 cmdk_devid_setup(dkp
);
994 struct dadkio_rwcmd
*rwcmdp
;
997 rwcmdp
= kmem_alloc(sizeof (struct dadkio_rwcmd
), KM_SLEEP
);
999 status
= rwcmd_copyin(rwcmdp
, (caddr_t
)arg
, flag
);
1002 bzero(&(rwcmdp
->status
), sizeof (struct dadkio_status
));
1003 status
= dadk_ioctl(DKTP_DATA
,
1012 status
= rwcmd_copyout(rwcmdp
, (caddr_t
)arg
, flag
);
1014 kmem_free(rwcmdp
, sizeof (struct dadkio_rwcmd
));
1019 return (dadk_ioctl(DKTP_DATA
,
1031 cmdkclose(dev_t dev
, int flag
, int otyp
, cred_t
*credp
)
1040 instance
= CMDKUNIT(dev
);
1041 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)) ||
1045 mutex_enter(&dkp
->dk_mutex
);
1047 /* check if device has been opened */
1048 ASSERT(cmdk_isopen(dkp
, dev
));
1049 if (!(dkp
->dk_flag
& CMDK_OPEN
)) {
1050 mutex_exit(&dkp
->dk_mutex
);
1054 while (dkp
->dk_flag
& CMDK_SUSPEND
) {
1055 cv_wait(&dkp
->dk_suspend_cv
, &dkp
->dk_mutex
);
1058 part
= CMDKPART(dev
);
1059 partbit
= 1 << part
;
1061 /* account for close */
1062 if (otyp
== OTYP_LYR
) {
1063 ASSERT(dkp
->dk_open_lyr
[part
] > 0);
1064 if (dkp
->dk_open_lyr
[part
])
1065 dkp
->dk_open_lyr
[part
]--;
1067 ASSERT((dkp
->dk_open_reg
[otyp
] & partbit
) != 0);
1068 dkp
->dk_open_reg
[otyp
] &= ~partbit
;
1070 dkp
->dk_open_exl
&= ~partbit
;
1072 for (i
= 0; i
< CMDK_MAXPART
; i
++)
1073 if (dkp
->dk_open_lyr
[i
] != 0) {
1079 for (i
= 0; i
< OTYPCNT
; i
++)
1080 if (dkp
->dk_open_reg
[i
] != 0) {
1085 mutex_exit(&dkp
->dk_mutex
);
1088 cmlb_invalidate(dkp
->dk_cmlbhandle
, 0);
1090 return (DDI_SUCCESS
);
1095 cmdkopen(dev_t
*dev_p
, int flag
, int otyp
, cred_t
*credp
)
1102 diskaddr_t p_lblksrt
;
1103 diskaddr_t p_lblkcnt
;
1107 instance
= CMDKUNIT(dev
);
1108 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
1111 if (otyp
>= OTYPCNT
)
1114 mutex_enter(&dkp
->dk_mutex
);
1115 while (dkp
->dk_flag
& CMDK_SUSPEND
) {
1116 cv_wait(&dkp
->dk_suspend_cv
, &dkp
->dk_mutex
);
1118 mutex_exit(&dkp
->dk_mutex
);
1120 part
= CMDKPART(dev
);
1121 partbit
= 1 << part
;
1122 nodelay
= (flag
& (FNDELAY
| FNONBLOCK
));
1124 mutex_enter(&dkp
->dk_mutex
);
1126 if (cmlb_validate(dkp
->dk_cmlbhandle
, 0, 0) != 0) {
1128 /* fail if not doing non block open */
1130 mutex_exit(&dkp
->dk_mutex
);
1133 } else if (cmlb_partinfo(dkp
->dk_cmlbhandle
, part
, &p_lblkcnt
,
1134 &p_lblksrt
, NULL
, NULL
, 0) == 0) {
1136 if (p_lblkcnt
<= 0 && (!nodelay
|| otyp
!= OTYP_CHR
)) {
1137 mutex_exit(&dkp
->dk_mutex
);
1141 /* fail if not doing non block open */
1143 mutex_exit(&dkp
->dk_mutex
);
1148 if ((DKTP_EXT
->tg_rdonly
) && (flag
& FWRITE
)) {
1149 mutex_exit(&dkp
->dk_mutex
);
1153 /* check for part already opend exclusively */
1154 if (dkp
->dk_open_exl
& partbit
)
1155 goto excl_open_fail
;
1157 /* check if we can establish exclusive open */
1159 if (dkp
->dk_open_lyr
[part
])
1160 goto excl_open_fail
;
1161 for (i
= 0; i
< OTYPCNT
; i
++) {
1162 if (dkp
->dk_open_reg
[i
] & partbit
)
1163 goto excl_open_fail
;
1167 /* open will succeed, account for open */
1168 dkp
->dk_flag
|= CMDK_OPEN
;
1169 if (otyp
== OTYP_LYR
)
1170 dkp
->dk_open_lyr
[part
]++;
1172 dkp
->dk_open_reg
[otyp
] |= partbit
;
1174 dkp
->dk_open_exl
|= partbit
;
1176 mutex_exit(&dkp
->dk_mutex
);
1177 return (DDI_SUCCESS
);
1180 mutex_exit(&dkp
->dk_mutex
);
1189 cmdkread(dev_t dev
, struct uio
*uio
, cred_t
*credp
)
1191 return (cmdkrw(dev
, uio
, B_READ
));
1195 * async read routine
1199 cmdkaread(dev_t dev
, struct aio_req
*aio
, cred_t
*credp
)
1201 return (cmdkarw(dev
, aio
, B_READ
));
1209 cmdkwrite(dev_t dev
, struct uio
*uio
, cred_t
*credp
)
1211 return (cmdkrw(dev
, uio
, B_WRITE
));
1215 * async write routine
1219 cmdkawrite(dev_t dev
, struct aio_req
*aio
, cred_t
*credp
)
1221 return (cmdkarw(dev
, aio
, B_WRITE
));
1225 cmdkmin(struct buf
*bp
)
1227 if (bp
->b_bcount
> DK_MAXRECSIZE
)
1228 bp
->b_bcount
= DK_MAXRECSIZE
;
1232 cmdkrw(dev_t dev
, struct uio
*uio
, int flag
)
1237 instance
= CMDKUNIT(dev
);
1238 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
1241 mutex_enter(&dkp
->dk_mutex
);
1242 while (dkp
->dk_flag
& CMDK_SUSPEND
) {
1243 cv_wait(&dkp
->dk_suspend_cv
, &dkp
->dk_mutex
);
1245 mutex_exit(&dkp
->dk_mutex
);
1247 return (physio(cmdkstrategy
, NULL
, dev
, flag
, cmdkmin
, uio
));
1251 cmdkarw(dev_t dev
, struct aio_req
*aio
, int flag
)
1256 instance
= CMDKUNIT(dev
);
1257 if (!(dkp
= ddi_get_soft_state(cmdk_state
, instance
)))
1260 mutex_enter(&dkp
->dk_mutex
);
1261 while (dkp
->dk_flag
& CMDK_SUSPEND
) {
1262 cv_wait(&dkp
->dk_suspend_cv
, &dkp
->dk_mutex
);
1264 mutex_exit(&dkp
->dk_mutex
);
1266 return (aphysio(cmdkstrategy
, anocancel
, dev
, flag
, cmdkmin
, aio
));
1273 cmdkstrategy(struct buf
*bp
)
1278 diskaddr_t p_lblksrt
;
1279 diskaddr_t p_lblkcnt
;
1281 instance
= CMDKUNIT(bp
->b_edev
);
1282 if (cmdk_indump
|| !(dkp
= ddi_get_soft_state(cmdk_state
, instance
)) ||
1283 (dkblock(bp
) < 0)) {
1284 bp
->b_resid
= bp
->b_bcount
;
1285 SETBPERR(bp
, ENXIO
);
1290 mutex_enter(&dkp
->dk_mutex
);
1291 ASSERT(cmdk_isopen(dkp
, bp
->b_edev
));
1292 while (dkp
->dk_flag
& CMDK_SUSPEND
) {
1293 cv_wait(&dkp
->dk_suspend_cv
, &dkp
->dk_mutex
);
1295 mutex_exit(&dkp
->dk_mutex
);
1297 bp
->b_flags
&= ~(B_DONE
|B_ERROR
);
1302 * only re-read the vtoc if necessary (force == FALSE)
1304 if (cmlb_partinfo(dkp
->dk_cmlbhandle
, CMDKPART(bp
->b_edev
),
1305 &p_lblkcnt
, &p_lblksrt
, NULL
, NULL
, 0)) {
1306 SETBPERR(bp
, ENXIO
);
1309 if ((bp
->b_bcount
& (NBPSCTR
-1)) || (dkblock(bp
) > p_lblkcnt
))
1310 SETBPERR(bp
, ENXIO
);
1312 if ((bp
->b_flags
& B_ERROR
) || (dkblock(bp
) == p_lblkcnt
)) {
1313 bp
->b_resid
= bp
->b_bcount
;
1318 d_cnt
= bp
->b_bcount
>> SCTRSHFT
;
1319 if ((dkblock(bp
) + d_cnt
) > p_lblkcnt
) {
1320 bp
->b_resid
= ((dkblock(bp
) + d_cnt
) - p_lblkcnt
) << SCTRSHFT
;
1321 bp
->b_bcount
-= bp
->b_resid
;
1324 SET_BP_SEC(bp
, ((ulong_t
)(p_lblksrt
+ dkblock(bp
))));
1325 if (dadk_strategy(DKTP_DATA
, bp
) != DDI_SUCCESS
) {
1326 bp
->b_resid
+= bp
->b_bcount
;
1333 cmdk_create_obj(dev_info_t
*dip
, struct cmdk
*dkp
)
1335 struct scsi_device
*devp
;
1336 opaque_t queobjp
= NULL
;
1337 opaque_t flcobjp
= NULL
;
1338 char que_keyvalp
[64];
1340 char flc_keyvalp
[64];
1343 ASSERT(mutex_owned(&dkp
->dk_mutex
));
1345 /* Create linkage to queueing routines based on property */
1346 que_keylen
= sizeof (que_keyvalp
);
1347 if (ddi_prop_op(DDI_DEV_T_NONE
, dip
, PROP_LEN_AND_VAL_BUF
,
1348 DDI_PROP_CANSLEEP
, "queue", que_keyvalp
, &que_keylen
) !=
1350 cmn_err(CE_WARN
, "cmdk_create_obj: queue property undefined");
1351 return (DDI_FAILURE
);
1353 que_keyvalp
[que_keylen
] = '\0';
1355 if (strcmp(que_keyvalp
, "qfifo") == 0) {
1356 queobjp
= (opaque_t
)qfifo_create();
1357 } else if (strcmp(que_keyvalp
, "qsort") == 0) {
1358 queobjp
= (opaque_t
)qsort_create();
1360 return (DDI_FAILURE
);
1363 /* Create linkage to dequeueing routines based on property */
1364 flc_keylen
= sizeof (flc_keyvalp
);
1365 if (ddi_prop_op(DDI_DEV_T_NONE
, dip
, PROP_LEN_AND_VAL_BUF
,
1366 DDI_PROP_CANSLEEP
, "flow_control", flc_keyvalp
, &flc_keylen
) !=
1369 "cmdk_create_obj: flow-control property undefined");
1370 return (DDI_FAILURE
);
1373 flc_keyvalp
[flc_keylen
] = '\0';
1375 if (strcmp(flc_keyvalp
, "dsngl") == 0) {
1376 flcobjp
= (opaque_t
)dsngl_create();
1377 } else if (strcmp(flc_keyvalp
, "dmult") == 0) {
1378 flcobjp
= (opaque_t
)dmult_create();
1380 return (DDI_FAILURE
);
1383 /* populate bbh_obj object stored in dkp */
1384 dkp
->dk_bbh_obj
.bbh_data
= dkp
;
1385 dkp
->dk_bbh_obj
.bbh_ops
= &cmdk_bbh_ops
;
1387 /* create linkage to dadk */
1388 dkp
->dk_tgobjp
= (opaque_t
)dadk_create();
1390 devp
= ddi_get_driver_private(dip
);
1391 (void) dadk_init(DKTP_DATA
, devp
, flcobjp
, queobjp
, &dkp
->dk_bbh_obj
,
1394 return (DDI_SUCCESS
);
1398 cmdk_destroy_obj(dev_info_t
*dip
, struct cmdk
*dkp
)
1400 char que_keyvalp
[64];
1402 char flc_keyvalp
[64];
1405 ASSERT(mutex_owned(&dkp
->dk_mutex
));
1407 (void) dadk_free((dkp
->dk_tgobjp
));
1408 dkp
->dk_tgobjp
= NULL
;
1410 que_keylen
= sizeof (que_keyvalp
);
1411 if (ddi_prop_op(DDI_DEV_T_NONE
, dip
, PROP_LEN_AND_VAL_BUF
,
1412 DDI_PROP_CANSLEEP
, "queue", que_keyvalp
, &que_keylen
) !=
1414 cmn_err(CE_WARN
, "cmdk_destroy_obj: queue property undefined");
1417 que_keyvalp
[que_keylen
] = '\0';
1419 flc_keylen
= sizeof (flc_keyvalp
);
1420 if (ddi_prop_op(DDI_DEV_T_NONE
, dip
, PROP_LEN_AND_VAL_BUF
,
1421 DDI_PROP_CANSLEEP
, "flow_control", flc_keyvalp
, &flc_keylen
) !=
1424 "cmdk_destroy_obj: flow-control property undefined");
1427 flc_keyvalp
[flc_keylen
] = '\0';
1431 cmdk_lb_rdwr(dev_info_t
*dip
, uchar_t cmd
, void *bufaddr
,
1432 diskaddr_t start
, size_t count
, void *tg_cookie
)
1440 dkp
= ddi_get_soft_state(cmdk_state
, ddi_get_instance(dip
));
1444 if (cmd
!= TG_READ
&& cmd
!= TG_WRITE
)
1447 /* buflen must be multiple of 512 */
1448 buflen
= (count
+ NBPSCTR
- 1) & -NBPSCTR
;
1449 handle
= dadk_iob_alloc(DKTP_DATA
, start
, buflen
, KM_SLEEP
);
1453 if (cmd
== TG_READ
) {
1454 bufa
= dadk_iob_xfer(DKTP_DATA
, handle
, B_READ
);
1458 bcopy(bufa
, bufaddr
, count
);
1460 bufa
= dadk_iob_htoc(DKTP_DATA
, handle
);
1461 bcopy(bufaddr
, bufa
, count
);
1462 bufa
= dadk_iob_xfer(DKTP_DATA
, handle
, B_WRITE
);
1466 (void) dadk_iob_free(DKTP_DATA
, handle
);
/*
 * cmlb information callback: report geometry, capacity, block size and
 * media attributes for the target disk.
 *
 * Returns 0 on success, ENXIO if the soft state is gone, EINVAL for a
 * virtual geometry that cannot be expressed, ENOTTY for unknown commands.
 */
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	struct cmdk		*dkp;
	struct tgdk_geom	phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		phygeomp->g_capacity	= phyg.g_cap;
		phygeomp->g_nsect	= phyg.g_sec;
		phygeomp->g_nhead	= phyg.g_head;
		phygeomp->g_acyl	= phyg.g_acyl;
		phygeomp->g_ncyl	= phyg.g_cyl;
		phygeomp->g_secsize	= phyg.g_secsiz;
		phygeomp->g_intrlv	= 1;
		phygeomp->g_rpm		= 3600;	/* nominal; not queried */

		return (0);
	}

	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl.  See PSARC 1998/313.
		 *
		 * NOTE(review): diskaddr_t appears to be unsigned, so the
		 * `capacity < 0' arm is always false (dead) — confirm
		 * against sys/types.h before removing it.
		 */
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		/* fixed 63 sect/254 head fake geometry, cylinders derived */
		virtgeomp->g_capacity	= capacity;
		virtgeomp->g_nsect	= 63;
		virtgeomp->g_nhead	= 254;
		virtgeomp->g_ncyl	= capacity / (63 * 254);
		virtgeomp->g_acyl	= 0;
		virtgeomp->g_secsize	= 512;
		virtgeomp->g_intrlv	= 1;
		virtgeomp->g_rpm	= 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE: {

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);
	}

	/*
	 * NOTE(review): the case label below was not visible in the
	 * reviewed extract; reconstructed as TG_GETATTR from the
	 * tg_attribute_t body — verify against the cmlb_tg_ops contract.
	 */
	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;

		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;
		tgattribute->media_is_rotational = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}
/*
 * Create and register the devid.
 * There are 4 different ways we can get a device id:
 *    1. Already have one - nothing to do
 *    2. Build one from the drive's model and serial numbers
 *    3. Read one from the disk (first sector of last track)
 *    4. Fabricate one and write it on the disk.
 * If any of these succeeds, register the deviceid
 */
static void
cmdk_devid_setup(struct cmdk *dkp)
{
	int	rc;

	/* Try options until one succeeds, or all have failed */

	/* 1. All done if already registered */
	if (dkp->dk_devid != NULL)
		return;

	/* 2. Build a devid from the model and serial number */
	rc = cmdk_devid_modser(dkp);
	if (rc != DDI_SUCCESS) {
		/* 3. Read devid from the disk, if present */
		rc = cmdk_devid_read(dkp);

		/* 4. otherwise make one up and write it on the disk */
		if (rc != DDI_SUCCESS)
			rc = cmdk_devid_fabricate(dkp);
	}

	/* If we managed to get a devid any of the above ways, register it */
	if (rc == DDI_SUCCESS)
		(void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
}
/*
 * Build a devid from the model and serial number
 * Return DDI_SUCCESS or DDI_FAILURE.
 *
 * On DDI_SUCCESS the new devid is stored in dkp->dk_devid (owned by
 * the caller/soft state).  The scratch hwid buffer is always freed.
 */
static int
cmdk_devid_modser(struct cmdk *dkp)
{
	int	rc = DDI_FAILURE;
	char	*hwid;
	int	modlen;
	int	serlen;

	/*
	 * device ID is a concatenation of model number, '=', serial number.
	 */
	hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
	modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
	/* a zero length means no usable model string — bail out */
	if (modlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen++] = '=';
	serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
	    hwid + modlen, CMDK_HWIDLEN - modlen);
	if (serlen == 0) {
		rc = DDI_FAILURE;
		goto err;
	}
	hwid[modlen + serlen] = 0;	/* NUL-terminate for devid init */

	/* Initialize the device ID, trailing NULL not included */
	rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
	    hwid, &dkp->dk_devid);
	if (rc != DDI_SUCCESS) {
		rc = DDI_FAILURE;
		goto err;
	}

	rc = DDI_SUCCESS;

err:
	kmem_free(hwid, CMDK_HWIDLEN);
	return (rc);
}
/*
 * Fetch the model or serial string (per `ioccmd') into `buf' (length
 * `len') via dadk_ioctl, then trim trailing spaces/NULs.
 *
 * Returns the trimmed length, or 0 if the ioctl failed or the string
 * contains nothing but spaces, NULs and '0' characters (i.e. it is not
 * a usable identifier).
 *
 * NOTE(review): the ioctl argument list and the early-return paths were
 * partially illegible in the reviewed extract; reconstructed from the
 * visible validation loop — verify against dadk_ioctl(9?) usage.
 */
static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int		rval;
	char		*s;
	char		ch;
	boolean_t	ret;
	int		i;
	int		tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval))
		return (0);

	/*
	 * valid model/serial string must contain a non-zero non-space
	 * trim trailing spaces/NULL
	 */
	ret = B_FALSE;
	s = buf;
	tb = 0;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		/* remember position just past the last visible character */
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		/* an all-'0' string is a vendor placeholder, not an id */
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	if (ret == B_FALSE)
		return (0);

	return (tb);
}
/*
 * Read a devid from on the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 *
 * On success a private copy of the on-disk devid is stored in
 * dkp->dk_devid.  The iob handle is always released.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;
	int		rc = DDI_FAILURE;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/* Calculate the checksum: XOR of every word except the last */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}
/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 *
 * On success ownership of the fabricated devid moves to dkp->dk_devid;
 * on failure the devid (if created) is freed here.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init  */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc = DDI_FAILURE;

	if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
	    DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
		/* no device id block address */
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle)
		goto err;

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE)
		goto err;
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum: XOR of every word except the last */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/* write the devid */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;	/* ownership transferred to soft state */

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}
1816 cmdk_bbh_free_alts(struct cmdk
*dkp
)
1818 if (dkp
->dk_alts_hdl
) {
1819 (void) dadk_iob_free(DKTP_DATA
, dkp
->dk_alts_hdl
);
1820 kmem_free(dkp
->dk_slc_cnt
,
1821 NDKMAP
* (sizeof (uint32_t) + sizeof (struct alts_ent
*)));
1822 dkp
->dk_alts_hdl
= NULL
;
1827 cmdk_bbh_reopen(struct cmdk
*dkp
)
1829 tgdk_iob_handle handle
= NULL
;
1830 diskaddr_t slcb
, slcn
, slce
;
1831 struct alts_parttbl
*ap
;
1832 struct alts_ent
*enttblp
;
1840 /* find slice with V_ALTSCTR tag */
1841 for (alts
= 0; alts
< NDKMAP
; alts
++) {
1850 goto empty
; /* no partition table exists */
1853 if (vtoctag
== V_ALTSCTR
&& slcn
> 1)
1856 if (alts
>= NDKMAP
) {
1857 goto empty
; /* no V_ALTSCTR slice defined */
1860 /* read in ALTS label block */
1861 handle
= dadk_iob_alloc(DKTP_DATA
, slcb
, NBPSCTR
, KM_SLEEP
);
1866 ap
= (struct alts_parttbl
*)dadk_iob_xfer(DKTP_DATA
, handle
, B_READ
);
1867 if (!ap
|| (ap
->alts_sanity
!= ALTS_SANITY
)) {
1871 altused
= ap
->alts_ent_used
; /* number of BB entries */
1872 altbase
= ap
->alts_ent_base
; /* blk offset from begin slice */
1873 altlast
= ap
->alts_ent_end
; /* blk offset to last block */
1874 /* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */
1878 altbase
> altlast
||
1882 (void) dadk_iob_free(DKTP_DATA
, handle
);
1884 /* read in ALTS remapping table */
1885 handle
= dadk_iob_alloc(DKTP_DATA
,
1887 (altlast
- altbase
+ 1) << SCTRSHFT
, KM_SLEEP
);
1892 enttblp
= (struct alts_ent
*)dadk_iob_xfer(DKTP_DATA
, handle
, B_READ
);
1897 rw_enter(&dkp
->dk_bbh_mutex
, RW_WRITER
);
1899 /* allocate space for dk_slc_cnt and dk_slc_ent tables */
1900 if (dkp
->dk_slc_cnt
== NULL
) {
1901 dkp
->dk_slc_cnt
= kmem_alloc(NDKMAP
*
1902 (sizeof (long) + sizeof (struct alts_ent
*)), KM_SLEEP
);
1904 dkp
->dk_slc_ent
= (struct alts_ent
**)(dkp
->dk_slc_cnt
+ NDKMAP
);
1906 /* free previous BB table (if any) */
1907 if (dkp
->dk_alts_hdl
) {
1908 (void) dadk_iob_free(DKTP_DATA
, dkp
->dk_alts_hdl
);
1909 dkp
->dk_alts_hdl
= NULL
;
1910 dkp
->dk_altused
= 0;
1913 /* save linkage to new BB table */
1914 dkp
->dk_alts_hdl
= handle
;
1915 dkp
->dk_altused
= altused
;
1918 * build indexes to BB table by slice
1919 * effectively we have
1920 * struct alts_ent *enttblp[altused];
1922 * uint32_t dk_slc_cnt[NDKMAP];
1923 * struct alts_ent *dk_slc_ent[NDKMAP];
1925 for (i
= 0; i
< NDKMAP
; i
++) {
1937 dkp
->dk_slc_cnt
[i
] = 0;
1939 continue; /* slice is not allocated */
1941 /* last block in slice */
1942 slce
= slcb
+ slcn
- 1;
1944 /* find first remap entry in after beginnning of slice */
1945 for (j
= 0; j
< altused
; j
++) {
1946 if (enttblp
[j
].bad_start
+ enttblp
[j
].bad_end
>= slcb
)
1949 dkp
->dk_slc_ent
[i
] = enttblp
+ j
;
1951 /* count remap entrys until end of slice */
1952 for (; j
< altused
&& enttblp
[j
].bad_start
<= slce
; j
++) {
1953 dkp
->dk_slc_cnt
[i
] += 1;
1957 rw_exit(&dkp
->dk_bbh_mutex
);
1961 rw_enter(&dkp
->dk_bbh_mutex
, RW_WRITER
);
1963 if (handle
&& handle
!= dkp
->dk_alts_hdl
)
1964 (void) dadk_iob_free(DKTP_DATA
, handle
);
1966 if (dkp
->dk_alts_hdl
) {
1967 (void) dadk_iob_free(DKTP_DATA
, dkp
->dk_alts_hdl
);
1968 dkp
->dk_alts_hdl
= NULL
;
1971 rw_exit(&dkp
->dk_bbh_mutex
);
/*
 * Return the current cookie from a bbh handle and advance to the next.
 * Each cookie describes one contiguous (possibly remapped) disk extent.
 *
 * NOTE(review): the index advance/return were not visible in the
 * reviewed extract; reconstructed from the h_cktab/h_idx usage.
 */
/*ARGSUSED*/
static opaque_t
cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
{
	struct	bbh_handle	*hp;
	bbh_cookie_t		ckp;

	hp = (struct bbh_handle *)handle;
	ckp = hp->h_cktab + hp->h_idx;	/* cookie at the current index */
	hp->h_idx++;
	return ((opaque_t)ckp);
}
1989 cmdk_bbh_freehandle(opaque_t bbh_data
, opaque_t handle
)
1991 struct bbh_handle
*hp
;
1993 hp
= (struct bbh_handle
*)handle
;
1994 kmem_free(handle
, (sizeof (struct bbh_handle
) +
1995 (hp
->h_totck
* (sizeof (struct bbh_cookie
)))));
/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				   -d-----
 *	case 3:					     ddddd
 *	case 4:			         dddddddddddd
 *	case 5:			      ddddddd-----
 *	case 6:			           ---ddddddd
 *	case 7:			           ddddddd
 *
 *	where:	g = good sector,	b = bad sector
 *		d = sector in disk section
 *		- = disk section may be extended to cover those disk area
 *
 * Returns an opaque handle whose cookie list describes the request
 * split into remapped extents, or NULL when no remapping is needed.
 *
 * NOTE(review): several statements (early returns, cookie count
 * computation, case arithmetic) were illegible in the reviewed
 * extract; reconstructed to match the visible fragments — verify
 * carefully against the original before relying on details.
 */
static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search for the largest bad sector index in the alternate
	 * entry table which overlaps or larger than the starting d_sec
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if starting sector is > the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index.  Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* calculate the number of bad sectors */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = kmem_zalloc((sizeof (*hp) + (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	/* cookie table lives immediately after the handle */
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: request entirely before this bad cluster — done */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: request entirely after this bad cluster — skip */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: request fully inside the bad cluster */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: tail of the request overlaps the bad cluster */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: head of the request overlaps the bad cluster */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx+1].ck_sector = ckp[idx].ck_sector;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: bad cluster entirely inside the request — split */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}
2157 cmdk_bbh_bsearch(struct alts_ent
*buf
, int cnt
, daddr32_t key
)
2167 ind
= 1; /* compiler complains about possible uninitialized var */
2168 for (i
= 1; i
<= cnt
; i
<<= 1)
2171 for (interval
= ind
; interval
; ) {
2172 if ((key
>= buf
[ind
-1].bad_start
) &&
2173 (key
<= buf
[ind
-1].bad_end
)) {
2177 if (key
< buf
[ind
-1].bad_start
) {
2178 /* record the largest bad sector index */
2182 ind
= ind
- interval
;
2185 * if key is larger than the last element
2188 if ((ind
== cnt
) || !interval
)
2190 if ((ind
+interval
) <= cnt
)