Merge illumos-gate
[unleashed.git] / usr / src / uts / intel / io / dktp / disk / cmdk.c
blob6388778d7fc27c017bc907adb180c2c8aee71e8c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
27 * Copyright (c) 2018, Joyent, Inc.
30 #include <sys/scsi/scsi.h>
31 #include <sys/dktp/cm.h>
32 #include <sys/dktp/quetypes.h>
33 #include <sys/dktp/queue.h>
34 #include <sys/dktp/fctypes.h>
35 #include <sys/dktp/flowctrl.h>
36 #include <sys/dktp/cmdev.h>
37 #include <sys/dkio.h>
38 #include <sys/dktp/tgdk.h>
39 #include <sys/dktp/dadk.h>
40 #include <sys/dktp/bbh.h>
41 #include <sys/dktp/altsctr.h>
42 #include <sys/dktp/cmdk.h>
44 #include <sys/stat.h>
45 #include <sys/vtoc.h>
46 #include <sys/file.h>
47 #include <sys/dktp/dadkio.h>
48 #include <sys/aio_req.h>
50 #include <sys/cmlb.h>
53 * Local Static Data
55 #ifdef CMDK_DEBUG
56 #define DENT 0x0001
57 #define DIO 0x0002
59 static int cmdk_debug = DIO;
60 #endif
62 #ifndef TRUE
63 #define TRUE 1
64 #endif
66 #ifndef FALSE
67 #define FALSE 0
68 #endif
71 * NDKMAP is the base number for accessing the fdisk partitions.
72 * c?d?p0 --> cmdk@?,?:q
74 #define PARTITION0_INDEX (NDKMAP + 0)
76 #define DKTP_DATA (dkp->dk_tgobjp)->tg_data
77 #define DKTP_EXT (dkp->dk_tgobjp)->tg_ext
79 void *cmdk_state;
82 * the cmdk_attach_mutex protects cmdk_max_instance in multi-threaded
83 * attach situations
85 static kmutex_t cmdk_attach_mutex;
86 static int cmdk_max_instance = 0;
89 * Panic dumpsys state
90 * There is only a single flag that is not mutex locked since
91 * the system is prevented from thread switching and cmdk_dump
92 * will only be called in a single threaded operation.
94 static int cmdk_indump;
97 * Local Function Prototypes
99 static int cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp);
100 static void cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp);
101 static void cmdkmin(struct buf *bp);
102 static int cmdkrw(dev_t dev, struct uio *uio, int flag);
103 static int cmdkarw(dev_t dev, struct aio_req *aio, int flag);
106 * Bad Block Handling Functions Prototypes
108 static void cmdk_bbh_reopen(struct cmdk *dkp);
109 static opaque_t cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp);
110 static bbh_cookie_t cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle);
111 static void cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle);
112 static void cmdk_bbh_close(struct cmdk *dkp);
113 static void cmdk_bbh_setalts_idx(struct cmdk *dkp);
114 static int cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key);
116 static struct bbh_objops cmdk_bbh_ops = {
117 nulldev,
118 nulldev,
119 cmdk_bbh_gethandle,
120 cmdk_bbh_htoc,
121 cmdk_bbh_freehandle,
122 0, 0
125 static int cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp);
126 static int cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp);
127 static int cmdkstrategy(struct buf *bp);
128 static int cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
129 static int cmdkioctl(dev_t, int, intptr_t, int, cred_t *, int *);
130 static int cmdkread(dev_t dev, struct uio *uio, cred_t *credp);
131 static int cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp);
132 static int cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
133 int mod_flags, char *name, caddr_t valuep, int *lengthp);
134 static int cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp);
135 static int cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp);
138 * Device driver ops vector
141 static struct cb_ops cmdk_cb_ops = {
142 cmdkopen, /* open */
143 cmdkclose, /* close */
144 cmdkstrategy, /* strategy */
145 nodev, /* print */
146 cmdkdump, /* dump */
147 cmdkread, /* read */
148 cmdkwrite, /* write */
149 cmdkioctl, /* ioctl */
150 nodev, /* devmap */
151 nodev, /* mmap */
152 nodev, /* segmap */
153 nochpoll, /* poll */
154 cmdk_prop_op, /* cb_prop_op */
155 0, /* streamtab */
156 D_64BIT | D_MP | D_NEW, /* Driver compatibility flag */
157 CB_REV, /* cb_rev */
158 cmdkaread, /* async read */
159 cmdkawrite /* async write */
162 static int cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
163 void **result);
164 static int cmdkprobe(dev_info_t *dip);
165 static int cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd);
166 static int cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd);
168 static void cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp);
169 static int cmdkresume(dev_info_t *dip);
170 static int cmdksuspend(dev_info_t *dip);
171 static int cmdkpower(dev_info_t *dip, int component, int level);
173 struct dev_ops cmdk_ops = {
174 DEVO_REV, /* devo_rev, */
175 0, /* refcnt */
176 cmdkinfo, /* info */
177 nulldev, /* identify */
178 cmdkprobe, /* probe */
179 cmdkattach, /* attach */
180 cmdkdetach, /* detach */
181 nodev, /* reset */
182 &cmdk_cb_ops, /* driver operations */
183 NULL, /* bus operations */
184 cmdkpower, /* power */
185 ddi_quiesce_not_needed, /* quiesce */
189 * This is the loadable module wrapper.
191 #include <sys/modctl.h>
193 static struct modldrv modldrv = {
194 &mod_driverops, /* Type of module. This one is a driver */
195 "Common Direct Access Disk",
196 &cmdk_ops, /* driver ops */
199 static struct modlinkage modlinkage = {
200 MODREV_1, (void *)&modldrv, NULL
203 /* Function prototypes for cmlb callbacks */
205 static int cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
206 diskaddr_t start, size_t length, void *tg_cookie);
208 static int cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg,
209 void *tg_cookie);
211 static void cmdk_devid_setup(struct cmdk *dkp);
212 static int cmdk_devid_modser(struct cmdk *dkp);
213 static int cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len);
214 static int cmdk_devid_fabricate(struct cmdk *dkp);
215 static int cmdk_devid_read(struct cmdk *dkp);
217 static cmlb_tg_ops_t cmdk_lb_ops = {
218 TG_DK_OPS_VERSION_1,
219 cmdk_lb_rdwr,
220 cmdk_lb_getinfo
223 static boolean_t
224 cmdk_isopen(struct cmdk *dkp, dev_t dev)
226 int part, otyp;
227 ulong_t partbit;
229 ASSERT(MUTEX_HELD((&dkp->dk_mutex)));
231 part = CMDKPART(dev);
232 partbit = 1 << part;
234 /* account for close */
235 if (dkp->dk_open_lyr[part] != 0)
236 return (B_TRUE);
237 for (otyp = 0; otyp < OTYPCNT; otyp++)
238 if (dkp->dk_open_reg[otyp] & partbit)
239 return (B_TRUE);
240 return (B_FALSE);
244 _init(void)
246 int rval;
248 if (rval = ddi_soft_state_init(&cmdk_state, sizeof (struct cmdk), 7))
249 return (rval);
251 mutex_init(&cmdk_attach_mutex, NULL, MUTEX_DRIVER, NULL);
252 if ((rval = mod_install(&modlinkage)) != 0) {
253 mutex_destroy(&cmdk_attach_mutex);
254 ddi_soft_state_fini(&cmdk_state);
256 return (rval);
260 _fini(void)
262 return (EBUSY);
266 _info(struct modinfo *modinfop)
268 return (mod_info(&modlinkage, modinfop));
272 * Autoconfiguration Routines
274 static int
275 cmdkprobe(dev_info_t *dip)
277 int instance;
278 int status;
279 struct cmdk *dkp;
281 instance = ddi_get_instance(dip);
283 if (ddi_get_soft_state(cmdk_state, instance))
284 return (DDI_PROBE_PARTIAL);
286 if (ddi_soft_state_zalloc(cmdk_state, instance) != DDI_SUCCESS)
287 return (DDI_PROBE_PARTIAL);
289 if ((dkp = ddi_get_soft_state(cmdk_state, instance)) == NULL)
290 return (DDI_PROBE_PARTIAL);
292 mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
293 rw_init(&dkp->dk_bbh_mutex, NULL, RW_DRIVER, NULL);
294 dkp->dk_dip = dip;
295 mutex_enter(&dkp->dk_mutex);
297 dkp->dk_dev = makedevice(ddi_driver_major(dip),
298 ddi_get_instance(dip) << CMDK_UNITSHF);
300 /* linkage to dadk and strategy */
301 if (cmdk_create_obj(dip, dkp) != DDI_SUCCESS) {
302 mutex_exit(&dkp->dk_mutex);
303 mutex_destroy(&dkp->dk_mutex);
304 rw_destroy(&dkp->dk_bbh_mutex);
305 ddi_soft_state_free(cmdk_state, instance);
306 return (DDI_PROBE_PARTIAL);
309 status = dadk_probe(DKTP_DATA, KM_NOSLEEP);
310 if (status != DDI_PROBE_SUCCESS) {
311 cmdk_destroy_obj(dip, dkp); /* dadk/strategy linkage */
312 mutex_exit(&dkp->dk_mutex);
313 mutex_destroy(&dkp->dk_mutex);
314 rw_destroy(&dkp->dk_bbh_mutex);
315 ddi_soft_state_free(cmdk_state, instance);
316 return (status);
319 mutex_exit(&dkp->dk_mutex);
320 #ifdef CMDK_DEBUG
321 if (cmdk_debug & DENT)
322 PRF("cmdkprobe: instance= %d name= `%s`\n",
323 instance, ddi_get_name_addr(dip));
324 #endif
325 return (status);
328 static int
329 cmdkattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
331 int instance;
332 struct cmdk *dkp;
333 char *node_type;
335 switch (cmd) {
336 case DDI_ATTACH:
337 break;
338 case DDI_RESUME:
339 return (cmdkresume(dip));
340 default:
341 return (DDI_FAILURE);
344 instance = ddi_get_instance(dip);
345 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
346 return (DDI_FAILURE);
348 dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
349 mutex_init(&dkp->dk_mutex, NULL, MUTEX_DRIVER, NULL);
351 mutex_enter(&dkp->dk_mutex);
353 /* dadk_attach is an empty function that only returns SUCCESS */
354 (void) dadk_attach(DKTP_DATA);
356 node_type = (DKTP_EXT->tg_nodetype);
359 * this open allows cmlb to read the device
360 * and determine the label types
361 * so that cmlb can create minor nodes for device
364 /* open the target disk */
365 if (dadk_open(DKTP_DATA, 0) != DDI_SUCCESS)
366 goto fail2;
368 #ifdef _ILP32
370 struct tgdk_geom phyg;
371 (void) dadk_getphygeom(DKTP_DATA, &phyg);
372 if ((phyg.g_cap - 1) > DK_MAX_BLOCKS) {
373 (void) dadk_close(DKTP_DATA);
374 goto fail2;
377 #endif
380 /* mark as having opened target */
381 dkp->dk_flag |= CMDK_TGDK_OPEN;
383 cmlb_alloc_handle((cmlb_handle_t *)&dkp->dk_cmlbhandle);
385 if (cmlb_attach(dip,
386 &cmdk_lb_ops,
387 DTYPE_DIRECT, /* device_type */
388 B_FALSE, /* removable */
389 B_FALSE, /* hot pluggable XXX */
390 node_type,
391 CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT, /* alter_behaviour */
392 dkp->dk_cmlbhandle,
393 0) != 0)
394 goto fail1;
396 /* Calling validate will create minor nodes according to disk label */
397 (void) cmlb_validate(dkp->dk_cmlbhandle, 0, 0);
399 /* set bbh (Bad Block Handling) */
400 cmdk_bbh_reopen(dkp);
402 /* setup devid string */
403 cmdk_devid_setup(dkp);
405 mutex_enter(&cmdk_attach_mutex);
406 if (instance > cmdk_max_instance)
407 cmdk_max_instance = instance;
408 mutex_exit(&cmdk_attach_mutex);
410 mutex_exit(&dkp->dk_mutex);
413 * Add a zero-length attribute to tell the world we support
414 * kernel ioctls (for layered drivers)
416 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
417 DDI_KERNEL_IOCTL, NULL, 0);
418 ddi_report_dev(dip);
421 * Initialize power management
423 mutex_init(&dkp->dk_pm_mutex, NULL, MUTEX_DRIVER, NULL);
424 cv_init(&dkp->dk_suspend_cv, NULL, CV_DRIVER, NULL);
425 cmdk_setup_pm(dip, dkp);
427 return (DDI_SUCCESS);
429 fail1:
430 cmlb_free_handle(&dkp->dk_cmlbhandle);
431 (void) dadk_close(DKTP_DATA);
432 fail2:
433 cmdk_destroy_obj(dip, dkp);
434 rw_destroy(&dkp->dk_bbh_mutex);
435 mutex_exit(&dkp->dk_mutex);
436 mutex_destroy(&dkp->dk_mutex);
437 ddi_soft_state_free(cmdk_state, instance);
438 return (DDI_FAILURE);
442 static int
443 cmdkdetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
445 struct cmdk *dkp;
446 int instance;
447 int max_instance;
449 switch (cmd) {
450 case DDI_DETACH:
451 /* return (DDI_FAILURE); */
452 break;
453 case DDI_SUSPEND:
454 return (cmdksuspend(dip));
455 default:
456 #ifdef CMDK_DEBUG
457 if (cmdk_debug & DIO) {
458 PRF("cmdkdetach: cmd = %d unknown\n", cmd);
460 #endif
461 return (DDI_FAILURE);
464 mutex_enter(&cmdk_attach_mutex);
465 max_instance = cmdk_max_instance;
466 mutex_exit(&cmdk_attach_mutex);
468 /* check if any instance of driver is open */
469 for (instance = 0; instance < max_instance; instance++) {
470 dkp = ddi_get_soft_state(cmdk_state, instance);
471 if (!dkp)
472 continue;
473 if (dkp->dk_flag & CMDK_OPEN)
474 return (DDI_FAILURE);
477 instance = ddi_get_instance(dip);
478 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
479 return (DDI_SUCCESS);
481 mutex_enter(&dkp->dk_mutex);
484 * The cmdk_part_info call at the end of cmdkattach may have
485 * caused cmdk_reopen to do a TGDK_OPEN, make sure we close on
486 * detach for case when cmdkopen/cmdkclose never occurs.
488 if (dkp->dk_flag & CMDK_TGDK_OPEN) {
489 dkp->dk_flag &= ~CMDK_TGDK_OPEN;
490 (void) dadk_close(DKTP_DATA);
493 cmlb_detach(dkp->dk_cmlbhandle, 0);
494 cmlb_free_handle(&dkp->dk_cmlbhandle);
495 ddi_prop_remove_all(dip);
497 cmdk_destroy_obj(dip, dkp); /* dadk/strategy linkage */
500 * free the devid structure if allocated before
502 if (dkp->dk_devid) {
503 ddi_devid_free(dkp->dk_devid);
504 dkp->dk_devid = NULL;
507 mutex_exit(&dkp->dk_mutex);
508 mutex_destroy(&dkp->dk_mutex);
509 rw_destroy(&dkp->dk_bbh_mutex);
510 mutex_destroy(&dkp->dk_pm_mutex);
511 cv_destroy(&dkp->dk_suspend_cv);
512 ddi_soft_state_free(cmdk_state, instance);
514 return (DDI_SUCCESS);
517 static int
518 cmdkinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
520 dev_t dev = (dev_t)arg;
521 int instance;
522 struct cmdk *dkp;
524 #ifdef CMDK_DEBUG
525 if (cmdk_debug & DENT)
526 PRF("cmdkinfo: call\n");
527 #endif
528 instance = CMDKUNIT(dev);
530 switch (infocmd) {
531 case DDI_INFO_DEVT2DEVINFO:
532 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
533 return (DDI_FAILURE);
534 *result = (void *) dkp->dk_dip;
535 break;
536 case DDI_INFO_DEVT2INSTANCE:
537 *result = (void *)(intptr_t)instance;
538 break;
539 default:
540 return (DDI_FAILURE);
542 return (DDI_SUCCESS);
546 * Initialize the power management components
548 static void
549 cmdk_setup_pm(dev_info_t *dip, struct cmdk *dkp)
551 char *pm_comp[] = { "NAME=cmdk", "0=off", "1=on", NULL };
554 * Since the cmdk device does not the 'reg' property,
555 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
556 * The following code is to tell cpr that this device
557 * DOES need to be suspended and resumed.
559 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
560 "pm-hardware-state", "needs-suspend-resume");
562 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
563 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
564 if (pm_raise_power(dip, 0, CMDK_SPINDLE_ON) == DDI_SUCCESS) {
565 mutex_enter(&dkp->dk_pm_mutex);
566 dkp->dk_pm_level = CMDK_SPINDLE_ON;
567 dkp->dk_pm_is_enabled = 1;
568 mutex_exit(&dkp->dk_pm_mutex);
569 } else {
570 mutex_enter(&dkp->dk_pm_mutex);
571 dkp->dk_pm_level = CMDK_SPINDLE_OFF;
572 dkp->dk_pm_is_enabled = 0;
573 mutex_exit(&dkp->dk_pm_mutex);
575 } else {
576 mutex_enter(&dkp->dk_pm_mutex);
577 dkp->dk_pm_level = CMDK_SPINDLE_UNINIT;
578 dkp->dk_pm_is_enabled = 0;
579 mutex_exit(&dkp->dk_pm_mutex);
584 * suspend routine, it will be run when get the command
585 * DDI_SUSPEND at detach(9E) from system power management
587 static int
588 cmdksuspend(dev_info_t *dip)
590 struct cmdk *dkp;
591 int instance;
592 clock_t count = 0;
594 instance = ddi_get_instance(dip);
595 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
596 return (DDI_FAILURE);
597 mutex_enter(&dkp->dk_mutex);
598 if (dkp->dk_flag & CMDK_SUSPEND) {
599 mutex_exit(&dkp->dk_mutex);
600 return (DDI_SUCCESS);
602 dkp->dk_flag |= CMDK_SUSPEND;
604 /* need to wait a while */
605 while (dadk_getcmds(DKTP_DATA) != 0) {
606 ddi_sleep(1);
607 if (count > 60) {
608 dkp->dk_flag &= ~CMDK_SUSPEND;
609 cv_broadcast(&dkp->dk_suspend_cv);
610 mutex_exit(&dkp->dk_mutex);
611 return (DDI_FAILURE);
613 count++;
615 mutex_exit(&dkp->dk_mutex);
616 return (DDI_SUCCESS);
620 * resume routine, it will be run when get the command
621 * DDI_RESUME at attach(9E) from system power management
623 static int
624 cmdkresume(dev_info_t *dip)
626 struct cmdk *dkp;
627 int instance;
629 instance = ddi_get_instance(dip);
630 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
631 return (DDI_FAILURE);
632 mutex_enter(&dkp->dk_mutex);
633 if (!(dkp->dk_flag & CMDK_SUSPEND)) {
634 mutex_exit(&dkp->dk_mutex);
635 return (DDI_FAILURE);
637 dkp->dk_pm_level = CMDK_SPINDLE_ON;
638 dkp->dk_flag &= ~CMDK_SUSPEND;
639 cv_broadcast(&dkp->dk_suspend_cv);
640 mutex_exit(&dkp->dk_mutex);
641 return (DDI_SUCCESS);
646 * power management entry point, it was used to
647 * change power management component.
648 * Actually, the real hard drive suspend/resume
649 * was handled in ata, so this function is not
650 * doing any real work other than verifying that
651 * the disk is idle.
653 static int
654 cmdkpower(dev_info_t *dip, int component, int level)
656 struct cmdk *dkp;
657 int instance;
659 instance = ddi_get_instance(dip);
660 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
661 component != 0 || level > CMDK_SPINDLE_ON ||
662 level < CMDK_SPINDLE_OFF) {
663 return (DDI_FAILURE);
666 mutex_enter(&dkp->dk_pm_mutex);
667 if (dkp->dk_pm_is_enabled && dkp->dk_pm_level == level) {
668 mutex_exit(&dkp->dk_pm_mutex);
669 return (DDI_SUCCESS);
671 mutex_exit(&dkp->dk_pm_mutex);
673 if ((level == CMDK_SPINDLE_OFF) &&
674 (dadk_getcmds(DKTP_DATA) != 0)) {
675 return (DDI_FAILURE);
678 mutex_enter(&dkp->dk_pm_mutex);
679 dkp->dk_pm_level = level;
680 mutex_exit(&dkp->dk_pm_mutex);
681 return (DDI_SUCCESS);
684 static int
685 cmdk_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
686 char *name, caddr_t valuep, int *lengthp)
688 struct cmdk *dkp;
690 #ifdef CMDK_DEBUG
691 if (cmdk_debug & DENT)
692 PRF("cmdk_prop_op: call\n");
693 #endif
695 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
696 if (dkp == NULL)
697 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
698 name, valuep, lengthp));
700 return (cmlb_prop_op(dkp->dk_cmlbhandle,
701 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
702 CMDKPART(dev), NULL));
706 * dump routine
708 static int
709 cmdkdump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
711 int instance;
712 struct cmdk *dkp;
713 diskaddr_t p_lblksrt;
714 diskaddr_t p_lblkcnt;
715 struct buf local;
716 struct buf *bp;
718 #ifdef CMDK_DEBUG
719 if (cmdk_debug & DENT)
720 PRF("cmdkdump: call\n");
721 #endif
722 instance = CMDKUNIT(dev);
723 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) || (blkno < 0))
724 return (ENXIO);
726 if (cmlb_partinfo(
727 dkp->dk_cmlbhandle,
728 CMDKPART(dev),
729 &p_lblkcnt,
730 &p_lblksrt,
731 NULL,
732 NULL,
733 0)) {
734 return (ENXIO);
737 if ((blkno+nblk) > p_lblkcnt)
738 return (EINVAL);
740 cmdk_indump = 1; /* Tell disk targets we are panic dumpping */
742 bp = &local;
743 bzero(bp, sizeof (*bp));
744 bp->b_flags = B_BUSY;
745 bp->b_un.b_addr = addr;
746 bp->b_bcount = nblk << SCTRSHFT;
747 SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + blkno)));
749 (void) dadk_dump(DKTP_DATA, bp);
750 return (bp->b_error);
754 * Copy in the dadkio_rwcmd according to the user's data model. If needed,
755 * convert it for our internal use.
757 static int
758 rwcmd_copyin(struct dadkio_rwcmd *rwcmdp, caddr_t inaddr, int flag)
760 switch (ddi_model_convert_from(flag)) {
761 case DDI_MODEL_ILP32: {
762 struct dadkio_rwcmd32 cmd32;
764 if (ddi_copyin(inaddr, &cmd32,
765 sizeof (struct dadkio_rwcmd32), flag)) {
766 return (EFAULT);
769 rwcmdp->cmd = cmd32.cmd;
770 rwcmdp->flags = cmd32.flags;
771 rwcmdp->blkaddr = (blkaddr_t)cmd32.blkaddr;
772 rwcmdp->buflen = cmd32.buflen;
773 rwcmdp->bufaddr = (caddr_t)(intptr_t)cmd32.bufaddr;
775 * Note: we do not convert the 'status' field,
776 * as it should not contain valid data at this
777 * point.
779 bzero(&rwcmdp->status, sizeof (rwcmdp->status));
780 break;
782 case DDI_MODEL_NONE: {
783 if (ddi_copyin(inaddr, rwcmdp,
784 sizeof (struct dadkio_rwcmd), flag)) {
785 return (EFAULT);
789 return (0);
793 * If necessary, convert the internal rwcmdp and status to the appropriate
794 * data model and copy it out to the user.
796 static int
797 rwcmd_copyout(struct dadkio_rwcmd *rwcmdp, caddr_t outaddr, int flag)
799 switch (ddi_model_convert_from(flag)) {
800 case DDI_MODEL_ILP32: {
801 struct dadkio_rwcmd32 cmd32;
803 cmd32.cmd = rwcmdp->cmd;
804 cmd32.flags = rwcmdp->flags;
805 cmd32.blkaddr = rwcmdp->blkaddr;
806 cmd32.buflen = rwcmdp->buflen;
807 ASSERT64(((uintptr_t)rwcmdp->bufaddr >> 32) == 0);
808 cmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmdp->bufaddr;
810 cmd32.status.status = rwcmdp->status.status;
811 cmd32.status.resid = rwcmdp->status.resid;
812 cmd32.status.failed_blk_is_valid =
813 rwcmdp->status.failed_blk_is_valid;
814 cmd32.status.failed_blk = rwcmdp->status.failed_blk;
815 cmd32.status.fru_code_is_valid =
816 rwcmdp->status.fru_code_is_valid;
817 cmd32.status.fru_code = rwcmdp->status.fru_code;
819 bcopy(rwcmdp->status.add_error_info,
820 cmd32.status.add_error_info, DADKIO_ERROR_INFO_LEN);
822 if (ddi_copyout(&cmd32, outaddr,
823 sizeof (struct dadkio_rwcmd32), flag))
824 return (EFAULT);
825 break;
827 case DDI_MODEL_NONE: {
828 if (ddi_copyout(rwcmdp, outaddr,
829 sizeof (struct dadkio_rwcmd), flag))
830 return (EFAULT);
833 return (0);
837 * ioctl routine
839 static int
840 cmdkioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *credp, int *rvalp)
842 int instance;
843 struct scsi_device *devp;
844 struct cmdk *dkp;
845 char data[NBPSCTR];
847 instance = CMDKUNIT(dev);
848 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
849 return (ENXIO);
851 mutex_enter(&dkp->dk_mutex);
852 while (dkp->dk_flag & CMDK_SUSPEND) {
853 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
855 mutex_exit(&dkp->dk_mutex);
857 bzero(data, sizeof (data));
859 switch (cmd) {
861 case DKIOCGMEDIAINFO: {
862 struct dk_minfo media_info;
863 struct tgdk_geom phyg;
865 /* dadk_getphygeom always returns success */
866 (void) dadk_getphygeom(DKTP_DATA, &phyg);
868 media_info.dki_lbsize = phyg.g_secsiz;
869 media_info.dki_capacity = phyg.g_cap;
870 media_info.dki_media_type = DK_FIXED_DISK;
872 if (ddi_copyout(&media_info, (void *)arg,
873 sizeof (struct dk_minfo), flag)) {
874 return (EFAULT);
875 } else {
876 return (0);
880 case DKIOCINFO: {
881 struct dk_cinfo *info = (struct dk_cinfo *)data;
883 /* controller information */
884 info->dki_ctype = (DKTP_EXT->tg_ctype);
885 info->dki_cnum = ddi_get_instance(ddi_get_parent(dkp->dk_dip));
886 (void) strcpy(info->dki_cname,
887 ddi_get_name(ddi_get_parent(dkp->dk_dip)));
889 /* Unit Information */
890 info->dki_unit = ddi_get_instance(dkp->dk_dip);
891 devp = ddi_get_driver_private(dkp->dk_dip);
892 info->dki_slave = (CMDEV_TARG(devp)<<3) | CMDEV_LUN(devp);
893 (void) strcpy(info->dki_dname, ddi_driver_name(dkp->dk_dip));
894 info->dki_flags = DKI_FMTVOL;
895 info->dki_partition = CMDKPART(dev);
897 info->dki_maxtransfer = maxphys / DEV_BSIZE;
898 info->dki_addr = 1;
899 info->dki_space = 0;
900 info->dki_prio = 0;
901 info->dki_vec = 0;
903 if (ddi_copyout(data, (void *)arg, sizeof (*info), flag))
904 return (EFAULT);
905 else
906 return (0);
909 case DKIOCSTATE: {
910 int state;
911 int rval;
912 diskaddr_t p_lblksrt;
913 diskaddr_t p_lblkcnt;
915 if (ddi_copyin((void *)arg, &state, sizeof (int), flag))
916 return (EFAULT);
918 /* dadk_check_media blocks until state changes */
919 if (rval = dadk_check_media(DKTP_DATA, &state))
920 return (rval);
922 if (state == DKIO_INSERTED) {
924 if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0)
925 return (ENXIO);
927 if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(dev),
928 &p_lblkcnt, &p_lblksrt, NULL, NULL, 0))
929 return (ENXIO);
931 if (p_lblkcnt <= 0)
932 return (ENXIO);
935 if (ddi_copyout(&state, (caddr_t)arg, sizeof (int), flag))
936 return (EFAULT);
938 return (0);
942 * is media removable?
944 case DKIOCREMOVABLE: {
945 int i;
947 i = (DKTP_EXT->tg_rmb) ? 1 : 0;
949 if (ddi_copyout(&i, (caddr_t)arg, sizeof (int), flag))
950 return (EFAULT);
952 return (0);
955 case DKIOCADDBAD:
957 * This is not an update mechanism to add bad blocks
958 * to the bad block structures stored on disk.
961 /* start BBH */
962 cmdk_bbh_reopen(dkp);
963 return (0);
965 case DKIOCG_PHYGEOM:
966 case DKIOCG_VIRTGEOM:
967 case DKIOCGGEOM:
968 case DKIOCSGEOM:
969 case DKIOCGAPART:
970 case DKIOCSAPART:
971 case DKIOCGVTOC:
972 case DKIOCSVTOC:
973 case DKIOCPARTINFO:
974 case DKIOCGEXTVTOC:
975 case DKIOCSEXTVTOC:
976 case DKIOCEXTPARTINFO:
977 case DKIOCGMBOOT:
978 case DKIOCSMBOOT:
979 case DKIOCGETEFI:
980 case DKIOCSETEFI:
981 case DKIOCPARTITION:
982 case DKIOCSETEXTPART:
984 int rc;
986 rc = cmlb_ioctl(dkp->dk_cmlbhandle, dev, cmd, arg, flag,
987 credp, rvalp, 0);
988 if (cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC)
989 cmdk_devid_setup(dkp);
990 return (rc);
993 case DIOCTL_RWCMD: {
994 struct dadkio_rwcmd *rwcmdp;
995 int status;
997 rwcmdp = kmem_alloc(sizeof (struct dadkio_rwcmd), KM_SLEEP);
999 status = rwcmd_copyin(rwcmdp, (caddr_t)arg, flag);
1001 if (status == 0) {
1002 bzero(&(rwcmdp->status), sizeof (struct dadkio_status));
1003 status = dadk_ioctl(DKTP_DATA,
1004 dev,
1005 cmd,
1006 (uintptr_t)rwcmdp,
1007 flag,
1008 credp,
1009 rvalp);
1011 if (status == 0)
1012 status = rwcmd_copyout(rwcmdp, (caddr_t)arg, flag);
1014 kmem_free(rwcmdp, sizeof (struct dadkio_rwcmd));
1015 return (status);
1018 default:
1019 return (dadk_ioctl(DKTP_DATA,
1020 dev,
1021 cmd,
1022 arg,
1023 flag,
1024 credp,
1025 rvalp));
1029 /*ARGSUSED1*/
1030 static int
1031 cmdkclose(dev_t dev, int flag, int otyp, cred_t *credp)
1033 int part;
1034 ulong_t partbit;
1035 int instance;
1036 struct cmdk *dkp;
1037 int lastclose = 1;
1038 int i;
1040 instance = CMDKUNIT(dev);
1041 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
1042 (otyp >= OTYPCNT))
1043 return (ENXIO);
1045 mutex_enter(&dkp->dk_mutex);
1047 /* check if device has been opened */
1048 ASSERT(cmdk_isopen(dkp, dev));
1049 if (!(dkp->dk_flag & CMDK_OPEN)) {
1050 mutex_exit(&dkp->dk_mutex);
1051 return (ENXIO);
1054 while (dkp->dk_flag & CMDK_SUSPEND) {
1055 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1058 part = CMDKPART(dev);
1059 partbit = 1 << part;
1061 /* account for close */
1062 if (otyp == OTYP_LYR) {
1063 ASSERT(dkp->dk_open_lyr[part] > 0);
1064 if (dkp->dk_open_lyr[part])
1065 dkp->dk_open_lyr[part]--;
1066 } else {
1067 ASSERT((dkp->dk_open_reg[otyp] & partbit) != 0);
1068 dkp->dk_open_reg[otyp] &= ~partbit;
1070 dkp->dk_open_exl &= ~partbit;
1072 for (i = 0; i < CMDK_MAXPART; i++)
1073 if (dkp->dk_open_lyr[i] != 0) {
1074 lastclose = 0;
1075 break;
1078 if (lastclose)
1079 for (i = 0; i < OTYPCNT; i++)
1080 if (dkp->dk_open_reg[i] != 0) {
1081 lastclose = 0;
1082 break;
1085 mutex_exit(&dkp->dk_mutex);
1087 if (lastclose)
1088 cmlb_invalidate(dkp->dk_cmlbhandle, 0);
1090 return (DDI_SUCCESS);
1093 /*ARGSUSED3*/
1094 static int
1095 cmdkopen(dev_t *dev_p, int flag, int otyp, cred_t *credp)
1097 dev_t dev = *dev_p;
1098 int part;
1099 ulong_t partbit;
1100 int instance;
1101 struct cmdk *dkp;
1102 diskaddr_t p_lblksrt;
1103 diskaddr_t p_lblkcnt;
1104 int i;
1105 int nodelay;
1107 instance = CMDKUNIT(dev);
1108 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1109 return (ENXIO);
1111 if (otyp >= OTYPCNT)
1112 return (EINVAL);
1114 mutex_enter(&dkp->dk_mutex);
1115 while (dkp->dk_flag & CMDK_SUSPEND) {
1116 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1118 mutex_exit(&dkp->dk_mutex);
1120 part = CMDKPART(dev);
1121 partbit = 1 << part;
1122 nodelay = (flag & (FNDELAY | FNONBLOCK));
1124 mutex_enter(&dkp->dk_mutex);
1126 if (cmlb_validate(dkp->dk_cmlbhandle, 0, 0) != 0) {
1128 /* fail if not doing non block open */
1129 if (!nodelay) {
1130 mutex_exit(&dkp->dk_mutex);
1131 return (ENXIO);
1133 } else if (cmlb_partinfo(dkp->dk_cmlbhandle, part, &p_lblkcnt,
1134 &p_lblksrt, NULL, NULL, 0) == 0) {
1136 if (p_lblkcnt <= 0 && (!nodelay || otyp != OTYP_CHR)) {
1137 mutex_exit(&dkp->dk_mutex);
1138 return (ENXIO);
1140 } else {
1141 /* fail if not doing non block open */
1142 if (!nodelay) {
1143 mutex_exit(&dkp->dk_mutex);
1144 return (ENXIO);
1148 if ((DKTP_EXT->tg_rdonly) && (flag & FWRITE)) {
1149 mutex_exit(&dkp->dk_mutex);
1150 return (EROFS);
1153 /* check for part already opend exclusively */
1154 if (dkp->dk_open_exl & partbit)
1155 goto excl_open_fail;
1157 /* check if we can establish exclusive open */
1158 if (flag & FEXCL) {
1159 if (dkp->dk_open_lyr[part])
1160 goto excl_open_fail;
1161 for (i = 0; i < OTYPCNT; i++) {
1162 if (dkp->dk_open_reg[i] & partbit)
1163 goto excl_open_fail;
1167 /* open will succeed, account for open */
1168 dkp->dk_flag |= CMDK_OPEN;
1169 if (otyp == OTYP_LYR)
1170 dkp->dk_open_lyr[part]++;
1171 else
1172 dkp->dk_open_reg[otyp] |= partbit;
1173 if (flag & FEXCL)
1174 dkp->dk_open_exl |= partbit;
1176 mutex_exit(&dkp->dk_mutex);
1177 return (DDI_SUCCESS);
1179 excl_open_fail:
1180 mutex_exit(&dkp->dk_mutex);
1181 return (EBUSY);
1185 * read routine
1187 /*ARGSUSED2*/
1188 static int
1189 cmdkread(dev_t dev, struct uio *uio, cred_t *credp)
1191 return (cmdkrw(dev, uio, B_READ));
1195 * async read routine
1197 /*ARGSUSED2*/
1198 static int
1199 cmdkaread(dev_t dev, struct aio_req *aio, cred_t *credp)
1201 return (cmdkarw(dev, aio, B_READ));
1205 * write routine
1207 /*ARGSUSED2*/
1208 static int
1209 cmdkwrite(dev_t dev, struct uio *uio, cred_t *credp)
1211 return (cmdkrw(dev, uio, B_WRITE));
1215 * async write routine
1217 /*ARGSUSED2*/
1218 static int
1219 cmdkawrite(dev_t dev, struct aio_req *aio, cred_t *credp)
1221 return (cmdkarw(dev, aio, B_WRITE));
1224 static void
1225 cmdkmin(struct buf *bp)
1227 if (bp->b_bcount > DK_MAXRECSIZE)
1228 bp->b_bcount = DK_MAXRECSIZE;
1231 static int
1232 cmdkrw(dev_t dev, struct uio *uio, int flag)
1234 int instance;
1235 struct cmdk *dkp;
1237 instance = CMDKUNIT(dev);
1238 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1239 return (ENXIO);
1241 mutex_enter(&dkp->dk_mutex);
1242 while (dkp->dk_flag & CMDK_SUSPEND) {
1243 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1245 mutex_exit(&dkp->dk_mutex);
1247 return (physio(cmdkstrategy, NULL, dev, flag, cmdkmin, uio));
1250 static int
1251 cmdkarw(dev_t dev, struct aio_req *aio, int flag)
1253 int instance;
1254 struct cmdk *dkp;
1256 instance = CMDKUNIT(dev);
1257 if (!(dkp = ddi_get_soft_state(cmdk_state, instance)))
1258 return (ENXIO);
1260 mutex_enter(&dkp->dk_mutex);
1261 while (dkp->dk_flag & CMDK_SUSPEND) {
1262 cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
1264 mutex_exit(&dkp->dk_mutex);
1266 return (aphysio(cmdkstrategy, anocancel, dev, flag, cmdkmin, aio));
/*
 * strategy routine
 *
 * Validates the request against the partition map, waits out any
 * pending DDI suspend, clips the transfer at the end of the slice and
 * hands the buf to the dadk target driver.  Always returns 0; errors
 * are reported on the buf itself via SETBPERR()/biodone().
 */
static int
cmdkstrategy(struct buf *bp)
{
	int		instance;
	struct cmdk	*dkp;
	long		d_cnt;
	diskaddr_t	p_lblksrt;
	diskaddr_t	p_lblkcnt;

	instance = CMDKUNIT(bp->b_edev);

	/* fail fast during a crash dump, or on a bogus unit/block */
	if (cmdk_indump || !(dkp = ddi_get_soft_state(cmdk_state, instance)) ||
	    (dkblock(bp) < 0)) {
		bp->b_resid = bp->b_bcount;
		SETBPERR(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	/* block while the unit is suspended */
	mutex_enter(&dkp->dk_mutex);
	ASSERT(cmdk_isopen(dkp, bp->b_edev));
	while (dkp->dk_flag & CMDK_SUSPEND) {
		cv_wait(&dkp->dk_suspend_cv, &dkp->dk_mutex);
	}
	mutex_exit(&dkp->dk_mutex);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_back = NULL;

	/*
	 * only re-read the vtoc if necessary (force == FALSE)
	 */
	if (cmlb_partinfo(dkp->dk_cmlbhandle, CMDKPART(bp->b_edev),
	    &p_lblkcnt, &p_lblksrt, NULL, NULL, 0)) {
		SETBPERR(bp, ENXIO);
	}

	/* transfer must be sector-aligned and start within the slice */
	if ((bp->b_bcount & (NBPSCTR-1)) || (dkblock(bp) > p_lblkcnt))
		SETBPERR(bp, ENXIO);

	/* a request starting exactly at end-of-slice returns full resid */
	if ((bp->b_flags & B_ERROR) || (dkblock(bp) == p_lblkcnt)) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/* clip a transfer that would run past the end of the slice */
	d_cnt = bp->b_bcount >> SCTRSHFT;
	if ((dkblock(bp) + d_cnt) > p_lblkcnt) {
		bp->b_resid = ((dkblock(bp) + d_cnt) - p_lblkcnt) << SCTRSHFT;
		bp->b_bcount -= bp->b_resid;
	}

	/* translate the slice-relative block to an absolute sector */
	SET_BP_SEC(bp, ((ulong_t)(p_lblksrt + dkblock(bp))));
	if (dadk_strategy(DKTP_DATA, bp) != DDI_SUCCESS) {
		bp->b_resid += bp->b_bcount;
		biodone(bp);
	}
	return (0);
}
1332 static int
1333 cmdk_create_obj(dev_info_t *dip, struct cmdk *dkp)
1335 struct scsi_device *devp;
1336 opaque_t queobjp = NULL;
1337 opaque_t flcobjp = NULL;
1338 char que_keyvalp[64];
1339 int que_keylen;
1340 char flc_keyvalp[64];
1341 int flc_keylen;
1343 ASSERT(mutex_owned(&dkp->dk_mutex));
1345 /* Create linkage to queueing routines based on property */
1346 que_keylen = sizeof (que_keyvalp);
1347 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1348 DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
1349 DDI_PROP_SUCCESS) {
1350 cmn_err(CE_WARN, "cmdk_create_obj: queue property undefined");
1351 return (DDI_FAILURE);
1353 que_keyvalp[que_keylen] = '\0';
1355 if (strcmp(que_keyvalp, "qfifo") == 0) {
1356 queobjp = (opaque_t)qfifo_create();
1357 } else if (strcmp(que_keyvalp, "qsort") == 0) {
1358 queobjp = (opaque_t)qsort_create();
1359 } else {
1360 return (DDI_FAILURE);
1363 /* Create linkage to dequeueing routines based on property */
1364 flc_keylen = sizeof (flc_keyvalp);
1365 if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
1366 DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
1367 DDI_PROP_SUCCESS) {
1368 cmn_err(CE_WARN,
1369 "cmdk_create_obj: flow-control property undefined");
1370 return (DDI_FAILURE);
1373 flc_keyvalp[flc_keylen] = '\0';
1375 if (strcmp(flc_keyvalp, "dsngl") == 0) {
1376 flcobjp = (opaque_t)dsngl_create();
1377 } else if (strcmp(flc_keyvalp, "dmult") == 0) {
1378 flcobjp = (opaque_t)dmult_create();
1379 } else {
1380 return (DDI_FAILURE);
1383 /* populate bbh_obj object stored in dkp */
1384 dkp->dk_bbh_obj.bbh_data = dkp;
1385 dkp->dk_bbh_obj.bbh_ops = &cmdk_bbh_ops;
1387 /* create linkage to dadk */
1388 dkp->dk_tgobjp = (opaque_t)dadk_create();
1390 devp = ddi_get_driver_private(dip);
1391 (void) dadk_init(DKTP_DATA, devp, flcobjp, queobjp, &dkp->dk_bbh_obj,
1392 NULL);
1394 return (DDI_SUCCESS);
/*
 * cmdk_destroy_obj: tear down the dadk target object created by
 * cmdk_create_obj().  Caller must hold dk_mutex.
 *
 * NOTE(review): the "queue"/"flow_control" property values fetched
 * below are never used after being NUL-terminated; the lookups only
 * warn when a property has disappeared.  Also, the terminator store
 * indexes que_keyvalp[que_keylen], which would land one byte past the
 * 64-byte buffer if a property value exactly fills it — verify
 * property lengths or bound the index (same pattern as
 * cmdk_create_obj()).
 */
static void
cmdk_destroy_obj(dev_info_t *dip, struct cmdk *dkp)
{
	char		que_keyvalp[64];
	int		que_keylen;
	char		flc_keyvalp[64];
	int		flc_keylen;

	ASSERT(mutex_owned(&dkp->dk_mutex));

	/* release the dadk object (and the objects it owns) */
	(void) dadk_free((dkp->dk_tgobjp));
	dkp->dk_tgobjp = NULL;

	que_keylen = sizeof (que_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "queue", que_keyvalp, &que_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cmdk_destroy_obj: queue property undefined");
		return;
	}
	que_keyvalp[que_keylen] = '\0';

	flc_keylen = sizeof (flc_keyvalp);
	if (ddi_prop_op(DDI_DEV_T_NONE, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP, "flow_control", flc_keyvalp, &flc_keylen) !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "cmdk_destroy_obj: flow-control property undefined");
		return;
	}
	flc_keyvalp[flc_keylen] = '\0';
}
1429 /*ARGSUSED5*/
1430 static int
1431 cmdk_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufaddr,
1432 diskaddr_t start, size_t count, void *tg_cookie)
1434 struct cmdk *dkp;
1435 opaque_t handle;
1436 int rc = 0;
1437 char *bufa;
1438 size_t buflen;
1440 dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
1441 if (dkp == NULL)
1442 return (ENXIO);
1444 if (cmd != TG_READ && cmd != TG_WRITE)
1445 return (EINVAL);
1447 /* buflen must be multiple of 512 */
1448 buflen = (count + NBPSCTR - 1) & -NBPSCTR;
1449 handle = dadk_iob_alloc(DKTP_DATA, start, buflen, KM_SLEEP);
1450 if (!handle)
1451 return (ENOMEM);
1453 if (cmd == TG_READ) {
1454 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_READ);
1455 if (!bufa)
1456 rc = EIO;
1457 else
1458 bcopy(bufa, bufaddr, count);
1459 } else {
1460 bufa = dadk_iob_htoc(DKTP_DATA, handle);
1461 bcopy(bufaddr, bufa, count);
1462 bufa = dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);
1463 if (!bufa)
1464 rc = EIO;
1466 (void) dadk_iob_free(DKTP_DATA, handle);
1468 return (rc);
/*
 * cmlb target-ops "get info" callout: answer geometry, capacity,
 * block-size and attribute queries on behalf of the common label
 * module.  Returns 0, ENXIO, EINVAL or ENOTTY.
 */
/*ARGSUSED3*/
static int
cmdk_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
{
	struct cmdk		*dkp;
	struct tgdk_geom	phyg;

	dkp = ddi_get_soft_state(cmdk_state, ddi_get_instance(dip));
	if (dkp == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM: {
		cmlb_geom_t *phygeomp = (cmlb_geom_t *)arg;

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);

		phygeomp->g_capacity = phyg.g_cap;
		phygeomp->g_nsect = phyg.g_sec;
		phygeomp->g_nhead = phyg.g_head;
		phygeomp->g_acyl = phyg.g_acyl;
		phygeomp->g_ncyl = phyg.g_cyl;
		phygeomp->g_secsize = phyg.g_secsiz;
		phygeomp->g_intrlv = 1;
		phygeomp->g_rpm = 3600;

		return (0);
	}

	case TG_GETVIRTGEOM: {
		cmlb_geom_t *virtgeomp = (cmlb_geom_t *)arg;
		diskaddr_t capacity;

		(void) dadk_getgeom(DKTP_DATA, &phyg);
		capacity = phyg.g_cap;

		/*
		 * If the controller returned us something that doesn't
		 * really fit into an Int 13/function 8 geometry
		 * result, just fail the ioctl.  See PSARC 1998/313.
		 *
		 * NOTE(review): "capacity < 0" can never be true if
		 * diskaddr_t is an unsigned type — verify the typedef;
		 * only the upper-bound check does any work here.
		 */
		if (capacity < 0 || capacity >= 63 * 254 * 1024)
			return (EINVAL);

		/* fixed virtual geometry: 63 sectors/track, 254 heads */
		virtgeomp->g_capacity = capacity;
		virtgeomp->g_nsect = 63;
		virtgeomp->g_nhead = 254;
		virtgeomp->g_ncyl = capacity / (63 * 254);
		virtgeomp->g_acyl = 0;
		virtgeomp->g_secsize = 512;
		virtgeomp->g_intrlv = 1;
		virtgeomp->g_rpm = 3600;

		return (0);
	}

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:

		/* dadk_getphygeom always returns success */
		(void) dadk_getphygeom(DKTP_DATA, &phyg);
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = phyg.g_cap;
		else
			*(uint32_t *)arg = (uint32_t)phyg.g_secsiz;

		return (0);

	case TG_GETATTR: {
		tg_attribute_t *tgattribute = (tg_attribute_t *)arg;

		if ((DKTP_EXT->tg_rdonly))
			tgattribute->media_is_writable = FALSE;
		else
			tgattribute->media_is_writable = TRUE;
		tgattribute->media_is_rotational = TRUE;

		return (0);
	}

	default:
		return (ENOTTY);
	}
}
1565 * Create and register the devid.
1566 * There are 4 different ways we can get a device id:
1567 * 1. Already have one - nothing to do
1568 * 2. Build one from the drive's model and serial numbers
1569 * 3. Read one from the disk (first sector of last track)
1570 * 4. Fabricate one and write it on the disk.
1571 * If any of these succeeds, register the deviceid
1573 static void
1574 cmdk_devid_setup(struct cmdk *dkp)
1576 int rc;
1578 /* Try options until one succeeds, or all have failed */
1580 /* 1. All done if already registered */
1581 if (dkp->dk_devid != NULL)
1582 return;
1584 /* 2. Build a devid from the model and serial number */
1585 rc = cmdk_devid_modser(dkp);
1586 if (rc != DDI_SUCCESS) {
1587 /* 3. Read devid from the disk, if present */
1588 rc = cmdk_devid_read(dkp);
1590 /* 4. otherwise make one up and write it on the disk */
1591 if (rc != DDI_SUCCESS)
1592 rc = cmdk_devid_fabricate(dkp);
1595 /* If we managed to get a devid any of the above ways, register it */
1596 if (rc == DDI_SUCCESS)
1597 (void) ddi_devid_register(dkp->dk_dip, dkp->dk_devid);
1602 * Build a devid from the model and serial number
1603 * Return DDI_SUCCESS or DDI_FAILURE.
1605 static int
1606 cmdk_devid_modser(struct cmdk *dkp)
1608 int rc = DDI_FAILURE;
1609 char *hwid;
1610 int modlen;
1611 int serlen;
1614 * device ID is a concatenation of model number, '=', serial number.
1616 hwid = kmem_alloc(CMDK_HWIDLEN, KM_SLEEP);
1617 modlen = cmdk_get_modser(dkp, DIOCTL_GETMODEL, hwid, CMDK_HWIDLEN);
1618 if (modlen == 0) {
1619 rc = DDI_FAILURE;
1620 goto err;
1622 hwid[modlen++] = '=';
1623 serlen = cmdk_get_modser(dkp, DIOCTL_GETSERIAL,
1624 hwid + modlen, CMDK_HWIDLEN - modlen);
1625 if (serlen == 0) {
1626 rc = DDI_FAILURE;
1627 goto err;
1629 hwid[modlen + serlen] = 0;
1631 /* Initialize the device ID, trailing NULL not included */
1632 rc = ddi_devid_init(dkp->dk_dip, DEVID_ATA_SERIAL, modlen + serlen,
1633 hwid, &dkp->dk_devid);
1634 if (rc != DDI_SUCCESS) {
1635 rc = DDI_FAILURE;
1636 goto err;
1639 rc = DDI_SUCCESS;
1641 err:
1642 kmem_free(hwid, CMDK_HWIDLEN);
1643 return (rc);
/*
 * Fetch the model or serial number string (ioccmd selects which) from
 * the target into buf[len] via dadk_ioctl().
 *
 * Returns the trimmed length (trailing spaces and NULs dropped), or 0
 * when the ioctl fails or the string carries no information — i.e. it
 * consists entirely of spaces, NULs and '0' characters (some firmware
 * reports all-zero serial numbers).
 */
static int
cmdk_get_modser(struct cmdk *dkp, int ioccmd, char *buf, int len)
{
	dadk_ioc_string_t strarg;
	int	rval;
	char	*s;
	char	ch;
	boolean_t ret;
	int	i;
	int	tb;

	strarg.is_buf = buf;
	strarg.is_size = len;
	if (dadk_ioctl(DKTP_DATA,
	    dkp->dk_dev,
	    ioccmd,
	    (uintptr_t)&strarg,
	    FNATIVE | FKIOCTL,
	    NULL,
	    &rval) != 0)
		return (0);

	/*
	 * valid model/serial string must contain a non-zero non-space
	 * trim trailing spaces/NULL
	 */
	ret = B_FALSE;
	s = buf;
	for (i = 0; i < strarg.is_size; i++) {
		ch = *s++;
		/* tb tracks one past the last significant character */
		if (ch != ' ' && ch != '\0')
			tb = i + 1;
		if (ch != ' ' && ch != '\0' && ch != '0')
			ret = B_TRUE;
	}

	/* tb is only assigned when ret became B_TRUE, so test ret first */
	if (ret == B_FALSE)
		return (0);

	return (tb);
}
/*
 * Read a devid from on the first block of the last track of
 * the last cylinder.  Make sure what we read is a valid devid.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_read(struct cmdk *dkp)
{
	diskaddr_t	blk;
	struct dk_devid *dkdevidp;
	uint_t		*ip;
	int		chksum;
	int		i, sz;
	tgdk_iob_handle	handle = NULL;
	int		rc = DDI_FAILURE;

	/* ask cmlb where the devid block lives for this label */
	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0))
		goto err;

	/* read the devid */
	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (handle == NULL)
		goto err;

	dkdevidp = (struct dk_devid *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (dkdevidp == NULL)
		goto err;

	/* Validate the revision */
	if ((dkdevidp->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevidp->dkd_rev_lo != DK_DEVID_REV_LSB))
		goto err;

	/* Calculate the checksum: XOR of every word except the stored one */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];
	if (DKD_GETCHKSUM(dkdevidp) != chksum)
		goto err;

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)dkdevidp->dkd_devid) != DDI_SUCCESS)
		goto err;

	/* keep a copy of the device id; the iob buffer is freed below */
	sz = ddi_devid_sizeof((ddi_devid_t)dkdevidp->dkd_devid);
	dkp->dk_devid = kmem_alloc(sz, KM_SLEEP);
	bcopy(dkdevidp->dkd_devid, dkp->dk_devid, sz);

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);
	return (rc);
}
/*
 * Create a devid and write it on the first block of the last track of
 * the last cylinder.
 * Return DDI_SUCCESS or DDI_FAILURE.
 */
static int
cmdk_devid_fabricate(struct cmdk *dkp)
{
	ddi_devid_t	devid = NULL;	/* devid made by ddi_devid_init */
	struct dk_devid	*dkdevidp;	/* devid struct stored on disk */
	diskaddr_t	blk;
	tgdk_iob_handle	handle = NULL;
	uint_t		*ip, chksum;
	int		i;
	int		rc = DDI_FAILURE;

	if (ddi_devid_init(dkp->dk_dip, DEVID_FAB, 0, NULL, &devid) !=
	    DDI_SUCCESS)
		goto err;

	if (cmlb_get_devid_block(dkp->dk_cmlbhandle, &blk, 0)) {
		/* no device id block address */
		goto err;
	}

	handle = dadk_iob_alloc(DKTP_DATA, blk, NBPSCTR, KM_SLEEP);
	if (!handle)
		goto err;

	/* Locate the buffer */
	dkdevidp = (struct dk_devid *)dadk_iob_htoc(DKTP_DATA, handle);

	/* Fill in the revision */
	bzero(dkdevidp, NBPSCTR);
	dkdevidp->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevidp->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	i = ddi_devid_sizeof(devid);
	if (i > DK_DEVID_SIZE)
		goto err;
	bcopy(devid, dkdevidp->dkd_devid, i);

	/* Calculate the chksum: XOR of every word except the last */
	chksum = 0;
	ip = (uint_t *)dkdevidp;
	for (i = 0; i < ((NBPSCTR - sizeof (int))/sizeof (int)); i++)
		chksum ^= ip[i];

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevidp);

	/*
	 * write the devid
	 * NOTE(review): the write status is ignored (cast to void); the
	 * fabricated devid is kept in core even if the disk copy failed.
	 */
	(void) dadk_iob_xfer(DKTP_DATA, handle, B_WRITE);

	dkp->dk_devid = devid;

	rc = DDI_SUCCESS;

err:
	if (handle != NULL)
		(void) dadk_iob_free(DKTP_DATA, handle);

	if (rc != DDI_SUCCESS && devid != NULL)
		ddi_devid_free(devid);

	return (rc);
}
1815 static void
1816 cmdk_bbh_free_alts(struct cmdk *dkp)
1818 if (dkp->dk_alts_hdl) {
1819 (void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
1820 kmem_free(dkp->dk_slc_cnt,
1821 NDKMAP * (sizeof (uint32_t) + sizeof (struct alts_ent *)));
1822 dkp->dk_alts_hdl = NULL;
/*
 * (Re)load the bad-block remap tables from the V_ALTSCTR slice and
 * rebuild the per-slice indexes (dk_slc_cnt/dk_slc_ent) under
 * dk_bbh_mutex.  On any failure the previous tables are torn down and
 * the disk is treated as having no remapped sectors.
 */
static void
cmdk_bbh_reopen(struct cmdk *dkp)
{
	tgdk_iob_handle		handle = NULL;
	diskaddr_t		slcb, slcn, slce;
	struct alts_parttbl	*ap;
	struct alts_ent		*enttblp;
	uint32_t		altused;
	uint32_t		altbase;
	uint32_t		altlast;
	int			alts;
	uint16_t		vtoctag;
	int			i, j;

	/* find slice with V_ALTSCTR tag */
	for (alts = 0; alts < NDKMAP; alts++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    alts,
		    &slcn,
		    &slcb,
		    NULL,
		    &vtoctag,
		    0)) {
			goto empty;	/* no partition table exists */
		}

		if (vtoctag == V_ALTSCTR && slcn > 1)
			break;
	}
	if (alts >= NDKMAP) {
		goto empty;	/* no V_ALTSCTR slice defined */
	}

	/* read in ALTS label block */
	handle = dadk_iob_alloc(DKTP_DATA, slcb, NBPSCTR, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	ap = (struct alts_parttbl *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!ap || (ap->alts_sanity != ALTS_SANITY)) {
		goto empty;
	}

	altused = ap->alts_ent_used;	/* number of BB entries */
	altbase = ap->alts_ent_base;	/* blk offset from begin slice */
	altlast = ap->alts_ent_end;	/* blk offset to last block */
	/* ((altused * sizeof (struct alts_ent) + NBPSCTR - 1) & ~NBPSCTR) */

	/* reject a remap table that lies outside its slice */
	if (altused == 0 ||
	    altbase < 1 ||
	    altbase > altlast ||
	    altlast >= slcn) {
		goto empty;
	}
	(void) dadk_iob_free(DKTP_DATA, handle);

	/* read in ALTS remapping table */
	handle = dadk_iob_alloc(DKTP_DATA,
	    slcb + altbase,
	    (altlast - altbase + 1) << SCTRSHFT, KM_SLEEP);
	if (!handle) {
		goto empty;
	}

	enttblp = (struct alts_ent *)dadk_iob_xfer(DKTP_DATA, handle, B_READ);
	if (!enttblp) {
		goto empty;
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);

	/*
	 * allocate space for dk_slc_cnt and dk_slc_ent tables
	 *
	 * NOTE(review): the element size used here (sizeof (long)) must
	 * stay in step with the kmem_free() in cmdk_bbh_free_alts(), and
	 * the comment further down describes dk_slc_cnt as uint32_t —
	 * verify the declared type; on LP64 kernels those sizes differ.
	 */
	if (dkp->dk_slc_cnt == NULL) {
		dkp->dk_slc_cnt = kmem_alloc(NDKMAP *
		    (sizeof (long) + sizeof (struct alts_ent *)), KM_SLEEP);
	}
	dkp->dk_slc_ent = (struct alts_ent **)(dkp->dk_slc_cnt + NDKMAP);

	/* free previous BB table (if any) */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
		dkp->dk_altused = 0;
	}

	/* save linkage to new BB table */
	dkp->dk_alts_hdl = handle;
	dkp->dk_altused = altused;

	/*
	 * build indexes to BB table by slice
	 * effectively we have
	 *	struct alts_ent *enttblp[altused];
	 *
	 *	uint32_t	dk_slc_cnt[NDKMAP];
	 *	struct alts_ent *dk_slc_ent[NDKMAP];
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (cmlb_partinfo(
		    dkp->dk_cmlbhandle,
		    i,
		    &slcn,
		    &slcb,
		    NULL,
		    NULL,
		    0)) {
			goto empty1;
		}

		dkp->dk_slc_cnt[i] = 0;
		if (slcn == 0)
			continue;	/* slice is not allocated */

		/* last block in slice */
		slce = slcb + slcn - 1;

		/*
		 * find first remap entry in after beginnning of slice
		 * NOTE(review): the test sums bad_start + bad_end; an
		 * overlap test would normally compare bad_end alone
		 * against slcb — verify against the on-disk format.
		 */
		for (j = 0; j < altused; j++) {
			if (enttblp[j].bad_start + enttblp[j].bad_end >= slcb)
				break;
		}
		dkp->dk_slc_ent[i] = enttblp + j;

		/* count remap entrys until end of slice */
		for (; j < altused && enttblp[j].bad_start <= slce; j++) {
			dkp->dk_slc_cnt[i] += 1;
		}
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return;

empty:
	rw_enter(&dkp->dk_bbh_mutex, RW_WRITER);
empty1:
	/* don't free an iob that was already installed as dk_alts_hdl */
	if (handle && handle != dkp->dk_alts_hdl)
		(void) dadk_iob_free(DKTP_DATA, handle);

	/* drop any previously loaded BB table as well */
	if (dkp->dk_alts_hdl) {
		(void) dadk_iob_free(DKTP_DATA, dkp->dk_alts_hdl);
		dkp->dk_alts_hdl = NULL;
	}

	rw_exit(&dkp->dk_bbh_mutex);
}
1974 /*ARGSUSED*/
1975 static bbh_cookie_t
1976 cmdk_bbh_htoc(opaque_t bbh_data, opaque_t handle)
1978 struct bbh_handle *hp;
1979 bbh_cookie_t ckp;
1981 hp = (struct bbh_handle *)handle;
1982 ckp = hp->h_cktab + hp->h_idx;
1983 hp->h_idx++;
1984 return (ckp);
1987 /*ARGSUSED*/
1988 static void
1989 cmdk_bbh_freehandle(opaque_t bbh_data, opaque_t handle)
1991 struct bbh_handle *hp;
1993 hp = (struct bbh_handle *)handle;
1994 kmem_free(handle, (sizeof (struct bbh_handle) +
1995 (hp->h_totck * (sizeof (struct bbh_cookie)))));
/*
 * cmdk_bbh_gethandle remaps the bad sectors to alternates.
 * There are 7 different cases when the comparison is made
 * between the bad sector cluster and the disk section.
 *
 *	bad sector cluster	gggggggggggbbbbbbbggggggggggg
 *	case 1:			   ddddd
 *	case 2:				   -d-----
 *	case 3:					     ddddd
 *	case 4:			         dddddddddddd
 *	case 5:			      ddddddd-----
 *	case 6:			           ---ddddddd
 *	case 7:			           ddddddd
 *
 * where:  g = good sector,	b = bad sector
 *	   d = sector in disk section
 *	   - = disk section may be extended to cover those disk area
 */

static opaque_t
cmdk_bbh_gethandle(opaque_t bbh_data, struct buf *bp)
{
	struct cmdk		*dkp = (struct cmdk *)bbh_data;
	struct bbh_handle	*hp;
	struct bbh_cookie	*ckp;
	struct alts_ent		*altp;
	uint32_t		alts_used;
	uint32_t		part = CMDKPART(bp->b_edev);
	daddr32_t		lastsec;
	long			d_count;
	int			i;
	int			idx;
	int			cnt;

	if (part >= V_NUMPAR)
		return (NULL);

	/*
	 * This if statement is atomic and it will succeed
	 * if there are no bad blocks (almost always)
	 *
	 * so this if is performed outside of the rw_enter for speed
	 * and then repeated inside the rw_enter for safety
	 */
	if (!dkp->dk_alts_hdl) {
		return (NULL);
	}

	rw_enter(&dkp->dk_bbh_mutex, RW_READER);

	if (dkp->dk_alts_hdl == NULL) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	alts_used = dkp->dk_slc_cnt[part];
	if (alts_used == 0) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	altp = dkp->dk_slc_ent[part];

	/*
	 * binary search for the largest bad sector index in the alternate
	 * entry table which overlaps or larger than the starting d_sec
	 */
	i = cmdk_bbh_bsearch(altp, alts_used, GET_BP_SEC(bp));
	/* if starting sector is > the largest bad sector, return */
	if (i == -1) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}
	/* i is the starting index.  Set altp to the starting entry addr */
	altp += i;

	d_count = bp->b_bcount >> SCTRSHFT;
	lastsec = GET_BP_SEC(bp) + d_count - 1;

	/* calculate the number of bad sectors */
	for (idx = i, cnt = 0; idx < alts_used; idx++, altp++, cnt++) {
		if (lastsec < altp->bad_start)
			break;
	}

	if (!cnt) {
		rw_exit(&dkp->dk_bbh_mutex);
		return (NULL);
	}

	/* calculate the maximum number of reserved cookies */
	cnt <<= 1;
	cnt++;

	/* allocate the handle */
	hp = kmem_zalloc((sizeof (*hp) + (cnt * sizeof (*ckp))), KM_SLEEP);

	hp->h_idx = 0;
	hp->h_totck = cnt;
	ckp = hp->h_cktab = (struct bbh_cookie *)(hp + 1);
	ckp[0].ck_sector = GET_BP_SEC(bp);
	ckp[0].ck_seclen = d_count;

	/*
	 * Walk the overlapping remap entries again, splitting the
	 * original [sector, sector+len) cookie around each bad range.
	 * `i' resumes from the bsearch index while `idx' tracks the
	 * cookie currently being split — the mixed loop variables are
	 * intentional.
	 */
	altp = dkp->dk_slc_ent[part];
	altp += i;
	for (idx = 0; i < alts_used; i++, altp++) {
		/* CASE 1: */
		if (lastsec < altp->bad_start)
			break;

		/* CASE 3: */
		if (ckp[idx].ck_sector > altp->bad_end)
			continue;

		/* CASE 2 and 7: */
		if ((ckp[idx].ck_sector >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			break;
		}

		/* at least one bad sector in our section.  break it. */
		/* CASE 5: */
		if ((lastsec >= altp->bad_start) &&
		    (lastsec <= altp->bad_end)) {
			ckp[idx+1].ck_seclen = lastsec - altp->bad_start + 1;
			ckp[idx].ck_seclen -= ckp[idx+1].ck_seclen;
			ckp[idx+1].ck_sector = altp->good_start;
			break;
		}
		/* CASE 6: */
		if ((ckp[idx].ck_sector <= altp->bad_end) &&
		    (ckp[idx].ck_sector >= altp->bad_start)) {
			ckp[idx+1].ck_seclen = ckp[idx].ck_seclen;
			ckp[idx].ck_seclen = altp->bad_end -
			    ckp[idx].ck_sector + 1;
			ckp[idx+1].ck_seclen -= ckp[idx].ck_seclen;
			ckp[idx].ck_sector = altp->good_start +
			    ckp[idx].ck_sector - altp->bad_start;
			idx++;
			ckp[idx].ck_sector = altp->bad_end + 1;
			continue;	/* check rest of section */
		}

		/* CASE 4: */
		ckp[idx].ck_seclen = altp->bad_start - ckp[idx].ck_sector;
		ckp[idx+1].ck_sector = altp->good_start;
		ckp[idx+1].ck_seclen = altp->bad_end - altp->bad_start + 1;
		idx += 2;
		ckp[idx].ck_sector = altp->bad_end + 1;
		ckp[idx].ck_seclen = lastsec - altp->bad_end;
	}

	rw_exit(&dkp->dk_bbh_mutex);
	return ((opaque_t)hp);
}
/*
 * Binary search the sorted remap table buf[0..cnt-1] for `key'.
 * Returns the index of the entry whose [bad_start, bad_end] range
 * contains key; otherwise the index of the nearest entry above key
 * seen during the search, or -1 when key lies beyond every entry.
 */
static int
cmdk_bbh_bsearch(struct alts_ent *buf, int cnt, daddr32_t key)
{
	int	i;
	int	ind;
	int	interval;
	int	mystatus = -1;

	if (!cnt)
		return (mystatus);

	/* start probing at the largest power of two <= cnt */
	ind = 1; /* compiler complains about possible uninitialized var */
	for (i = 1; i <= cnt; i <<= 1)
		ind = i;

	for (interval = ind; interval; ) {
		if ((key >= buf[ind-1].bad_start) &&
		    (key <= buf[ind-1].bad_end)) {
			return (ind-1);
		} else {
			interval >>= 1;
			if (key < buf[ind-1].bad_start) {
				/* record the largest bad sector index */
				mystatus = ind-1;
				if (!interval)
					break;
				ind = ind - interval;
			} else {
				/*
				 * if key is larger than the last element
				 * then break
				 */
				if ((ind == cnt) || !interval)
					break;
				if ((ind+interval) <= cnt)
					ind += interval;
			}
		}
	}
	return (mystatus);
}