4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Direct Attached disk driver for SPARC machines.
32 * Includes, Declarations and Local Data
34 #include <sys/dada/dada.h>
35 #include <sys/dkbad.h>
36 #include <sys/dklabel.h>
40 #include <sys/dada/targets/daddef.h>
41 #include <sys/dada/targets/dadpriv.h>
44 #include <sys/kstat.h>
45 #include <sys/vtrace.h>
46 #include <sys/aio_req.h>
/*
 * Driver-global tunables. These are plain (non-const) ints so they can be
 * patched at boot or via a debugger. NOTE(review): this extract has lost the
 * original comment delimiters and some lines (embedded source numbers jump);
 * values come from dada headers — confirm defaults against daddef.h.
 */
51 * Global Error Levels for Error Reporting
53 int dcd_error_level
= DCD_ERR_RETRYABLE
;
/* Per-command I/O timeout; DCD_IO_TIME is defined in the dada headers. */
58 static int dcd_io_time
= DCD_IO_TIME
;
/* How many times a failed command is retried before giving up. */
59 static int dcd_retry_count
= DCD_RETRY_COUNT
;
/* Non-zero: report Predictive Failure Analysis (PFA) events. */
60 static int dcd_report_pfa
= 1;
61 static int dcd_rot_delay
= 4;
/* Busy-poll retry budget used by dcd_poll(). */
62 static int dcd_poll_busycnt
= DCD_POLL_TIMEOUT
;
/*
 * Forward declarations for every static routine in this file, plus the
 * externs provided by the dada framework (dcd_log, makecommand) and the
 * cmlb (common disk label) callback table. NOTE(review): this extract is
 * missing interior lines (the dcd_lb_ops initializer body, some trailing
 * parameters) — the embedded original line numbers show the gaps.
 */
65 * Local Function Prototypes
68 static int dcdopen(dev_t
*dev_p
, int flag
, int otyp
, cred_t
*cred_p
);
69 static int dcdclose(dev_t dev
, int flag
, int otyp
, cred_t
*cred_p
);
70 static int dcdstrategy(struct buf
*bp
);
71 static int dcddump(dev_t dev
, caddr_t addr
, daddr_t blkno
, int nblk
);
72 static int dcdioctl(dev_t
, int, intptr_t, int, cred_t
*, int *);
73 static int dcdread(dev_t dev
, struct uio
*uio
, cred_t
*cred_p
);
74 static int dcdwrite(dev_t dev
, struct uio
*uio
, cred_t
*cred_p
);
75 static int dcd_prop_op(dev_t
, dev_info_t
*, ddi_prop_op_t
, int,
76 char *, caddr_t
, int *);
77 static int dcdaread(dev_t dev
, struct aio_req
*aio
, cred_t
*cred_p
);
78 static int dcdawrite(dev_t dev
, struct aio_req
*aio
, cred_t
*cred_p
);
/* Attach/detach helpers and device-id (devid) management. */
81 static void dcd_free_softstate(struct dcd_disk
*un
, dev_info_t
*devi
);
82 static int dcd_doattach(dev_info_t
*devi
, int (*f
)());
83 static int dcd_validate_geometry(struct dcd_disk
*un
);
84 static ddi_devid_t
dcd_get_devid(struct dcd_disk
*un
);
85 static ddi_devid_t
dcd_create_devid(struct dcd_disk
*un
);
86 static int dcd_make_devid_from_serial(struct dcd_disk
*un
);
87 static void dcd_validate_model_serial(char *str
, int *retlen
, int totallen
);
88 static int dcd_read_deviceid(struct dcd_disk
*un
);
89 static int dcd_write_deviceid(struct dcd_disk
*un
);
90 static int dcd_poll(struct dcd_pkt
*pkt
);
91 static char *dcd_rname(int reason
);
92 static void dcd_flush_cache(struct dcd_disk
*un
);
94 static int dcd_compute_dk_capacity(struct dcd_device
*devp
,
95 diskaddr_t
*capacity
);
96 static int dcd_send_lb_rw_cmd(dev_info_t
*devinfo
, void *bufaddr
,
97 diskaddr_t start_block
, size_t reqlength
, uchar_t cmd
);
/* I/O path helpers: request sizing, queueing, command construction. */
99 static void dcdmin(struct buf
*bp
);
101 static int dcdioctl_cmd(dev_t
, struct udcd_cmd
*,
102 enum uio_seg
, enum uio_seg
);
104 static void dcdstart(struct dcd_disk
*un
);
105 static void dcddone_and_mutex_exit(struct dcd_disk
*un
, struct buf
*bp
);
106 static void make_dcd_cmd(struct dcd_disk
*un
, struct buf
*bp
, int (*f
)());
107 static void dcdudcdmin(struct buf
*bp
);
109 static int dcdrunout(caddr_t
);
110 static int dcd_check_wp(dev_t dev
);
111 static int dcd_unit_ready(dev_t dev
);
112 static void dcd_handle_tran_busy(struct buf
*bp
, struct diskhd
*dp
,
113 struct dcd_disk
*un
);
114 static void dcdintr(struct dcd_pkt
*pkt
);
115 static int dcd_handle_incomplete(struct dcd_disk
*un
, struct buf
*bp
);
116 static void dcd_offline(struct dcd_disk
*un
, int bechatty
);
117 static int dcd_ready_and_valid(dev_t dev
, struct dcd_disk
*un
);
118 static void dcd_reset_disk(struct dcd_disk
*un
, struct dcd_pkt
*pkt
);
119 static void dcd_translate(struct dadkio_status32
*statp
, struct udcd_cmd
*cmdp
);
120 static int dcdflushdone(struct buf
*bp
);
122 /* Function prototypes for cmlb */
124 static int dcd_lb_rdwr(dev_info_t
*devi
, uchar_t cmd
, void *bufaddr
,
125 diskaddr_t start_block
, size_t reqlength
, void *tg_cookie
);
127 static int dcd_lb_getphygeom(dev_info_t
*devi
, cmlb_geom_t
*phygeomp
);
128 static int dcd_lb_getinfo(dev_info_t
*devi
, int cmd
, void *arg
,
/*
 * cmlb target-ops vector handed to cmlb_attach().
 * NOTE(review): the initializer body (original lines 133-137) is missing
 * from this extract.
 */
132 static cmlb_tg_ops_t dcd_lb_ops
= {
139 * Error and Logging Functions
141 static void clean_print(dev_info_t
*dev
, char *label
, uint_t level
,
142 char *title
, char *data
, int len
);
143 static void dcdrestart(void *arg
);
145 static int dcd_check_error(struct dcd_disk
*un
, struct buf
*bp
);
148 * Error statistics create/update functions
150 static int dcd_create_errstats(struct dcd_disk
*, int);
/* Provided by the dada framework (not defined in this file). */
155 extern void dcd_log(dev_info_t
*, char *, uint_t
, const char *, ...)
157 extern void makecommand(struct dcd_pkt
*, int, uchar_t
, uint32_t,
158 uchar_t
, uint32_t, uchar_t
, uchar_t
);
162 * Configuration Routines
164 static int dcdinfo(dev_info_t
*dip
, ddi_info_cmd_t infocmd
, void *arg
,
166 static int dcdprobe(dev_info_t
*devi
);
167 static int dcdattach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
);
168 static int dcddetach(dev_info_t
*devi
, ddi_detach_cmd_t cmd
);
169 static int dcdreset(dev_info_t
*dip
, ddi_reset_cmd_t cmd
);
170 static int dcd_dr_detach(dev_info_t
*devi
);
171 static int dcdpower(dev_info_t
*devi
, int component
, int level
);
/* Soft-state anchor for all dcd_disk instances (ddi_soft_state_init). */
173 static void *dcd_state
;
/* Highest instance number seen; protected by dcd_attach_mutex. */
174 static int dcd_max_instance
;
/* Label string passed to dcd_log() and kstat_create(). */
175 static char *dcd_label
= "dad";
177 static char *diskokay
= "disk okay\n";
183 int dcd_test_flag
= 0;
/*
 * Debug plumbing: DAD_DEBUG fires at dcddebug == 1, DAD_DEBUG2 at > 1.
 * NOTE(review): both the debug and no-op definitions appear below — the
 * original #ifdef/#else/#endif lines are missing from this extract.
 */
188 static int dcddebug
= 0;
189 #define DEBUGGING (dcddebug > 1)
190 #define DAD_DEBUG if (dcddebug == 1) dcd_log
191 #define DAD_DEBUG2 if (dcddebug > 1) dcd_log
194 #define DEBUGGING (0)
195 #define DAD_DEBUG if (0) dcd_log
196 #define DAD_DEBUG2 if (0) dcd_log
/*
 * Per-packet private data: the originating buf and the retry bookkeeping
 * that dcdintr()/dcdrestart() consult. Stored in pkt_private (see PP_LEN,
 * which is passed to dcd_init_pkt()).
 */
200 * we use pkt_private area for storing bp and retry_count
201 * XXX: Is this really useful?
203 struct dcd_pkt_private
{
204 struct buf
*dcdpp_bp
;
205 short dcdpp_retry_count
;
206 short dcdpp_victim_retry_count
;
/* lint/_NOTE annotation: each pkt_private is owned by exactly one packet. */
210 _NOTE(SCHEME_PROTECTS_DATA("Unique per pkt", dcd_pkt_private buf
))
212 #define PP_LEN (sizeof (struct dcd_pkt_private))
/*
 * Accessors for the dcd_pkt_private area hung off pkt->pkt_private.
 * NOTE(review): several macro continuation lines are missing from this
 * extract (e.g. the "= n" tail of PKT_SET_VICTIM_RETRY_CNT and the body of
 * SET_BP_ERROR) — the embedded original line numbers show the gaps.
 */
214 #define PKT_SET_BP(pkt, bp) \
215 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp = bp
216 #define PKT_GET_BP(pkt) \
217 (((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp)
220 #define PKT_SET_RETRY_CNT(pkt, n) \
221 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count = n
223 #define PKT_GET_RETRY_CNT(pkt) \
224 (((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count)
226 #define PKT_INCR_RETRY_CNT(pkt, n) \
227 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count += n
229 #define PKT_SET_VICTIM_RETRY_CNT(pkt, n) \
230 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
233 #define PKT_GET_VICTIM_RETRY_CNT(pkt) \
234 (((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count)
235 #define PKT_INCR_VICTIM_RETRY_CNT(pkt, n) \
236 ((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
/* "Not ready" gets half the normal retry budget. */
239 #define DISK_NOT_READY_RETRY_COUNT (dcd_retry_count / 2)
245 #define SET_BP_ERROR(bp, err) \
/* kstat shorthands; un and bp must be in scope at the expansion site. */
248 #define IOSP KSTAT_IO_PTR(un->un_stats)
249 #define IO_PARTITION_STATS un->un_pstats[DCDPART(bp->b_edev)]
250 #define IOSP_PARTITION KSTAT_IO_PTR(IO_PARTITION_STATS)
252 #define DCD_DO_KSTATS(un, kstat_function, bp) \
253 ASSERT(mutex_owned(DCD_MUTEX)); \
254 if (bp != un->un_sbufp) { \
255 if (un->un_stats) { \
256 kstat_function(IOSP); \
258 if (IO_PARTITION_STATS) { \
259 kstat_function(IOSP_PARTITION); \
263 #define DCD_DO_ERRSTATS(un, x) \
264 if (un->un_errstats) { \
265 struct dcd_errstats *dtp; \
266 dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
267 dtp->x.value.ui32++; \
270 #define GET_SOFT_STATE(dev) \
271 struct dcd_disk *un; \
272 int instance, part; \
273 minor_t minor = getminor(dev); \
275 part = minor & DCDPART_MASK; \
276 instance = minor >> DCDUNIT_SHIFT; \
277 if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL) \
/* True when blkno is a multiple of 2^blknoshift (logical-block aligned). */
280 #define LOGICAL_BLOCK_ALIGN(blkno, blknoshift) \
281 (((blkno) & ((1 << (blknoshift)) - 1)) == 0)
284 * After the following number of sectors, the cylinder number spills over
285 * 0xFFFF if sectors = 63 and heads = 16.
287 #define NUM_SECTORS_32G 0x3EFFC10
/*
 * Driver entry-point tables: character/block ops (cb_ops), dev_ops, and the
 * loadable-module linkage. NOTE(review): several initializer slots are
 * missing from this extract (e.g. the open/read slots of cb_ops and the
 * getinfo/attach head of dev_ops) — embedded line numbers show the gaps.
 */
294 * Device driver ops vector
297 static struct cb_ops dcd_cb_ops
= {
299 dcdclose
, /* close */
300 dcdstrategy
, /* strategy */
304 dcdwrite
, /* write */
305 dcdioctl
, /* ioctl */
310 dcd_prop_op
, /* cb_prop_op */
312 D_64BIT
| D_MP
| D_NEW
, /* Driver compatibility flag */
314 dcdaread
, /* async I/O read entry point */
315 dcdawrite
/* async I/O write entry point */
318 static struct dev_ops dcd_ops
= {
319 DEVO_REV
, /* devo_rev, */
322 nulldev
, /* identify */
323 dcdprobe
, /* probe */
324 dcdattach
, /* attach */
325 dcddetach
, /* detach */
326 dcdreset
, /* reset */
327 &dcd_cb_ops
, /* driver operations */
328 NULL
, /* bus operations */
329 dcdpower
, /* power */
330 ddi_quiesce_not_supported
, /* devo_quiesce */
335 * This is the loadable module wrapper.
337 #include <sys/modctl.h>
339 static struct modldrv modldrv
= {
340 &mod_driverops
, /* Type of module. This one is a driver */
341 "DAD Disk Driver", /* Name of the module. */
342 &dcd_ops
, /* driver ops */
347 static struct modlinkage modlinkage
= {
348 MODREV_1
, &modldrv
, NULL
352 * the dcd_attach_mutex only protects dcd_max_instance in multi-threaded
/* Initialized in _init(), destroyed in _fini() / _init() failure path. */
355 static kmutex_t dcd_attach_mutex
;
/*
 * Fragments of the module _init()/_fini() entry points.
 * NOTE(review): the function headers, braces and return statements are
 * missing from this extract (original lines 356-361, 363-365, 368, 371-381,
 * 383-384, 387-389 absent). Visible logic: _init() initializes the soft
 * state and attach mutex, then mod_install()s; on failure it tears both
 * down. _fini() mod_remove()s and, presumably on success, frees both —
 * confirm against the full source.
 */
362 if ((e
= ddi_soft_state_init(&dcd_state
, sizeof (struct dcd_disk
),
366 mutex_init(&dcd_attach_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
367 e
= mod_install(&modlinkage
);
369 mutex_destroy(&dcd_attach_mutex
);
370 ddi_soft_state_fini(&dcd_state
);
382 if ((e
= mod_remove(&modlinkage
)) != 0)
385 ddi_soft_state_fini(&dcd_state
);
386 mutex_destroy(&dcd_attach_mutex
);
/*
 * Loadable-module _info(9E) entry point: report module information by
 * delegating to mod_info() with our modlinkage.
 */
392 _info(struct modinfo
*modinfop
)
395 return (mod_info(&modlinkage
, modinfop
));
/*
 * probe(9E) entry point. Tracks the highest instance number seen (under
 * dcd_attach_mutex), then calls dcd_probe() on the device handed to us by
 * the HBA via driver-private data. A device probes successfully only when
 * it is a non-ATAPI, non-removable ATA disk; otherwise DDI_PROBE_FAILURE.
 * An already-attached instance (soft state exists) returns
 * DDI_PROBE_PARTIAL. NOTE(review): some interior lines (breaks, closing
 * braces, the final return) are missing from this extract.
 */
399 dcdprobe(dev_info_t
*devi
)
401 struct dcd_device
*devp
;
402 int rval
= DDI_PROBE_PARTIAL
;
405 devp
= ddi_get_driver_private(devi
);
406 instance
= ddi_get_instance(devi
);
409 * Keep a count of how many disks (ie. highest instance no) we have
410 * XXX currently not used but maybe useful later again
412 mutex_enter(&dcd_attach_mutex
);
413 if (instance
> dcd_max_instance
)
414 dcd_max_instance
= instance
;
415 mutex_exit(&dcd_attach_mutex
);
417 DAD_DEBUG2(devp
->dcd_dev
, dcd_label
, DCD_DEBUG
, "dcdprobe:\n");
419 if (ddi_get_soft_state(dcd_state
, instance
) != NULL
)
420 return (DDI_PROBE_PARTIAL
);
423 * Turn around and call utility probe routine
424 * to see whether we actually have a disk at
427 DAD_DEBUG2(devp
->dcd_dev
, dcd_label
, DCD_DEBUG
,
428 "dcdprobe: %x\n", dcd_probe(devp
, NULL_FUNC
));
430 switch (dcd_probe(devp
, NULL_FUNC
)) {
432 case DCDPROBE_NORESP
:
433 case DCDPROBE_NONCCS
:
435 case DCDPROBE_FAILURE
:
439 case DCDPROBE_EXISTS
:
441 * Check whether it is a ATA device and then
444 DAD_DEBUG2(devp
->dcd_dev
, dcd_label
, DCD_DEBUG
,
445 "config %x\n", devp
->dcd_ident
->dcd_config
);
446 if ((devp
->dcd_ident
->dcd_config
& ATAPI_DEVICE
) == 0) {
447 if (devp
->dcd_ident
->dcd_config
& ATANON_REMOVABLE
) {
448 rval
= DDI_PROBE_SUCCESS
;
450 rval
= DDI_PROBE_FAILURE
;
452 rval
= DDI_PROBE_FAILURE
;
458 DAD_DEBUG2(devp
->dcd_dev
, dcd_label
, DCD_DEBUG
,
459 "dcdprobe returns %x\n", rval
);
/*
 * attach(9E) entry point. Two visible paths (the cmd switch itself is
 * missing from this extract): a DDI_RESUME-style path that restores
 * un_last_state, wakes waiters on un_suspend_cv and raises power to
 * active; and the DDI_ATTACH path, which runs dcd_doattach(), publishes
 * DDI_KERNEL_IOCTL and pm properties, creates the pm-components array,
 * attaches a cmlb label handle, validates geometry and registers (or
 * fabricates) a devid. NOTE(review): many interior lines are missing
 * (declarations of un/instance/rval/dp, switch labels, closing braces).
 */
467 dcdattach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
)
470 struct dcd_device
*devp
;
474 { "NAME=ide-disk", "0=standby", "1=idle", "2=active" };
477 ASSERT(NO_COMPETING_THREADS
);
480 devp
= ddi_get_driver_private(devi
);
481 instance
= ddi_get_instance(devi
);
482 DAD_DEBUG2(devp
->dcd_dev
, dcd_label
, DCD_DEBUG
, "Attach Started\n");
489 if (!(un
= ddi_get_soft_state(dcd_state
, instance
)))
490 return (DDI_FAILURE
);
491 mutex_enter(DCD_MUTEX
);
494 * Restore the state which was saved to give the
495 * the right state in un_last_state
497 un
->un_last_state
= un
->un_save_state
;
499 cv_broadcast(&un
->un_suspend_cv
);
501 * Raise the power level of the device to active.
503 mutex_exit(DCD_MUTEX
);
504 (void) pm_raise_power(DCD_DEVINFO
, 0, DCD_DEVICE_ACTIVE
);
505 mutex_enter(DCD_MUTEX
);
508 * start unit - if this is a low-activity device
509 * commands in queue will have to wait until new
510 * commands come in, which may take awhile.
511 * Also, we specifically don't check un_ncmds
512 * because we know that there really are no
513 * commands in progress after the unit was suspended
514 * and we could have reached the throttle level, been
515 * suspended, and have no new commands coming in for
516 * awhile. Highly unlikely, but so is the low-
517 * activity disk scenario.
520 if (dp
->b_actf
&& (dp
->b_forw
== NULL
)) {
524 mutex_exit(DCD_MUTEX
);
525 return (DDI_SUCCESS
);
528 return (DDI_FAILURE
);
531 if (dcd_doattach(devi
, SLEEP_FUNC
) == DDI_FAILURE
) {
532 return (DDI_FAILURE
);
535 if (!(un
= (struct dcd_disk
*)
536 ddi_get_soft_state(dcd_state
, instance
))) {
537 return (DDI_FAILURE
);
539 devp
->dcd_private
= (ataopaque_t
)un
;
542 * Add a zero-length attribute to tell the world we support
543 * kernel ioctls (for layered drivers)
545 (void) ddi_prop_create(DDI_DEV_T_NONE
, devi
, DDI_PROP_CANSLEEP
,
546 DDI_KERNEL_IOCTL
, NULL
, 0);
549 * Since the dad device does not have the 'reg' property,
550 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
551 * The following code is to tell cpr that this device
552 * does need to be suspended and resumed.
554 (void) ddi_prop_update_string(DDI_DEV_T_NONE
, devi
,
555 "pm-hardware-state", (caddr_t
)"needs-suspend-resume");
558 * Initialize power management bookkeeping;
559 * Create components - In IDE case there are 3 levels and one
560 * component. The levels being - active, idle, standby.
563 rval
= ddi_prop_update_string_array(DDI_DEV_T_NONE
,
564 devi
, "pm-components", pm_comp
, 4);
565 if (rval
== DDI_PROP_SUCCESS
) {
567 * Ignore the return value of pm_raise_power
568 * Even if we check the return values and
569 * remove the property created above, PM
570 * framework will not honour the change after
571 * first call to pm_raise_power. Hence, the
572 * removal of that property does not help if
573 * pm_raise_power fails.
575 (void) pm_raise_power(DCD_DEVINFO
, 0, DCD_DEVICE_ACTIVE
);
578 ddi_report_dev(devi
);
580 cmlb_alloc_handle(&un
->un_dklbhandle
);
582 if (cmlb_attach(devi
,
588 CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8
,
591 cmlb_free_handle(&un
->un_dklbhandle
);
592 dcd_free_softstate(un
, devi
);
593 return (DDI_FAILURE
);
596 mutex_enter(DCD_MUTEX
);
597 (void) dcd_validate_geometry(un
);
599 /* Get devid; create a devid ONLY IF could not get ID */
600 if (dcd_get_devid(un
) == NULL
) {
601 /* Create the fab'd devid */
602 (void) dcd_create_devid(un
);
604 mutex_exit(DCD_MUTEX
);
606 return (DDI_SUCCESS
);
/*
 * Release everything dcd_doattach()/dcdattach() allocated for one unit:
 * synchronization objects, the special-command buf, the drivetype struct,
 * devid registration, kstats, the soft state itself, and finally the
 * properties created during attach. NOTE(review): the guard conditions
 * around most of these frees (null checks, "if (un)") are missing from
 * this extract — embedded line numbers show the gaps.
 */
610 dcd_free_softstate(struct dcd_disk
*un
, dev_info_t
*devi
)
612 struct dcd_device
*devp
;
613 int instance
= ddi_get_instance(devi
);
615 devp
= ddi_get_driver_private(devi
);
618 sema_destroy(&un
->un_semoclose
);
619 cv_destroy(&un
->un_sbuf_cv
);
620 cv_destroy(&un
->un_state_cv
);
621 cv_destroy(&un
->un_disk_busy_cv
);
622 cv_destroy(&un
->un_suspend_cv
);
625 * Deallocate command packet resources.
628 freerbuf(un
->un_sbufp
);
630 kmem_free((caddr_t
)un
->un_dp
, sizeof (*un
->un_dp
));
633 * Unregister the devid and free devid resources allocated
635 ddi_devid_unregister(DCD_DEVINFO
);
637 ddi_devid_free(un
->un_devid
);
642 * Delete kstats. Kstats for non CD devices are deleted
646 kstat_delete(un
->un_stats
);
652 * Cleanup scsi_device resources.
654 ddi_soft_state_free(dcd_state
, instance
);
655 devp
->dcd_private
= (ataopaque_t
)0;
656 /* unprobe scsi device */
659 /* Remove properties created during attach */
660 ddi_prop_remove_all(devi
);
/*
 * detach(9E) entry point. DDI_DETACH delegates to dcd_dr_detach();
 * the DDI_SUSPEND path saves un_last_state, moves the unit to
 * DCD_STATE_SUSPENDED, then waits up to DCD_WAIT_CMDS_COMPLETE seconds
 * (cv_timedwait on un_disk_busy_cv) for outstanding commands to drain,
 * failing the suspend on timeout. Unknown cmd values fall through to
 * DDI_FAILURE. NOTE(review): the cmd switch labels and some braces are
 * missing from this extract.
 */
664 dcddetach(dev_info_t
*devi
, ddi_detach_cmd_t cmd
)
668 clock_t wait_cmds_complete
;
669 instance
= ddi_get_instance(devi
);
671 if (!(un
= ddi_get_soft_state(dcd_state
, instance
)))
672 return (DDI_FAILURE
);
676 return (dcd_dr_detach(devi
));
679 mutex_enter(DCD_MUTEX
);
680 if (un
->un_state
== DCD_STATE_SUSPENDED
) {
681 mutex_exit(DCD_MUTEX
);
682 return (DDI_SUCCESS
);
686 * Save the last state first
688 un
->un_save_state
= un
->un_last_state
;
690 New_state(un
, DCD_STATE_SUSPENDED
);
693 * wait till current operation completed. If we are
694 * in the resource wait state (with an intr outstanding)
695 * then we need to wait till the intr completes and
696 * starts the next cmd. We wait for
697 * DCD_WAIT_CMDS_COMPLETE seconds before failing the
700 wait_cmds_complete
= ddi_get_lbolt();
701 wait_cmds_complete
+=
702 DCD_WAIT_CMDS_COMPLETE
* drv_usectohz(1000000);
704 while (un
->un_ncmds
) {
705 if (cv_timedwait(&un
->un_disk_busy_cv
,
706 DCD_MUTEX
, wait_cmds_complete
) == -1) {
708 * commands Didn't finish in the
709 * specified time, fail the DDI_SUSPEND.
711 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
,
712 DCD_DEBUG
, "dcddetach: SUSPEND "
713 "failed due to outstanding cmds\n");
715 mutex_exit(DCD_MUTEX
);
716 return (DDI_FAILURE
);
719 mutex_exit(DCD_MUTEX
);
720 return (DDI_SUCCESS
);
722 return (DDI_FAILURE
);
726 * The reset entry point gets invoked at the system shutdown time or through
727 * CPR code at system suspend.
728 * Will be flushing the cache and expect this to be last I/O operation to the
729 * disk before system reset/power off.
/*
 * NOTE(review): the visible body only looks up the soft state and returns;
 * the actual cache-flush call described above (and the cmd check) sits on
 * lines missing from this extract (original 742-744).
 */
733 dcdreset(dev_info_t
*dip
, ddi_reset_cmd_t cmd
)
738 instance
= ddi_get_instance(dip
);
740 if (!(un
= ddi_get_soft_state(dcd_state
, instance
)))
741 return (DDI_FAILURE
);
745 return (DDI_SUCCESS
);
/*
 * Real DDI_DETACH work: refuse to detach while commands are outstanding,
 * tear down the cmlb label handle, drop the device to its lowest power
 * state, and free all soft-state resources. NOTE(review): the condition
 * tested under DCD_MUTEX before the failure return (presumably un_ncmds)
 * sits on a line missing from this extract.
 */
750 dcd_dr_detach(dev_info_t
*devi
)
752 struct dcd_device
*devp
;
756 * Get scsi_device structure for this instance.
758 if ((devp
= ddi_get_driver_private(devi
)) == NULL
)
759 return (DDI_FAILURE
);
762 * Get dcd_disk structure containing target 'private' information
764 un
= (struct dcd_disk
*)devp
->dcd_private
;
767 * Verify there are NO outstanding commands issued to this device.
769 * It's possible to have outstanding commands through the physio
770 * code path, even though everything's closed.
772 _NOTE(COMPETING_THREADS_NOW
);
773 mutex_enter(DCD_MUTEX
);
775 mutex_exit(DCD_MUTEX
);
776 _NOTE(NO_COMPETING_THREADS_NOW
);
777 return (DDI_FAILURE
);
780 mutex_exit(DCD_MUTEX
);
782 cmlb_detach(un
->un_dklbhandle
, 0);
783 cmlb_free_handle(&un
->un_dklbhandle
);
787 * Lower the power state of the device
788 * i.e. the minimum power consumption state - sleep.
790 (void) pm_lower_power(DCD_DEVINFO
, 0, DCD_DEVICE_STANDBY
);
792 _NOTE(NO_COMPETING_THREADS_NOW
);
795 * at this point there are no competing threads anymore
796 * release active MT locks and all device resources.
798 dcd_free_softstate(un
, devi
);
800 return (DDI_SUCCESS
);
/*
 * power(9E) entry point. Validates the requested level against the
 * DCD_DEVICE_STANDBY..DCD_DEVICE_ACTIVE range, refuses to lower power
 * while commands are outstanding or the unit is offline/fatal. Raising to
 * ACTIVE is bookkeeping only; lowering builds a packet and issues
 * ATA_IDLE_IMMEDIATE or ATA_STANDBY_IMMEDIATE via dcd_poll(), then moves
 * the unit to DCD_STATE_PM_SUSPENDED and records the new level.
 * NOTE(review): local declarations (un, instance, pkt, cmd), the level
 * switch header and several braces are on lines missing from this extract.
 */
804 dcdpower(dev_info_t
*devi
, int component
, int level
)
812 instance
= ddi_get_instance(devi
);
814 if (!(un
= ddi_get_soft_state(dcd_state
, instance
)) ||
815 (DCD_DEVICE_STANDBY
> level
) || (level
> DCD_DEVICE_ACTIVE
) ||
817 return (DDI_FAILURE
);
820 mutex_enter(DCD_MUTEX
);
822 * if there are active commands for the device or device will be
823 * active soon. At the same time there is request to lower power
826 if ((un
->un_ncmds
) && (level
!= DCD_DEVICE_ACTIVE
)) {
827 mutex_exit(DCD_MUTEX
);
828 return (DDI_FAILURE
);
831 if ((un
->un_state
== DCD_STATE_OFFLINE
) ||
832 (un
->un_state
== DCD_STATE_FATAL
)) {
833 mutex_exit(DCD_MUTEX
);
834 return (DDI_FAILURE
);
837 if (level
== DCD_DEVICE_ACTIVE
) {
839 * No need to fire any command, just set the state structure
840 * to indicate previous state and set the level to active
842 un
->un_power_level
= DCD_DEVICE_ACTIVE
;
843 if (un
->un_state
== DCD_STATE_PM_SUSPENDED
)
845 mutex_exit(DCD_MUTEX
);
847 pkt
= dcd_init_pkt(ROUTE
, NULL
,
848 NULL
, (uint32_t)sizeof (struct dcd_cmd
), 2, PP_LEN
,
849 PKT_CONSISTENT
, NULL_FUNC
, NULL
);
852 mutex_exit(DCD_MUTEX
);
853 return (DDI_FAILURE
);
857 case DCD_DEVICE_IDLE
:
858 cmd
= ATA_IDLE_IMMEDIATE
;
861 case DCD_DEVICE_STANDBY
:
862 cmd
= ATA_STANDBY_IMMEDIATE
;
866 makecommand(pkt
, 0, cmd
, 0, 0, 0, NO_DATA_XFER
, 0);
867 mutex_exit(DCD_MUTEX
);
869 * Issue the appropriate command
871 if ((dcd_poll(pkt
)) || (SCBP_C(pkt
) != STATUS_GOOD
)) {
872 dcd_destroy_pkt(pkt
);
873 return (DDI_FAILURE
);
875 dcd_destroy_pkt(pkt
);
876 mutex_enter(DCD_MUTEX
);
877 if (un
->un_state
!= DCD_STATE_PM_SUSPENDED
)
878 New_state(un
, DCD_STATE_PM_SUSPENDED
);
879 un
->un_power_level
= level
;
880 mutex_exit(DCD_MUTEX
);
883 return (DDI_SUCCESS
);
/*
 * Heavy lifting for DDI_ATTACH: re-probe the device, allocate and populate
 * the per-unit soft state (sync objects, drivetype, sector size, capacity),
 * read the target<n>-dcd-options property to select DMA/PIO/block mode,
 * set throttle and max transfer size, and derive the write-cache-enabled
 * flag from the IDENTIFY data. canwait selects KM_SLEEP vs KM_NOSLEEP for
 * allocations. NOTE(review): many interior lines are missing from this
 * extract (probe-switch labels, error gotos, closing braces, the final
 * returns), so the visible control flow is incomplete.
 */
887 dcd_doattach(dev_info_t
*devi
, int (*canwait
)())
889 struct dcd_device
*devp
;
890 struct dcd_disk
*un
= NULL
;
892 int km_flags
= (canwait
!= NULL_FUNC
)? KM_SLEEP
: KM_NOSLEEP
;
894 char *prop_template
= "target%x-dcd-options";
900 devp
= ddi_get_driver_private(devi
);
903 * Call the routine scsi_probe to do some of the dirty work.
904 * If the INQUIRY command succeeds, the field dcd_inq in the
905 * device structure will be filled in. The dcd_sense structure
906 * will also be allocated.
909 switch (dcd_probe(devp
, canwait
)) {
911 return (DDI_FAILURE
);
913 case DCDPROBE_EXISTS
:
914 if ((devp
->dcd_ident
->dcd_config
& ATAPI_DEVICE
) == 0) {
915 if (devp
->dcd_ident
->dcd_config
& ATANON_REMOVABLE
) {
928 instance
= ddi_get_instance(devp
->dcd_dev
);
930 if (ddi_soft_state_zalloc(dcd_state
, instance
) != DDI_SUCCESS
) {
935 un
= ddi_get_soft_state(dcd_state
, instance
);
937 un
->un_sbufp
= getrbuf(km_flags
);
938 if (un
->un_sbufp
== NULL
) {
945 un
->un_power_level
= -1;
946 un
->un_tgattribute
.media_is_writable
= 1;
948 sema_init(&un
->un_semoclose
, 1, NULL
, SEMA_DRIVER
, NULL
);
949 cv_init(&un
->un_sbuf_cv
, NULL
, CV_DRIVER
, NULL
);
950 cv_init(&un
->un_state_cv
, NULL
, CV_DRIVER
, NULL
);
951 /* Initialize power management conditional variable */
952 cv_init(&un
->un_disk_busy_cv
, NULL
, CV_DRIVER
, NULL
);
953 cv_init(&un
->un_suspend_cv
, NULL
, CV_DRIVER
, NULL
);
955 if (un
->un_dp
== 0) {
957 * Assume CCS drive, assume parity, but call
958 * it a CDROM if it is a RODIRECT device.
960 un
->un_dp
= (struct dcd_drivetype
*)
961 kmem_zalloc(sizeof (struct dcd_drivetype
), km_flags
);
966 if ((devp
->dcd_ident
->dcd_config
& ATAPI_DEVICE
) == 0) {
967 if (devp
->dcd_ident
->dcd_config
& ATANON_REMOVABLE
) {
968 un
->un_dp
->ctype
= CTYPE_DISK
;
974 un
->un_dp
->name
= "CCS";
975 un
->un_dp
->options
= 0;
979 * Allow I/O requests at un_secsize offset in multiple of un_secsize.
981 un
->un_secsize
= DEV_BSIZE
;
984 * If the device is not a removable media device, make sure that
985 * that the device is ready, by issuing the another identify but
986 * not needed. Get the capacity from identify data and store here.
988 if (dcd_compute_dk_capacity(devp
, &capacity
) == 0) {
989 un
->un_diskcapacity
= capacity
;
990 un
->un_lbasize
= DEV_BSIZE
;
993 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "Geometry Data\n");
994 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "cyls %x, heads %x",
995 devp
->dcd_ident
->dcd_fixcyls
,
996 devp
->dcd_ident
->dcd_heads
);
997 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "sectors %x,",
998 devp
->dcd_ident
->dcd_sectors
);
999 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "capacity %llx\n",
1002 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1003 "dcdprobe: drive selected\n");
1006 * Check for the property target<n>-dcd-options to find the option
1007 * set by the HBA driver for this target so that we can set the
1008 * Unit structure variable so that we can send commands accordingly.
1010 target
= devp
->dcd_address
->da_target
;
1011 (void) sprintf(prop_str
, prop_template
, target
);
1012 options
= ddi_prop_get_int(DDI_DEV_T_ANY
, devi
, DDI_PROP_NOTPROM
,
1015 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1016 "No per target properties");
1018 if ((options
& DCD_DMA_MODE
) == DCD_DMA_MODE
) {
1019 un
->un_dp
->options
|= DMA_SUPPORTTED
;
1020 un
->un_dp
->dma_mode
= (options
>> 3) & 0x03;
1021 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1022 "mode %x\n", un
->un_dp
->dma_mode
);
1024 un
->un_dp
->options
&= ~DMA_SUPPORTTED
;
1025 un
->un_dp
->pio_mode
= options
& 0x7;
1026 if (options
& DCD_BLOCK_MODE
)
1027 un
->un_dp
->options
|= BLOCK_MODE
;
1028 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1029 "mode %x\n", un
->un_dp
->pio_mode
);
1031 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1032 "options %x,", un
->un_dp
->options
);
1035 un
->un_throttle
= 2;
1037 * set default max_xfer_size - This should depend on whether the
1038 * Block mode is supported by the device or not.
1040 un
->un_max_xfer_size
= MAX_ATA_XFER_SIZE
;
1043 * Set write cache enable softstate
1045 * WCE is only supported in ATAPI-4 or higher; for
1046 * lower rev devices, must assume write cache is
1049 mutex_enter(DCD_MUTEX
);
1050 un
->un_write_cache_enabled
= (devp
->dcd_ident
->dcd_majvers
== 0xffff) ||
1051 ((devp
->dcd_ident
->dcd_majvers
& IDENTIFY_80_ATAPI_4
) == 0) ||
1052 (devp
->dcd_ident
->dcd_features85
& IDENTIFY_85_WCE
) != 0;
1053 mutex_exit(DCD_MUTEX
);
1055 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1056 "dcd_doattach returns good\n");
1061 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "dcd_doattach failed\n");
1062 dcd_free_softstate(un
, devi
);
1068 * This routine is used to set the block mode of operation by issuing the
1069 * Set Block mode ata command with the maximum block mode possible
/*
 * Builds an ATA SET MULTIPLE command in a udcd_cmd wrapper and submits it
 * through the driver's own ioctl path (dcdioctl_cmd) against this unit's
 * minor 0 device. NOTE(review): local declarations of cdb/dev/status and
 * the final return are on lines missing from this extract.
 */
1071 dcd_set_multiple(struct dcd_disk
*un
)
1074 struct udcd_cmd ucmd
;
1079 /* Zero all the required structure */
1080 (void) bzero((caddr_t
)&ucmd
, sizeof (ucmd
));
1082 (void) bzero((caddr_t
)&cdb
, sizeof (struct dcd_cmd
));
1084 cdb
.cmd
= ATA_SET_MULTIPLE
;
1086 * Here we should pass what needs to go into sector count REGISTER.
1087 * Even though this field indicates the number of bytes to read we
1088 * need to specify the block factor in terms of bytes so that it
1089 * will be programmed by the HBA driver into the sector count register.
1091 cdb
.size
= un
->un_lbasize
* un
->un_dp
->block_factor
;
1093 cdb
.sector_num
.lba_num
= 0;
1094 cdb
.address_mode
= ADD_LBA_MODE
;
1095 cdb
.direction
= NO_DATA_XFER
;
1097 ucmd
.udcd_flags
= 0;
1098 ucmd
.udcd_cmd
= &cdb
;
1099 ucmd
.udcd_bufaddr
= NULL
;
1100 ucmd
.udcd_buflen
= 0;
1101 ucmd
.udcd_flags
|= UDCD_SILENT
;
/* Minor 0 of this instance: instance number shifted into the unit field. */
1103 dev
= makedevice(ddi_driver_major(DCD_DEVINFO
),
1104 ddi_get_instance(DCD_DEVINFO
) << DCDUNIT_SHIFT
);
1107 status
= dcdioctl_cmd(dev
, &ucmd
, UIO_SYSSPACE
, UIO_SYSSPACE
);
1112 * The following routine is used only for setting the transfer mode
1113 * and it is not designed for transferring any other features subcommand.
/*
 * Issues ATA SET FEATURES with the ATA_FEATURE_SET_MODE subcommand; the
 * "size" field (un_lbasize * mode) is repurposed to carry the transfer
 * mode value into the sector count register — see the comment below.
 * NOTE(review): local declarations of cdb/dev/status and the final return
 * are on lines missing from this extract.
 */
1115 dcd_set_features(struct dcd_disk
*un
, uchar_t mode
)
1118 struct udcd_cmd ucmd
;
1123 /* Zero all the required structure */
1124 (void) bzero((caddr_t
)&ucmd
, sizeof (ucmd
));
1126 (void) bzero((caddr_t
)&cdb
, sizeof (struct dcd_cmd
));
1128 cdb
.cmd
= ATA_SET_FEATURES
;
1130 * Here we need to pass what needs to go into the sector count register
1131 * But in the case of SET FEATURES command the value taken in the
1132 * sector count register depends what type of subcommand is
1133 * passed in the features register. Since we have defined the size to
1134 * be the size in bytes in this context it does not indicate bytes
1135 * instead it indicates the mode to be programmed.
1137 cdb
.size
= un
->un_lbasize
* mode
;
1139 cdb
.sector_num
.lba_num
= 0;
1140 cdb
.address_mode
= ADD_LBA_MODE
;
1141 cdb
.direction
= NO_DATA_XFER
;
1142 cdb
.features
= ATA_FEATURE_SET_MODE
;
1143 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1144 "size %x, features %x, cmd %x\n",
1145 cdb
.size
, cdb
.features
, cdb
.cmd
);
1147 ucmd
.udcd_flags
= 0;
1148 ucmd
.udcd_cmd
= &cdb
;
1149 ucmd
.udcd_bufaddr
= NULL
;
1150 ucmd
.udcd_buflen
= 0;
1151 ucmd
.udcd_flags
|= UDCD_SILENT
;
/* Minor 0 of this instance, as in dcd_set_multiple(). */
1153 dev
= makedevice(ddi_driver_major(DCD_DEVINFO
),
1154 ddi_get_instance(DCD_DEVINFO
) << DCDUNIT_SHIFT
);
1156 status
= dcdioctl_cmd(dev
, &ucmd
, UIO_SYSSPACE
, UIO_SYSSPACE
);
1163 * Validate the geometry for this disk, e.g.,
1164 * see whether it has a valid label.
/*
 * Called with DCD_MUTEX held (asserted below). Raises power if the unit is
 * PM-suspended, computes the log2 shift factors (un_secdiv, un_lbadiv,
 * un_blknoshift) from the sector and logical block sizes, and asks cmlb to
 * validate the on-disk label (DCD_MUTEX is dropped around the cmlb and pm
 * calls). Returns 0 or a DCD_*_LABEL error code. NOTE(review): several
 * branch bodies and the success return are on lines missing from this
 * extract, and devp's initialization is not visible here.
 */
1167 dcd_validate_geometry(struct dcd_disk
*un
)
1170 struct dcd_device
*devp
;
1174 ASSERT(mutex_owned(DCD_MUTEX
));
1175 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1176 "dcd_validate_geometry: started \n");
1178 if (un
->un_lbasize
< 0) {
1179 return (DCD_BAD_LABEL
);
1182 if (un
->un_state
== DCD_STATE_PM_SUSPENDED
) {
1183 mutex_exit(DCD_MUTEX
);
1184 if (pm_raise_power(DCD_DEVINFO
, 0, DCD_DEVICE_ACTIVE
) !=
1186 mutex_enter(DCD_MUTEX
);
1187 return (DCD_BAD_LABEL
);
1189 mutex_enter(DCD_MUTEX
);
1192 secsize
= un
->un_secsize
;
1195 * take a log base 2 of sector size (sorry)
1197 for (secdiv
= 0; secsize
= secsize
>> 1; secdiv
++)
1199 un
->un_secdiv
= secdiv
;
1202 * Only DIRECT ACCESS devices will have Sun labels.
1203 * CD's supposedly have a Sun label, too
1208 if (((devp
->dcd_ident
->dcd_config
& ATAPI_DEVICE
) == 0) &&
1209 (devp
->dcd_ident
->dcd_config
& ATANON_REMOVABLE
)) {
1210 mutex_exit(DCD_MUTEX
);
1211 rval
= cmlb_validate(un
->un_dklbhandle
, 0, 0);
1212 mutex_enter(DCD_MUTEX
);
1214 return (DCD_NO_MEM_FOR_LABEL
);
1216 return (DCD_BAD_LABEL
);
1218 /* it should never get here. */
1219 return (DCD_BAD_LABEL
);
1223 * take a log base 2 of logical block size
1225 secsize
= un
->un_lbasize
;
1226 for (secdiv
= 0; secsize
= secsize
>> 1; secdiv
++)
1228 un
->un_lbadiv
= secdiv
;
1231 * take a log base 2 of the multiple of DEV_BSIZE blocks that
1232 * make up one logical block
1234 secsize
= un
->un_lbasize
>> DEV_BSHIFT
;
1235 for (secdiv
= 0; secsize
= secsize
>> 1; secdiv
++)
1237 un
->un_blknoshift
= secdiv
;
/*
 * open(9E) entry point. Serializes against close via un_semoclose, waits
 * out SUSPENDED state, raises power when PM-suspended (unless the caller
 * asked for a non-blocking open), enforces exclusive-open semantics per
 * partition against both layered (lyropen) and regular (regopen) open
 * maps, rejects FWRITE opens of write-protected media, verifies the unit
 * is ready with a valid label and a usable partition, records the open in
 * un_ocmap, and lazily creates the per-disk and per-partition I/O kstats
 * plus error kstats. NOTE(review): many interior lines are missing from
 * this extract (local declarations, error-label blocks such as
 * failed_exclusive, EROFS/ENXIO returns, several braces), so only the
 * visible statements are documented here.
 */
1247 dcdopen(dev_t
*dev_p
, int flag
, int otyp
, cred_t
*cred_p
)
1252 int nodelay
= (flag
& (FNDELAY
| FNONBLOCK
));
1254 char kstatname
[KSTAT_STRLEN
];
/* Expands to un/instance/part lookups; fails the open on bad soft state. */
1258 GET_SOFT_STATE(dev
);
1260 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1261 "Inside Open flag %x, otyp %x\n", flag
, otyp
);
1263 if (otyp
>= OTYPCNT
) {
1267 partmask
= 1 << part
;
1270 * We use a semaphore here in order to serialize
1271 * open and close requests on the device.
1273 sema_p(&un
->un_semoclose
);
1275 mutex_enter(DCD_MUTEX
);
1277 if ((un
->un_state
& DCD_STATE_FATAL
) == DCD_STATE_FATAL
) {
1282 while (un
->un_state
== DCD_STATE_SUSPENDED
) {
1283 cv_wait(&un
->un_suspend_cv
, DCD_MUTEX
);
1286 if ((un
->un_state
== DCD_STATE_PM_SUSPENDED
) && (!nodelay
)) {
1287 mutex_exit(DCD_MUTEX
);
1288 if (pm_raise_power(DCD_DEVINFO
, 0, DCD_DEVICE_ACTIVE
)
1290 mutex_enter(DCD_MUTEX
);
1294 mutex_enter(DCD_MUTEX
);
1298 * set make_dcd_cmd() flags and stat_size here since these
1299 * are unlikely to change
1301 un
->un_cmd_flags
= 0;
1303 un
->un_cmd_stat_size
= 2;
1305 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "dcdopen un=0x%p\n",
1308 * check for previous exclusive open
1310 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1311 "exclopen=%x, flag=%x, regopen=%x\n",
1312 un
->un_exclopen
, flag
, un
->un_ocmap
.regopen
[otyp
]);
1313 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1314 "Exclusive open flag %x, partmask %x\n",
1315 un
->un_exclopen
, partmask
);
1317 if (un
->un_exclopen
& (partmask
)) {
1319 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1320 "exclusive open fails\n");
1327 if (un
->un_ocmap
.lyropen
[part
]) {
1328 goto failed_exclusive
;
1330 for (i
= 0; i
< (OTYPCNT
- 1); i
++) {
1331 if (un
->un_ocmap
.regopen
[i
] & (partmask
)) {
1332 goto failed_exclusive
;
1336 if (flag
& FWRITE
) {
1337 mutex_exit(DCD_MUTEX
);
1338 if (dcd_check_wp(dev
)) {
1339 sema_v(&un
->un_semoclose
);
1342 mutex_enter(DCD_MUTEX
);
1345 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1346 "Check Write Protect handled\n");
1349 mutex_exit(DCD_MUTEX
);
1350 if ((rval
= dcd_ready_and_valid(dev
, un
)) != 0) {
1353 (void) pm_idle_component(DCD_DEVINFO
, 0);
1355 * Fail if device is not ready or if the number of disk
1356 * blocks is zero or negative for non CD devices.
1358 if (rval
|| cmlb_partinfo(un
->un_dklbhandle
,
1359 part
, &lblocks
, NULL
, &partname
, NULL
, 0) ||
1362 mutex_enter(DCD_MUTEX
);
1365 mutex_enter(DCD_MUTEX
);
1368 if (otyp
== OTYP_LYR
) {
1369 un
->un_ocmap
.lyropen
[part
]++;
1371 un
->un_ocmap
.regopen
[otyp
] |= partmask
;
1375 * set up open and exclusive open flags
1378 un
->un_exclopen
|= (partmask
);
1382 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1383 "open of part %d type %d\n",
1386 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1387 "Kstats getting updated\n");
1389 * only create kstats for disks, CD kstats created in dcdattach
1391 _NOTE(NO_COMPETING_THREADS_NOW
);
1392 mutex_exit(DCD_MUTEX
);
1393 if (un
->un_stats
== (kstat_t
*)0) {
1394 un
->un_stats
= kstat_create("dad", instance
,
1395 NULL
, "disk", KSTAT_TYPE_IO
, 1,
1396 KSTAT_FLAG_PERSISTENT
);
1398 un
->un_stats
->ks_lock
= DCD_MUTEX
;
1399 kstat_install(un
->un_stats
);
1403 * set up partition statistics for each partition
1404 * with number of blocks > 0
1407 for (i
= 0; i
< NDKMAP
; i
++) {
1408 if ((un
->un_pstats
[i
] == (kstat_t
*)0) &&
1409 (cmlb_partinfo(un
->un_dklbhandle
,
1410 i
, &lblocks
, NULL
, &partname
,
1411 NULL
, 0) == 0) && lblocks
> 0) {
1412 (void) sprintf(kstatname
, "dad%d,%s",
1413 instance
, partname
);
1414 un
->un_pstats
[i
] = kstat_create("dad",
1420 KSTAT_FLAG_PERSISTENT
);
1421 if (un
->un_pstats
[i
]) {
1422 un
->un_pstats
[i
]->ks_lock
=
1424 kstat_install(un
->un_pstats
[i
]);
1430 * set up error kstats
1432 (void) dcd_create_errstats(un
, instance
);
1434 _NOTE(COMPETING_THREADS_NOW
);
1436 sema_v(&un
->un_semoclose
);
1437 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "Open success\n");
1441 mutex_exit(DCD_MUTEX
);
1442 sema_v(&un
->un_semoclose
);
1448 * Test if disk is ready and has a valid geometry.
1451 dcd_ready_and_valid(dev_t dev
, struct dcd_disk
*un
)
1456 mutex_enter(DCD_MUTEX
);
1460 if (un
->un_ncmds
== 0) {
1461 (void) dcd_unit_ready(dev
);
1465 * If device is not yet ready here, inform it is offline
1467 if (un
->un_state
== DCD_STATE_NORMAL
) {
1468 rval
= dcd_unit_ready(dev
);
1469 if (rval
!= 0 && rval
!= EACCES
) {
1475 if (un
->un_format_in_progress
== 0) {
1476 g_error
= dcd_validate_geometry(un
);
1480 * check if geometry was valid. We don't check the validity of
1481 * geometry for CDROMS.
1484 if (g_error
== DCD_BAD_LABEL
) {
1491 * the state has changed; inform the media watch routines
1493 un
->un_mediastate
= DKIO_INSERTED
;
1494 cv_broadcast(&un
->un_state_cv
);
1498 mutex_exit(DCD_MUTEX
);
1505 dcdclose(dev_t dev
, int flag
, int otyp
, cred_t
*cred_p
)
1510 GET_SOFT_STATE(dev
);
1513 if (otyp
>= OTYPCNT
)
1516 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1517 "close of part %d type %d\n",
1519 sema_p(&un
->un_semoclose
);
1521 mutex_enter(DCD_MUTEX
);
1523 if (un
->un_exclopen
& (1<<part
)) {
1524 un
->un_exclopen
&= ~(1<<part
);
1527 if (otyp
== OTYP_LYR
) {
1528 un
->un_ocmap
.lyropen
[part
] -= 1;
1530 un
->un_ocmap
.regopen
[otyp
] &= ~(1<<part
);
1533 cp
= &un
->un_ocmap
.chkd
[0];
1534 while (cp
< &un
->un_ocmap
.chkd
[OCSIZE
]) {
1535 if (*cp
!= (uchar_t
)0) {
1541 if (cp
== &un
->un_ocmap
.chkd
[OCSIZE
]) {
1542 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "last close\n");
1543 if (un
->un_state
== DCD_STATE_OFFLINE
) {
1547 mutex_exit(DCD_MUTEX
);
1548 (void) cmlb_close(un
->un_dklbhandle
, 0);
1550 _NOTE(NO_COMPETING_THREADS_NOW
);
1552 kstat_delete(un
->un_stats
);
1555 for (i
= 0; i
< NDKMAP
; i
++) {
1556 if (un
->un_pstats
[i
]) {
1557 kstat_delete(un
->un_pstats
[i
]);
1558 un
->un_pstats
[i
] = (kstat_t
*)0;
1562 if (un
->un_errstats
) {
1563 kstat_delete(un
->un_errstats
);
1564 un
->un_errstats
= (kstat_t
*)0;
1566 mutex_enter(DCD_MUTEX
);
1568 _NOTE(COMPETING_THREADS_NOW
);
1571 mutex_exit(DCD_MUTEX
);
1572 sema_v(&un
->un_semoclose
);
1577 dcd_offline(struct dcd_disk
*un
, int bechatty
)
1580 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
, "offline\n");
1582 mutex_exit(DCD_MUTEX
);
1583 cmlb_invalidate(un
->un_dklbhandle
, 0);
1584 mutex_enter(DCD_MUTEX
);
1588 * Given the device number return the devinfo pointer
1589 * from the scsi_device structure.
1593 dcdinfo(dev_info_t
*dip
, ddi_info_cmd_t infocmd
, void *arg
, void **result
)
1596 struct dcd_disk
*un
;
1597 int instance
, error
;
1601 case DDI_INFO_DEVT2DEVINFO
:
1603 instance
= DCDUNIT(dev
);
1604 if ((un
= ddi_get_soft_state(dcd_state
, instance
)) == NULL
)
1605 return (DDI_FAILURE
);
1606 *result
= (void *) DCD_DEVINFO
;
1607 error
= DDI_SUCCESS
;
1609 case DDI_INFO_DEVT2INSTANCE
:
1611 instance
= DCDUNIT(dev
);
1612 *result
= (void *)(uintptr_t)instance
;
1613 error
= DDI_SUCCESS
;
1616 error
= DDI_FAILURE
;
1622 * property operation routine. return the number of blocks for the partition
1623 * in question or forward the request to the propery facilities.
1626 dcd_prop_op(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
, int mod_flags
,
1627 char *name
, caddr_t valuep
, int *lengthp
)
1629 struct dcd_disk
*un
;
1631 if ((un
= ddi_get_soft_state(dcd_state
, ddi_get_instance(dip
))) == NULL
)
1632 return (ddi_prop_op(dev
, dip
, prop_op
, mod_flags
,
1633 name
, valuep
, lengthp
));
1635 return (cmlb_prop_op(un
->un_dklbhandle
,
1636 dev
, dip
, prop_op
, mod_flags
, name
, valuep
, lengthp
,
1637 DCDPART(dev
), NULL
));
1641 * These routines perform raw i/o operations.
1645 dcduscsimin(struct buf
*bp
)
1652 dcdmin(struct buf
*bp
)
1654 struct dcd_disk
*un
;
1656 minor_t minor
= getminor(bp
->b_edev
);
1657 instance
= minor
>> DCDUNIT_SHIFT
;
1658 un
= ddi_get_soft_state(dcd_state
, instance
);
1660 if (bp
->b_bcount
> un
->un_max_xfer_size
)
1661 bp
->b_bcount
= un
->un_max_xfer_size
;
1667 dcdread(dev_t dev
, struct uio
*uio
, cred_t
*cred_p
)
1670 GET_SOFT_STATE(dev
);
1671 secmask
= un
->un_secsize
- 1;
1673 if (uio
->uio_loffset
& ((offset_t
)(secmask
))) {
1674 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1675 "file offset not modulo %d\n",
1678 } else if (uio
->uio_iov
->iov_len
& (secmask
)) {
1679 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1680 "transfer length not modulo %d\n", un
->un_secsize
);
1683 return (physio(dcdstrategy
, NULL
, dev
, B_READ
, dcdmin
, uio
));
1688 dcdaread(dev_t dev
, struct aio_req
*aio
, cred_t
*cred_p
)
1691 struct uio
*uio
= aio
->aio_uio
;
1692 GET_SOFT_STATE(dev
);
1693 secmask
= un
->un_secsize
- 1;
1695 if (uio
->uio_loffset
& ((offset_t
)(secmask
))) {
1696 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1697 "file offset not modulo %d\n",
1700 } else if (uio
->uio_iov
->iov_len
& (secmask
)) {
1701 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1702 "transfer length not modulo %d\n", un
->un_secsize
);
1705 return (aphysio(dcdstrategy
, anocancel
, dev
, B_READ
, dcdmin
, aio
));
1710 dcdwrite(dev_t dev
, struct uio
*uio
, cred_t
*cred_p
)
1713 GET_SOFT_STATE(dev
);
1714 secmask
= un
->un_secsize
- 1;
1716 if (uio
->uio_loffset
& ((offset_t
)(secmask
))) {
1717 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1718 "file offset not modulo %d\n",
1721 } else if (uio
->uio_iov
->iov_len
& (secmask
)) {
1722 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1723 "transfer length not modulo %d\n", un
->un_secsize
);
1726 return (physio(dcdstrategy
, NULL
, dev
, B_WRITE
, dcdmin
,
1732 dcdawrite(dev_t dev
, struct aio_req
*aio
, cred_t
*cred_p
)
1735 struct uio
*uio
= aio
->aio_uio
;
1736 GET_SOFT_STATE(dev
);
1737 secmask
= un
->un_secsize
- 1;
1739 if (uio
->uio_loffset
& ((offset_t
)(secmask
))) {
1740 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1741 "file offset not modulo %d\n",
1744 } else if (uio
->uio_iov
->iov_len
& (secmask
)) {
1745 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1746 "transfer length not modulo %d\n", un
->un_secsize
);
1749 return (aphysio(dcdstrategy
, anocancel
, dev
, B_WRITE
, dcdmin
, aio
));
1756 dcdstrategy(struct buf
*bp
)
1758 struct dcd_disk
*un
;
1761 minor_t minor
= getminor(bp
->b_edev
);
1762 diskaddr_t p_lblksrt
;
1766 if ((un
= ddi_get_soft_state(dcd_state
,
1767 minor
>> DCDUNIT_SHIFT
)) == NULL
||
1768 un
->un_state
== DCD_STATE_DUMPING
||
1769 ((un
->un_state
& DCD_STATE_FATAL
) == DCD_STATE_FATAL
)) {
1770 SET_BP_ERROR(bp
, ((un
) ? ENXIO
: EIO
));
1772 bp
->b_resid
= bp
->b_bcount
;
1778 * If the request size (buf->b_bcount)is greater than the size
1779 * (un->un_max_xfer_size) supported by the target driver fail
1780 * the request with EINVAL error code.
1782 * We are not supposed to receive requests exceeding
1783 * un->un_max_xfer_size size because the caller is expected to
1784 * check what is the maximum size that is supported by this
1785 * driver either through ioctl or dcdmin routine(which is private
1787 * But we have seen cases (like meta driver(md))where dcdstrategy
1788 * called with more than supported size and cause data corruption.
1791 if (bp
->b_bcount
> un
->un_max_xfer_size
) {
1792 SET_BP_ERROR(bp
, EINVAL
);
1796 TRACE_2(TR_FAC_DADA
, TR_DCDSTRATEGY_START
,
1797 "dcdstrategy_start: bp 0x%p un 0x%p", bp
, un
);
1800 * Commands may sneak in while we released the mutex in
1801 * DDI_SUSPEND, we should block new commands.
1803 mutex_enter(DCD_MUTEX
);
1804 while (un
->un_state
== DCD_STATE_SUSPENDED
) {
1805 cv_wait(&un
->un_suspend_cv
, DCD_MUTEX
);
1808 if (un
->un_state
== DCD_STATE_PM_SUSPENDED
) {
1809 mutex_exit(DCD_MUTEX
);
1810 (void) pm_idle_component(DCD_DEVINFO
, 0);
1811 if (pm_raise_power(DCD_DEVINFO
, 0,
1812 DCD_DEVICE_ACTIVE
) != DDI_SUCCESS
) {
1813 SET_BP_ERROR(bp
, EIO
);
1816 mutex_enter(DCD_MUTEX
);
1818 mutex_exit(DCD_MUTEX
);
1821 * Map-in the buffer in case starting address is not word aligned.
1824 if (((uintptr_t)bp
->b_un
.b_addr
) & 0x1)
1827 bp
->b_flags
&= ~(B_DONE
|B_ERROR
);
1831 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1832 "bp->b_bcount %lx\n", bp
->b_bcount
);
1834 if (bp
!= un
->un_sbufp
) {
1835 validated
: if (cmlb_partinfo(un
->un_dklbhandle
,
1836 minor
& DCDPART_MASK
,
1845 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1846 "dkblock(bp) is %llu\n", bn
);
1851 } else if (bn
>= lblocks
) {
1853 * For proper comparison, file system block
1854 * number has to be scaled to actual CD
1856 * Since all the CDROM operations
1857 * that have Sun Labels are in the correct
1858 * block size this will work for CD's. This
1859 * will have to change when we have different
1863 * Not an error, resid == count
1870 } else if (bp
->b_bcount
& (un
->un_secsize
-1)) {
1872 * This should really be:
1874 * ... if (bp->b_bcount & (un->un_lbasize-1))
1879 if (!bp
->b_bcount
) {
1880 printf("Waring : Zero read or Write\n");
1884 * sort by absolute block number.
1887 bp
->b_resid
+= p_lblksrt
;
1889 * zero out av_back - this will be a signal
1890 * to dcdstart to go and fetch the resources
1892 bp
->av_back
= NO_PKT_ALLOCATED
;
1896 * Check to see whether or not we are done
1897 * (with or without errors).
1902 bp
->b_flags
|= B_ERROR
;
1908 * opened in NDELAY/NONBLOCK mode?
1909 * Check if disk is ready and has a valid geometry
1911 if (dcd_ready_and_valid(bp
->b_edev
, un
) == 0) {
1914 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
1915 "i/o to invalid geometry\n");
1916 SET_BP_ERROR(bp
, EIO
);
1920 } else if (BP_HAS_NO_PKT(bp
)) {
1921 struct udcd_cmd
*tscmdp
;
1922 struct dcd_cmd
*tcmdp
;
1924 * This indicates that it is a special buffer
1925 * This could be a udcd-cmd and hence call bp_mapin just
1926 * in case that it could be a PIO command issued.
1928 tscmdp
= (struct udcd_cmd
*)bp
->b_forw
;
1929 tcmdp
= tscmdp
->udcd_cmd
;
1930 if ((tcmdp
->cmd
!= ATA_READ_DMA
) && (tcmdp
->cmd
!= 0xc9) &&
1931 (tcmdp
->cmd
!= ATA_WRITE_DMA
) && (tcmdp
->cmd
!= 0xcb) &&
1932 (tcmdp
->cmd
!= IDENTIFY_DMA
) &&
1933 (tcmdp
->cmd
!= ATA_FLUSH_CACHE
)) {
1939 * We are doing it a bit non-standard. That is, the
1940 * head of the b_actf chain is *not* the active command-
1941 * it is just the head of the wait queue. The reason
1942 * we do this is that the head of the b_actf chain is
1943 * guaranteed to not be moved by disksort(), so that
1944 * our restart command (pointed to by
1945 * b_forw) and the head of the wait queue (b_actf) can
1946 * have resources granted without it getting lost in
1947 * the queue at some later point (where we would have
1948 * to go and look for it).
1950 mutex_enter(DCD_MUTEX
);
1952 DCD_DO_KSTATS(un
, kstat_waitq_enter
, bp
);
1956 if (dp
->b_actf
== NULL
) {
1959 } else if ((un
->un_state
== DCD_STATE_SUSPENDED
) &&
1960 bp
== un
->un_sbufp
) {
1961 bp
->b_actf
= dp
->b_actf
;
1964 TRACE_3(TR_FAC_DADA
, TR_DCDSTRATEGY_DISKSORT_START
,
1965 "dcdstrategy_disksort_start: dp 0x%p bp 0x%p un 0x%p",
1968 TRACE_0(TR_FAC_DADA
, TR_DCDSTRATEGY_DISKSORT_END
,
1969 "dcdstrategy_disksort_end");
1972 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
1973 "ncmd %x , throttle %x, forw 0x%p\n",
1974 un
->un_ncmds
, un
->un_throttle
, (void *)dp
->b_forw
);
1975 ASSERT(un
->un_ncmds
>= 0);
1976 ASSERT(un
->un_throttle
>= 0);
1977 if ((un
->un_ncmds
< un
->un_throttle
) && (dp
->b_forw
== NULL
)) {
1979 } else if (BP_HAS_NO_PKT(dp
->b_actf
)) {
1982 cmd_bp
= dp
->b_actf
;
1983 cmd_bp
->av_back
= ALLOCATING_PKT
;
1984 mutex_exit(DCD_MUTEX
);
1986 * try and map this one
1988 TRACE_0(TR_FAC_DADA
, TR_DCDSTRATEGY_SMALL_WINDOW_START
,
1989 "dcdstrategy_small_window_call (begin)");
1991 make_dcd_cmd(un
, cmd_bp
, NULL_FUNC
);
1993 TRACE_0(TR_FAC_DADA
, TR_DCDSTRATEGY_SMALL_WINDOW_END
,
1994 "dcdstrategy_small_window_call (end)");
1997 * there is a small window where the active cmd
1998 * completes before make_dcd_cmd returns.
1999 * consequently, this cmd never gets started so
2000 * we start it from here
2002 mutex_enter(DCD_MUTEX
);
2003 if ((un
->un_ncmds
< un
->un_throttle
) &&
2004 (dp
->b_forw
== NULL
)) {
2008 mutex_exit(DCD_MUTEX
);
2011 TRACE_0(TR_FAC_DADA
, TR_DCDSTRATEGY_END
, "dcdstrategy_end");
2017 * Unit start and Completion
2018 * NOTE: we assume that the caller has at least checked for:
2019 * (un->un_ncmds < un->un_throttle)
2020 * if not, there is no real harm done, dcd_transport() will
2024 dcdstart(struct dcd_disk
*un
)
2026 int status
, sort_key
;
2029 uchar_t state
= un
->un_last_state
;
2031 TRACE_1(TR_FAC_DADA
, TR_DCDSTART_START
, "dcdstart_start: un 0x%p", un
);
2034 ASSERT(mutex_owned(DCD_MUTEX
));
2037 if (((bp
= dp
->b_actf
) == NULL
) || (bp
->av_back
== ALLOCATING_PKT
) ||
2038 (dp
->b_forw
!= NULL
)) {
2039 TRACE_0(TR_FAC_DADA
, TR_DCDSTART_NO_WORK_END
,
2040 "dcdstart_end (no work)");
2045 * remove from active queue
2047 dp
->b_actf
= bp
->b_actf
;
2051 * increment ncmds before calling dcd_transport because dcdintr
2052 * may be called before we return from dcd_transport!
2057 * If measuring stats, mark exit from wait queue and
2058 * entrance into run 'queue' if and only if we are
2059 * going to actually start a command.
2060 * Normally the bp already has a packet at this point
2062 DCD_DO_KSTATS(un
, kstat_waitq_to_runq
, bp
);
2064 mutex_exit(DCD_MUTEX
);
2066 if (BP_HAS_NO_PKT(bp
)) {
2067 make_dcd_cmd(un
, bp
, dcdrunout
);
2068 if (BP_HAS_NO_PKT(bp
) && !(bp
->b_flags
& B_ERROR
)) {
2069 mutex_enter(DCD_MUTEX
);
2070 DCD_DO_KSTATS(un
, kstat_runq_back_to_waitq
, bp
);
2072 bp
->b_actf
= dp
->b_actf
;
2074 New_state(un
, DCD_STATE_RWAIT
);
2076 TRACE_0(TR_FAC_DADA
, TR_DCDSTART_NO_RESOURCES_END
,
2077 "dcdstart_end (No Resources)");
2080 } else if (bp
->b_flags
& B_ERROR
) {
2081 mutex_enter(DCD_MUTEX
);
2082 DCD_DO_KSTATS(un
, kstat_runq_exit
, bp
);
2085 bp
->b_resid
= bp
->b_bcount
;
2086 if (bp
->b_error
== 0) {
2087 SET_BP_ERROR(bp
, EIO
);
2093 un
->un_state
= un
->un_last_state
;
2094 un
->un_last_state
= state
;
2096 mutex_exit(DCD_MUTEX
);
2099 mutex_enter(DCD_MUTEX
);
2100 if (un
->un_state
== DCD_STATE_SUSPENDED
) {
2101 cv_broadcast(&un
->un_disk_busy_cv
);
2104 if ((un
->un_ncmds
< un
->un_throttle
) &&
2105 (dp
->b_forw
== NULL
)) {
2114 * Restore resid from the packet, b_resid had been the
2117 sort_key
= bp
->b_resid
;
2118 bp
->b_resid
= BP_PKT(bp
)->pkt_resid
;
2119 BP_PKT(bp
)->pkt_resid
= 0;
2121 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2122 "bp->b_resid %lx, pkt_resid %lx\n",
2123 bp
->b_resid
, BP_PKT(bp
)->pkt_resid
);
2126 * We used to check whether or not to try and link commands here.
2127 * Since we have found that there is no performance improvement
2128 * for linked commands, this has not made much sense.
2130 if ((status
= dcd_transport((struct dcd_pkt
*)BP_PKT(bp
)))
2132 mutex_enter(DCD_MUTEX
);
2134 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2135 "transport returned %x\n", status
);
2136 if (status
== TRAN_BUSY
) {
2137 DCD_DO_ERRSTATS(un
, dcd_transerrs
);
2138 DCD_DO_KSTATS(un
, kstat_runq_back_to_waitq
, bp
);
2139 dcd_handle_tran_busy(bp
, dp
, un
);
2140 if (un
->un_ncmds
> 0) {
2141 bp
->b_resid
= sort_key
;
2144 DCD_DO_KSTATS(un
, kstat_runq_exit
, bp
);
2145 mutex_exit(DCD_MUTEX
);
2147 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2148 "transport rejected (%d)\n",
2150 SET_BP_ERROR(bp
, EIO
);
2151 bp
->b_resid
= bp
->b_bcount
;
2152 if (bp
!= un
->un_sbufp
) {
2153 dcd_destroy_pkt(BP_PKT(bp
));
2157 mutex_enter(DCD_MUTEX
);
2158 if (un
->un_state
== DCD_STATE_SUSPENDED
) {
2159 cv_broadcast(&un
->un_disk_busy_cv
);
2161 if ((un
->un_ncmds
< un
->un_throttle
) &&
2162 (dp
->b_forw
== NULL
)) {
2167 mutex_enter(DCD_MUTEX
);
2169 if (dp
->b_actf
&& BP_HAS_NO_PKT(dp
->b_actf
)) {
2172 cmd_bp
= dp
->b_actf
;
2173 cmd_bp
->av_back
= ALLOCATING_PKT
;
2174 mutex_exit(DCD_MUTEX
);
2176 * try and map this one
2178 TRACE_0(TR_FAC_DADA
, TR_DCASTART_SMALL_WINDOW_START
,
2179 "dcdstart_small_window_start");
2181 make_dcd_cmd(un
, cmd_bp
, NULL_FUNC
);
2183 TRACE_0(TR_FAC_DADA
, TR_DCDSTART_SMALL_WINDOW_END
,
2184 "dcdstart_small_window_end");
2186 * there is a small window where the active cmd
2187 * completes before make_dcd_cmd returns.
2188 * consequently, this cmd never gets started so
2189 * we start it from here
2191 mutex_enter(DCD_MUTEX
);
2192 if ((un
->un_ncmds
< un
->un_throttle
) &&
2193 (dp
->b_forw
== NULL
)) {
2200 ASSERT(mutex_owned(DCD_MUTEX
));
2201 TRACE_0(TR_FAC_DADA
, TR_DCDSTART_END
, "dcdstart_end");
2205 * make_dcd_cmd: create a pkt
2208 make_dcd_cmd(struct dcd_disk
*un
, struct buf
*bp
, int (*func
)())
2210 auto int count
, com
, direction
;
2211 struct dcd_pkt
*pkt
;
2214 _NOTE(DATA_READABLE_WITHOUT_LOCK(dcd_disk::un_dp
))
2215 TRACE_3(TR_FAC_DADA
, TR_MAKE_DCD_CMD_START
,
2216 "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un
, bp
, un
);
2219 flags
= un
->un_cmd_flags
;
2221 if (bp
!= un
->un_sbufp
) {
2222 int partition
= DCDPART(bp
->b_edev
);
2223 diskaddr_t p_lblksrt
;
2227 int dkl_nblk
, delta
;
2230 if (cmlb_partinfo(un
->un_dklbhandle
,
2241 dkl_nblk
= (int)lblocks
;
2244 * Make sure we don't run off the end of a partition.
2246 * Put this test here so that we can adjust b_count
2247 * to accurately reflect the actual amount we are
2248 * goint to transfer.
2252 * First, compute partition-relative block number
2254 blkno
= dkblock(bp
);
2255 secnt
= (bp
->b_bcount
+ (un
->un_secsize
- 1)) >> un
->un_secdiv
;
2256 count
= MIN(secnt
, dkl_nblk
- blkno
);
2257 if (count
!= secnt
) {
2259 * We have an overrun
2261 resid
= (secnt
- count
) << un
->un_secdiv
;
2262 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2263 "overrun by %ld sectors\n",
2265 bp
->b_bcount
-= resid
;
2271 * Adjust block number to absolute
2273 delta
= (int)p_lblksrt
;
2276 mutex_enter(DCD_MUTEX
);
2278 * This is for devices having block size different from
2279 * from DEV_BSIZE (e.g. 2K CDROMs).
2281 if (un
->un_lbasize
!= un
->un_secsize
) {
2282 blkno
>>= un
->un_blknoshift
;
2283 count
>>= un
->un_blknoshift
;
2285 mutex_exit(DCD_MUTEX
);
2287 TRACE_0(TR_FAC_DADA
, TR_MAKE_DCD_CMD_INIT_PKT_START
,
2288 "make_dcd_cmd_init_pkt_call (begin)");
2289 pkt
= dcd_init_pkt(ROUTE
, NULL
, bp
,
2290 (uint32_t)sizeof (struct dcd_cmd
),
2291 un
->un_cmd_stat_size
, PP_LEN
, PKT_CONSISTENT
,
2293 TRACE_1(TR_FAC_DADA
, TR_MAKE_DCD_CMD_INIT_PKT_END
,
2294 "make_dcd_cmd_init_pkt_call (end): pkt 0x%p", pkt
);
2296 bp
->b_bcount
+= resid
;
2297 bp
->av_back
= NO_PKT_ALLOCATED
;
2298 TRACE_0(TR_FAC_DADA
,
2299 TR_MAKE_DCD_CMD_NO_PKT_ALLOCATED1_END
,
2300 "make_dcd_cmd_end (NO_PKT_ALLOCATED1)");
2303 if (bp
->b_flags
& B_READ
) {
2304 if ((un
->un_dp
->options
& DMA_SUPPORTTED
) ==
2308 if (un
->un_dp
->options
& BLOCK_MODE
)
2309 com
= ATA_READ_MULTIPLE
;
2313 direction
= DATA_READ
;
2315 if ((un
->un_dp
->options
& DMA_SUPPORTTED
) ==
2317 com
= ATA_WRITE_DMA
;
2319 if (un
->un_dp
->options
& BLOCK_MODE
)
2320 com
= ATA_WRITE_MULTIPLE
;
2324 direction
= DATA_WRITE
;
2328 * Save the resid in the packet, temporarily until
2329 * we transport the command.
2331 pkt
->pkt_resid
= resid
;
2333 makecommand(pkt
, flags
, com
, blkno
, ADD_LBA_MODE
,
2334 bp
->b_bcount
, direction
, 0);
2338 struct udcd_cmd
*scmd
= (struct udcd_cmd
*)bp
->b_forw
;
2343 if ((scmd
->udcd_flags
& UDCD_SILENT
) && !(DEBUGGING
)) {
2344 flags
|= FLAG_SILENT
;
2346 if (scmd
->udcd_flags
& UDCD_DIAGNOSE
)
2347 flags
|= FLAG_DIAGNOSE
;
2349 if (scmd
->udcd_flags
& UDCD_NOINTR
)
2350 flags
|= FLAG_NOINTR
;
2352 pkt
= dcd_init_pkt(ROUTE
, NULL
,
2353 (bp
->b_bcount
)? bp
: NULL
,
2354 (uint32_t)sizeof (struct dcd_cmd
),
2355 2, PP_LEN
, PKT_CONSISTENT
, func
, (caddr_t
)un
);
2358 bp
->av_back
= NO_PKT_ALLOCATED
;
2362 makecommand(pkt
, 0, scmd
->udcd_cmd
->cmd
,
2363 scmd
->udcd_cmd
->sector_num
.lba_num
,
2364 scmd
->udcd_cmd
->address_mode
,
2365 scmd
->udcd_cmd
->size
,
2366 scmd
->udcd_cmd
->direction
, scmd
->udcd_cmd
->features
);
2368 pkt
->pkt_flags
= flags
;
2369 if (scmd
->udcd_timeout
== 0)
2372 tval
= scmd
->udcd_timeout
;
2373 /* UDAD interface should be decided. */
2374 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2375 "udcd interface\n");
2378 pkt
->pkt_comp
= dcdintr
;
2379 pkt
->pkt_time
= tval
;
2380 PKT_SET_BP(pkt
, bp
);
2381 bp
->av_back
= (struct buf
*)pkt
;
2383 TRACE_0(TR_FAC_DADA
, TR_MAKE_DCD_CMD_END
, "make_dcd_cmd_end");
2387 * Command completion processing
2390 dcdintr(struct dcd_pkt
*pkt
)
2392 struct dcd_disk
*un
;
2397 bp
= PKT_GET_BP(pkt
);
2398 un
= ddi_get_soft_state(dcd_state
, DCDUNIT(bp
->b_edev
));
2400 TRACE_1(TR_FAC_DADA
, TR_DCDINTR_START
, "dcdintr_start: un 0x%p", un
);
2401 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "dcdintr\n");
2403 mutex_enter(DCD_MUTEX
);
2405 DCD_DO_KSTATS(un
, kstat_runq_exit
, bp
);
2406 ASSERT(un
->un_ncmds
>= 0);
2408 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2409 "reason %x and Status %x\n", pkt
->pkt_reason
, SCBP_C(pkt
));
2412 * do most common case first
2414 if ((pkt
->pkt_reason
== CMD_CMPLT
) && (SCBP_C(pkt
) == 0)) {
2415 int com
= GETATACMD((struct dcd_cmd
*)pkt
->pkt_cdbp
);
2417 if (un
->un_state
== DCD_STATE_OFFLINE
) {
2418 un
->un_state
= un
->un_last_state
;
2419 dcd_log(DCD_DEVINFO
, dcd_label
, CE_NOTE
,
2420 (const char *) diskokay
);
2423 * If the command is a read or a write, and we have
2424 * a non-zero pkt_resid, that is an error. We should
2425 * attempt to retry the operation if possible.
2427 action
= COMMAND_DONE
;
2428 if (pkt
->pkt_resid
&& (com
== ATA_READ
|| com
== ATA_WRITE
)) {
2429 DCD_DO_ERRSTATS(un
, dcd_harderrs
);
2430 if ((int)PKT_GET_RETRY_CNT(pkt
) < dcd_retry_count
) {
2431 PKT_INCR_RETRY_CNT(pkt
, 1);
2432 action
= QUE_COMMAND
;
2435 * if we have exhausted retries
2436 * a command with a residual is in error in
2439 action
= COMMAND_DONE_ERROR
;
2441 dcd_log(DCD_DEVINFO
, dcd_label
,
2442 CE_WARN
, "incomplete %s- %s\n",
2443 (bp
->b_flags
& B_READ
)? "read" : "write",
2444 (action
== QUE_COMMAND
)? "retrying" :
2449 * pkt_resid will reflect, at this point, a residual
2450 * of how many bytes left to be transferred there were
2451 * from the actual scsi command. Add this to b_resid i.e
2452 * the amount this driver could not see to transfer,
2453 * to get the total number of bytes not transfered.
2455 if (action
!= QUE_COMMAND
) {
2456 bp
->b_resid
+= pkt
->pkt_resid
;
2459 } else if (pkt
->pkt_reason
!= CMD_CMPLT
) {
2460 action
= dcd_handle_incomplete(un
, bp
);
2464 * If we are in the middle of syncing or dumping, we have got
2465 * here because dcd_transport has called us explictly after
2466 * completing the command in a polled mode. We don't want to
2467 * have a recursive call into dcd_transport again.
2469 if (ddi_in_panic() && (action
== QUE_COMMAND
)) {
2470 action
= COMMAND_DONE_ERROR
;
2474 * save pkt reason; consecutive failures are not reported unless
2476 * do not reset last_pkt_reason when the cmd was retried and
2478 * there maybe more commands comming back with last_pkt_reason
2480 if ((un
->un_last_pkt_reason
!= pkt
->pkt_reason
) &&
2481 ((pkt
->pkt_reason
!= CMD_CMPLT
) ||
2482 (PKT_GET_RETRY_CNT(pkt
) == 0))) {
2483 un
->un_last_pkt_reason
= pkt
->pkt_reason
;
2487 case COMMAND_DONE_ERROR
:
2489 if (bp
->b_resid
== 0) {
2490 bp
->b_resid
= bp
->b_bcount
;
2492 if (bp
->b_error
== 0) {
2493 struct dcd_cmd
*cdbp
= (struct dcd_cmd
*)pkt
->pkt_cdbp
;
2494 if (cdbp
->cmd
== ATA_FLUSH_CACHE
&&
2495 (pkt
->pkt_scbp
[0] & STATUS_ATA_ERR
) &&
2496 (pkt
->pkt_scbp
[1] & ERR_ABORT
)) {
2497 SET_BP_ERROR(bp
, ENOTSUP
);
2498 un
->un_flush_not_supported
= 1;
2500 SET_BP_ERROR(bp
, EIO
);
2503 bp
->b_flags
|= B_ERROR
;
2506 dcddone_and_mutex_exit(un
, bp
);
2508 TRACE_0(TR_FAC_DADA
, TR_DCDINTR_COMMAND_DONE_END
,
2509 "dcdintr_end (COMMAND_DONE)");
2513 if (un
->un_ncmds
>= un
->un_throttle
) {
2514 struct diskhd
*dp
= &un
->un_utab
;
2516 bp
->b_actf
= dp
->b_actf
;
2519 DCD_DO_KSTATS(un
, kstat_waitq_enter
, bp
);
2521 mutex_exit(DCD_MUTEX
);
2526 /* reset the pkt reason again */
2527 pkt
->pkt_reason
= 0;
2528 DCD_DO_KSTATS(un
, kstat_runq_enter
, bp
);
2529 mutex_exit(DCD_MUTEX
);
2530 if ((status
= dcd_transport(BP_PKT(bp
))) != TRAN_ACCEPT
) {
2531 struct diskhd
*dp
= &un
->un_utab
;
2533 mutex_enter(DCD_MUTEX
);
2535 if (status
== TRAN_BUSY
) {
2536 DCD_DO_KSTATS(un
, kstat_runq_back_to_waitq
, bp
);
2537 dcd_handle_tran_busy(bp
, dp
, un
);
2538 mutex_exit(DCD_MUTEX
);
2541 DCD_DO_ERRSTATS(un
, dcd_transerrs
);
2542 DCD_DO_KSTATS(un
, kstat_runq_exit
, bp
);
2544 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2545 "requeue of command fails (%x)\n", status
);
2546 SET_BP_ERROR(bp
, EIO
);
2547 bp
->b_resid
= bp
->b_bcount
;
2549 dcddone_and_mutex_exit(un
, bp
);
2556 DCD_DO_KSTATS(un
, kstat_waitq_enter
, bp
);
2557 mutex_exit(DCD_MUTEX
);
2562 TRACE_0(TR_FAC_DADA
, TR_DCDINTR_END
, "dcdintr_end");
2567 * Done with a command.
2570 dcddone_and_mutex_exit(struct dcd_disk
*un
, register struct buf
*bp
)
2574 TRACE_1(TR_FAC_DADA
, TR_DCDONE_START
, "dcddone_start: un 0x%p", un
);
2576 _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un
->un_dcd
->dcd_mutex
));
2579 if (bp
== dp
->b_forw
) {
2584 ulong_t n_done
= bp
->b_bcount
- bp
->b_resid
;
2585 if (bp
->b_flags
& B_READ
) {
2587 IOSP
->nread
+= n_done
;
2590 IOSP
->nwritten
+= n_done
;
2593 if (IO_PARTITION_STATS
) {
2594 ulong_t n_done
= bp
->b_bcount
- bp
->b_resid
;
2595 if (bp
->b_flags
& B_READ
) {
2596 IOSP_PARTITION
->reads
++;
2597 IOSP_PARTITION
->nread
+= n_done
;
2599 IOSP_PARTITION
->writes
++;
2600 IOSP_PARTITION
->nwritten
+= n_done
;
2605 * Start the next one before releasing resources on this one
2607 if (un
->un_state
== DCD_STATE_SUSPENDED
) {
2608 cv_broadcast(&un
->un_disk_busy_cv
);
2609 } else if (dp
->b_actf
&& (un
->un_ncmds
< un
->un_throttle
) &&
2610 (dp
->b_forw
== NULL
&& un
->un_state
!= DCD_STATE_SUSPENDED
)) {
2614 mutex_exit(DCD_MUTEX
);
2616 if (bp
!= un
->un_sbufp
) {
2617 dcd_destroy_pkt(BP_PKT(bp
));
2618 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2619 "regular done: resid %ld\n", bp
->b_resid
);
2621 ASSERT(un
->un_sbuf_busy
);
2623 TRACE_0(TR_FAC_DADA
, TR_DCDDONE_BIODONE_CALL
, "dcddone_biodone_call");
2627 (void) pm_idle_component(DCD_DEVINFO
, 0);
2629 TRACE_0(TR_FAC_DADA
, TR_DCDDONE_END
, "dcddone end");
2634 * reset the disk unless the transport layer has already
2635 * cleared the problem
2637 #define C1 (STAT_ATA_BUS_RESET|STAT_ATA_DEV_RESET|STAT_ATA_ABORTED)
2639 dcd_reset_disk(struct dcd_disk
*un
, struct dcd_pkt
*pkt
)
2642 if ((pkt
->pkt_statistics
& C1
) == 0) {
2643 mutex_exit(DCD_MUTEX
);
2644 if (!dcd_reset(ROUTE
, RESET_ALL
)) {
2645 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2648 mutex_enter(DCD_MUTEX
);
2653 dcd_handle_incomplete(struct dcd_disk
*un
, struct buf
*bp
)
2655 static char *fail
= "ATA transport failed: reason '%s': %s\n";
2656 static char *notresp
= "disk not responding to selection\n";
2657 int rval
= COMMAND_DONE_ERROR
;
2658 int action
= COMMAND_SOFT_ERROR
;
2659 struct dcd_pkt
*pkt
= BP_PKT(bp
);
2660 int be_chatty
= (un
->un_state
!= DCD_STATE_SUSPENDED
) &&
2661 (bp
!= un
->un_sbufp
|| !(pkt
->pkt_flags
& FLAG_SILENT
));
2663 ASSERT(mutex_owned(DCD_MUTEX
));
2665 switch (pkt
->pkt_reason
) {
2669 * This Indicates the already the HBA would have reset
2670 * so Just indicate to retry the command
2674 case CMD_INCOMPLETE
:
2675 action
= dcd_check_error(un
, bp
);
2676 DCD_DO_ERRSTATS(un
, dcd_transerrs
);
2677 if (action
== COMMAND_HARD_ERROR
) {
2678 (void) dcd_reset_disk(un
, pkt
);
2684 * Something drastic has gone wrong
2693 * the target may still be running the command,
2694 * so we should try and reset that target.
2696 DCD_DO_ERRSTATS(un
, dcd_transerrs
);
2697 if ((pkt
->pkt_reason
!= CMD_RESET
) &&
2698 (pkt
->pkt_reason
!= CMD_ABORTED
)) {
2699 (void) dcd_reset_disk(un
, pkt
);
2705 * If pkt_reason is CMD_RESET/ABORTED, chances are that this pkt got
2706 * reset/aborted because another disk on this bus caused it.
2707 * The disk that caused it, should get CMD_TIMEOUT with pkt_statistics
2708 * of STAT_TIMEOUT/STAT_DEV_RESET
2710 if ((pkt
->pkt_reason
== CMD_RESET
) ||(pkt
->pkt_reason
== CMD_ABORTED
)) {
2711 /* To be written : XXX */
2712 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2713 "Command aborted\n");
2716 if (bp
== un
->un_sbufp
&& (pkt
->pkt_flags
& FLAG_DIAGNOSE
)) {
2717 rval
= COMMAND_DONE_ERROR
;
2719 if ((rval
== COMMAND_DONE_ERROR
) &&
2720 (action
== COMMAND_SOFT_ERROR
) &&
2721 ((int)PKT_GET_RETRY_CNT(pkt
) < dcd_retry_count
)) {
2722 PKT_INCR_RETRY_CNT(pkt
, 1);
2727 if (pkt
->pkt_reason
== CMD_INCOMPLETE
&& rval
== COMMAND_DONE_ERROR
) {
2729 * Looks like someone turned off this shoebox.
2731 if (un
->un_state
!= DCD_STATE_OFFLINE
) {
2732 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2733 (const char *) notresp
);
2734 New_state(un
, DCD_STATE_OFFLINE
);
2736 } else if (pkt
->pkt_reason
== CMD_FATAL
) {
2738 * Suppressing the following message for the time being
2739 * dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2740 * (const char *) notresp);
2742 PKT_INCR_RETRY_CNT(pkt
, 6);
2743 rval
= COMMAND_DONE_ERROR
;
2744 New_state(un
, DCD_STATE_FATAL
);
2745 } else if (be_chatty
) {
2746 int in_panic
= ddi_in_panic();
2747 if (!in_panic
|| (rval
== COMMAND_DONE_ERROR
)) {
2748 if (((pkt
->pkt_reason
!= un
->un_last_pkt_reason
) &&
2749 (pkt
->pkt_reason
!= CMD_RESET
)) ||
2750 (rval
== COMMAND_DONE_ERROR
) ||
2751 (dcd_error_level
== DCD_ERR_ALL
)) {
2752 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2753 fail
, dcd_rname(pkt
->pkt_reason
),
2754 (rval
== COMMAND_DONE_ERROR
) ?
2755 "giving up": "retrying command");
2756 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2758 PKT_GET_RETRY_CNT(pkt
));
2767 dcd_check_error(struct dcd_disk
*un
, struct buf
*bp
)
2769 struct diskhd
*dp
= &un
->un_utab
;
2770 struct dcd_pkt
*pkt
= BP_PKT(bp
);
2772 unsigned char status
;
2773 unsigned char error
;
2775 TRACE_0(TR_FAC_DADA
, TR_DCD_CHECK_ERROR_START
, "dcd_check_error_start");
2776 ASSERT(mutex_owned(DCD_MUTEX
));
2778 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
2779 "Pkt: 0x%p dp: 0x%p\n", (void *)pkt
, (void *)dp
);
2782 * Here we need to check status first and then if error is indicated
2783 * Then the error register.
2786 status
= (pkt
->pkt_scbp
)[0];
2787 if ((status
& STATUS_ATA_DWF
) == STATUS_ATA_DWF
) {
2789 * There has been a Device Fault - reason for such error
2790 * is vendor specific
2791 * Action to be taken is - Indicate error and reset device.
2794 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
, "Device Fault\n");
2795 rval
= COMMAND_HARD_ERROR
;
2796 } else if ((status
& STATUS_ATA_CORR
) == STATUS_ATA_CORR
) {
2799 * The sector read or written is marginal and hence ECC
2800 * Correction has been applied. Indicate to repair
2801 * Here we need to probably re-assign based on the badblock
2805 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2806 "Soft Error on block %x\n",
2807 ((struct dcd_cmd
*)pkt
->pkt_cdbp
)->sector_num
.lba_num
);
2808 rval
= COMMAND_SOFT_ERROR
;
2809 } else if ((status
& STATUS_ATA_ERR
) == STATUS_ATA_ERR
) {
2810 error
= pkt
->pkt_scbp
[1];
2812 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2813 "Command:0x%x,Error:0x%x,Status:0x%x\n",
2814 GETATACMD((struct dcd_cmd
*)pkt
->pkt_cdbp
),
2816 if ((error
& ERR_AMNF
) == ERR_AMNF
) {
2817 /* Address make not found */
2818 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2819 "Address Mark Not Found");
2820 } else if ((error
& ERR_TKONF
) == ERR_TKONF
) {
2821 /* Track 0 Not found */
2822 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2823 "Track 0 Not found \n");
2824 } else if ((error
& ERR_IDNF
) == ERR_IDNF
) {
2825 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2826 " ID not found \n");
2827 } else if ((error
& ERR_UNC
) == ERR_UNC
) {
2828 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2829 "Uncorrectable data Error: Block %x\n",
2830 ((struct dcd_cmd
*)pkt
->pkt_cdbp
)->
2831 sector_num
.lba_num
);
2832 } else if ((error
& ERR_BBK
) == ERR_BBK
) {
2833 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2834 "Bad block detected: Block %x\n",
2835 ((struct dcd_cmd
*)pkt
->pkt_cdbp
)->
2836 sector_num
.lba_num
);
2837 } else if ((error
& ERR_ABORT
) == ERR_ABORT
) {
2838 /* Aborted Command */
2839 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2840 " Aborted Command \n");
2843 * Return the soft error so that the command
2846 rval
= COMMAND_SOFT_ERROR
;
2849 TRACE_0(TR_FAC_DADA
, TR_DCD_CHECK_ERROR_END
, "dcd_check_error_end");
2855 * System Crash Dump routine
2858 #define NDUMP_RETRIES 5
2861 dcddump(dev_t dev
, caddr_t addr
, daddr_t blkno
, int nblk
)
2863 struct dcd_pkt
*pkt
;
2865 struct buf local
, *bp
;
2868 diskaddr_t p_lblksrt
;
2871 GET_SOFT_STATE(dev
);
2873 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un
))
2875 if ((un
->un_state
& DCD_STATE_FATAL
) == DCD_STATE_FATAL
)
2878 if (cmlb_partinfo(un
->un_dklbhandle
, DCDPART(dev
),
2879 &lblocks
, &p_lblksrt
, NULL
, NULL
, 0))
2882 if (blkno
+nblk
> lblocks
) {
2887 if ((un
->un_state
== DCD_STATE_SUSPENDED
) ||
2888 (un
->un_state
== DCD_STATE_PM_SUSPENDED
)) {
2889 if (pm_raise_power(DCD_DEVINFO
, 0,
2890 DCD_DEVICE_ACTIVE
) != DDI_SUCCESS
) {
2896 * When cpr calls dcddump, we know that dad is in a
2897 * a good state, so no bus reset is required
2899 un
->un_throttle
= 0;
2901 if ((un
->un_state
!= DCD_STATE_SUSPENDED
) &&
2902 (un
->un_state
!= DCD_STATE_DUMPING
)) {
2904 New_state(un
, DCD_STATE_DUMPING
);
2907 * Reset the bus. I'd like to not have to do this,
2908 * but this is the safest thing to do...
2911 if (dcd_reset(ROUTE
, RESET_ALL
) == 0) {
2920 * It should be safe to call the allocator here without
2921 * worrying about being locked for DVMA mapping because
2922 * the address we're passed is already a DVMA mapping
2924 * We are also not going to worry about semaphore ownership
2925 * in the dump buffer. Dumping is single threaded at present.
2929 bzero((caddr_t
)bp
, sizeof (*bp
));
2930 bp
->b_flags
= B_BUSY
;
2931 bp
->b_un
.b_addr
= addr
;
2932 bp
->b_bcount
= nblk
<< DEV_BSHIFT
;
2935 for (i
= 0; i
< NDUMP_RETRIES
; i
++) {
2936 bp
->b_flags
&= ~B_ERROR
;
2937 if ((pkt
= dcd_init_pkt(ROUTE
, NULL
, bp
,
2938 (uint32_t)sizeof (struct dcd_cmd
), 2, PP_LEN
,
2939 PKT_CONSISTENT
, NULL_FUNC
, NULL
)) != NULL
) {
2943 if (bp
->b_flags
& B_ERROR
) {
2944 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2945 "no resources for dumping; "
2946 "error code: 0x%x, retrying",
2949 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
2950 "no resources for dumping; retrying");
2952 } else if (i
!= (NDUMP_RETRIES
- 1)) {
2953 if (bp
->b_flags
& B_ERROR
) {
2954 dcd_log(DCD_DEVINFO
, dcd_label
, CE_CONT
, "no "
2955 "resources for dumping; error code: 0x%x, "
2956 "retrying\n", geterror(bp
));
2959 if (bp
->b_flags
& B_ERROR
) {
2960 dcd_log(DCD_DEVINFO
, dcd_label
, CE_CONT
,
2961 "no resources for dumping; "
2962 "error code: 0x%x, retries failed, "
2963 "giving up.\n", geterror(bp
));
2965 dcd_log(DCD_DEVINFO
, dcd_label
, CE_CONT
,
2966 "no resources for dumping; "
2967 "retries failed, giving up.\n");
2973 if ((un
->un_dp
->options
& DMA_SUPPORTTED
) == DMA_SUPPORTTED
) {
2974 com
= ATA_WRITE_DMA
;
2976 if (un
->un_dp
->options
& BLOCK_MODE
)
2977 com
= ATA_WRITE_MULTIPLE
;
2982 makecommand(pkt
, 0, com
, blkno
, ADD_LBA_MODE
,
2983 (int)nblk
*un
->un_secsize
, DATA_WRITE
, 0);
2985 for (err
= EIO
, i
= 0; i
< NDUMP_RETRIES
&& err
== EIO
; i
++) {
2987 if (dcd_poll(pkt
) == 0) {
2988 switch (SCBP_C(pkt
)) {
2990 if (pkt
->pkt_resid
== 0) {
2994 case STATUS_ATA_BUSY
:
2995 (void) dcd_reset(ROUTE
, RESET_TARGET
);
2998 mutex_enter(DCD_MUTEX
);
2999 (void) dcd_reset_disk(un
, pkt
);
3000 mutex_exit(DCD_MUTEX
);
3003 } else if (i
> NDUMP_RETRIES
/2) {
3004 (void) dcd_reset(ROUTE
, RESET_ALL
);
3008 dcd_destroy_pkt(pkt
);
3013 * This routine implements the ioctl calls. It is called
3014 * from the device switch at normal priority.
3018 dcdioctl(dev_t dev
, int cmd
, intptr_t arg
, int flag
,
3019 cred_t
*cred_p
, int *rval_p
)
3021 auto int32_t data
[512 / (sizeof (int32_t))];
3022 struct dk_cinfo
*info
;
3023 struct dk_minfo media_info
;
3024 struct udcd_cmd
*scmd
;
3026 enum uio_seg uioseg
= 0;
3027 enum dkio_state state
= 0;
3028 #ifdef _MULTI_DATAMODEL
3029 struct dadkio_rwcmd rwcmd
;
3031 struct dadkio_rwcmd32 rwcmd32
;
3032 struct dcd_cmd dcdcmd
;
3034 GET_SOFT_STATE(dev
);
3036 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
3037 "dcd_ioctl : cmd %x, arg %lx\n", cmd
, arg
);
3039 bzero((caddr_t
)data
, sizeof (data
));
3045 * Following ioctl are for testing RESET/ABORTS
3047 #define DKIOCRESET (DKIOC|14)
3048 #define DKIOCABORT (DKIOC|15)
3051 if (ddi_copyin((caddr_t
)arg
, (caddr_t
)data
, 4, flag
))
3053 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
3054 "DKIOCRESET: data = 0x%x\n", data
[0]);
3055 if (dcd_reset(ROUTE
, data
[0])) {
3061 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
3063 if (dcd_abort(ROUTE
, NULL
)) {
3072 * Controller Information
3074 info
= (struct dk_cinfo
*)data
;
3076 mutex_enter(DCD_MUTEX
);
3077 switch (un
->un_dp
->ctype
) {
3079 info
->dki_ctype
= DKC_DIRECT
;
3082 mutex_exit(DCD_MUTEX
);
3083 info
->dki_cnum
= ddi_get_instance(ddi_get_parent(DCD_DEVINFO
));
3084 (void) strcpy(info
->dki_cname
,
3085 ddi_get_name(ddi_get_parent(DCD_DEVINFO
)));
3089 info
->dki_unit
= ddi_get_instance(DCD_DEVINFO
);
3090 info
->dki_slave
= (Tgt(DCD_DCD_DEVP
)<<3);
3091 (void) strcpy(info
->dki_dname
, ddi_driver_name(DCD_DEVINFO
));
3092 info
->dki_flags
= DKI_FMTVOL
;
3093 info
->dki_partition
= DCDPART(dev
);
3096 * Max Transfer size of this device in blocks
3098 info
->dki_maxtransfer
= un
->un_max_xfer_size
/ DEV_BSIZE
;
3101 * We can't get from here to there yet
3104 info
->dki_space
= 0;
3108 i
= sizeof (struct dk_cinfo
);
3109 if (ddi_copyout((caddr_t
)data
, (caddr_t
)arg
, i
, flag
))
3114 case DKIOCGMEDIAINFO
:
3116 * As dad target driver is used for IDE disks only
3117 * Can keep the return value hardcoded to FIXED_DISK
3119 media_info
.dki_media_type
= DK_FIXED_DISK
;
3121 mutex_enter(DCD_MUTEX
);
3122 media_info
.dki_lbsize
= un
->un_lbasize
;
3123 media_info
.dki_capacity
= un
->un_diskcapacity
;
3124 mutex_exit(DCD_MUTEX
);
3126 if (ddi_copyout(&media_info
, (caddr_t
)arg
,
3127 sizeof (struct dk_minfo
), flag
))
3136 mutex_enter(DCD_MUTEX
);
3137 if (un
->un_ncmds
== 0) {
3138 if ((err
= dcd_unit_ready(dev
)) != 0) {
3139 mutex_exit(DCD_MUTEX
);
3144 mutex_exit(DCD_MUTEX
);
3145 err
= cmlb_ioctl(un
->un_dklbhandle
, dev
, cmd
,
3146 arg
, flag
, cred_p
, rval_p
, 0);
3154 case DKIOCPARTITION
:
3159 err
= cmlb_ioctl(un
->un_dklbhandle
, dev
, cmd
,
3160 arg
, flag
, cred_p
, rval_p
, 0);
3164 if (drv_priv(cred_p
) != 0) {
3168 #ifdef _MULTI_DATAMODEL
3169 switch (ddi_model_convert_from(flag
& FMODELS
)) {
3170 case DDI_MODEL_NONE
:
3171 if (ddi_copyin((caddr_t
)arg
, (caddr_t
)&rwcmd
,
3172 sizeof (struct dadkio_rwcmd
), flag
)) {
3175 rwcmd32
.cmd
= rwcmd
.cmd
;
3176 rwcmd32
.flags
= rwcmd
.flags
;
3177 rwcmd32
.blkaddr
= rwcmd
.blkaddr
;
3178 rwcmd32
.buflen
= rwcmd
.buflen
;
3179 rwcmd32
.bufaddr
= (caddr32_t
)(uintptr_t)rwcmd
.bufaddr
;
3181 case DDI_MODEL_ILP32
:
3182 if (ddi_copyin((caddr_t
)arg
, (caddr_t
)&rwcmd32
,
3183 sizeof (struct dadkio_rwcmd32
), flag
)) {
3189 if (ddi_copyin((caddr_t
)arg
, (caddr_t
)&rwcmd32
,
3190 sizeof (struct dadkio_rwcmd32
), flag
)) {
3194 mutex_enter(DCD_MUTEX
);
3196 uioseg
= UIO_SYSSPACE
;
3197 scmd
= (struct udcd_cmd
*)data
;
3198 scmd
->udcd_cmd
= &dcdcmd
;
3200 * Convert the dadkio_rwcmd structure to udcd_cmd so that
3201 * it can take the normal path to get the io done
3203 if (rwcmd32
.cmd
== DADKIO_RWCMD_READ
) {
3204 if ((un
->un_dp
->options
& DMA_SUPPORTTED
) ==
3206 scmd
->udcd_cmd
->cmd
= ATA_READ_DMA
;
3208 scmd
->udcd_cmd
->cmd
= ATA_READ
;
3209 scmd
->udcd_cmd
->address_mode
= ADD_LBA_MODE
;
3210 scmd
->udcd_cmd
->direction
= DATA_READ
;
3211 scmd
->udcd_flags
|= UDCD_READ
|UDCD_SILENT
;
3212 } else if (rwcmd32
.cmd
== DADKIO_RWCMD_WRITE
) {
3213 if ((un
->un_dp
->options
& DMA_SUPPORTTED
) ==
3215 scmd
->udcd_cmd
->cmd
= ATA_WRITE_DMA
;
3217 scmd
->udcd_cmd
->cmd
= ATA_WRITE
;
3218 scmd
->udcd_cmd
->direction
= DATA_WRITE
;
3219 scmd
->udcd_flags
|= UDCD_WRITE
|UDCD_SILENT
;
3221 mutex_exit(DCD_MUTEX
);
3225 scmd
->udcd_cmd
->address_mode
= ADD_LBA_MODE
;
3226 scmd
->udcd_cmd
->features
= 0;
3227 scmd
->udcd_cmd
->size
= rwcmd32
.buflen
;
3228 scmd
->udcd_cmd
->sector_num
.lba_num
= rwcmd32
.blkaddr
;
3229 scmd
->udcd_bufaddr
= (caddr_t
)(uintptr_t)rwcmd32
.bufaddr
;
3230 scmd
->udcd_buflen
= rwcmd32
.buflen
;
3231 scmd
->udcd_timeout
= (ushort_t
)dcd_io_time
;
3232 scmd
->udcd_resid
= 0ULL;
3233 scmd
->udcd_status
= 0;
3234 scmd
->udcd_error_reg
= 0;
3235 scmd
->udcd_status_reg
= 0;
3237 mutex_exit(DCD_MUTEX
);
3239 i
= dcdioctl_cmd(dev
, scmd
, UIO_SYSSPACE
, UIO_USERSPACE
);
3240 mutex_enter(DCD_MUTEX
);
3242 * After return convert the status from scmd to
3245 (void) dcd_translate(&(rwcmd32
.status
), scmd
);
3246 rwcmd32
.status
.resid
= scmd
->udcd_resid
;
3247 mutex_exit(DCD_MUTEX
);
3249 #ifdef _MULTI_DATAMODEL
3250 switch (ddi_model_convert_from(flag
& FMODELS
)) {
3251 case DDI_MODEL_NONE
: {
3253 rwcmd
.status
.status
= rwcmd32
.status
.status
;
3254 rwcmd
.status
.resid
= rwcmd32
.status
.resid
;
3255 rwcmd
.status
.failed_blk_is_valid
=
3256 rwcmd32
.status
.failed_blk_is_valid
;
3257 rwcmd
.status
.failed_blk
= rwcmd32
.status
.failed_blk
;
3258 rwcmd
.status
.fru_code_is_valid
=
3259 rwcmd32
.status
.fru_code_is_valid
;
3260 rwcmd
.status
.fru_code
= rwcmd32
.status
.fru_code
;
3262 counter
< DADKIO_ERROR_INFO_LEN
; counter
++)
3263 rwcmd
.status
.add_error_info
[counter
] =
3264 rwcmd32
.status
.add_error_info
[counter
];
3266 /* Copy out the result back to the user program */
3267 if (ddi_copyout((caddr_t
)&rwcmd
, (caddr_t
)arg
,
3268 sizeof (struct dadkio_rwcmd
), flag
)) {
3274 case DDI_MODEL_ILP32
:
3275 /* Copy out the result back to the user program */
3276 if (ddi_copyout((caddr_t
)&rwcmd32
, (caddr_t
)arg
,
3277 sizeof (struct dadkio_rwcmd32
), flag
)) {
3285 /* Copy out the result back to the user program */
3286 if (ddi_copyout((caddr_t
)&rwcmd32
, (caddr_t
)arg
,
3287 sizeof (struct dadkio_rwcmd32
), flag
)) {
3295 #ifdef _MULTI_DATAMODEL
3297 * For use when a 32 bit app makes a call into a
3300 struct udcd_cmd32 udcd_cmd_32_for_64
;
3301 struct udcd_cmd32
*ucmd32
= &udcd_cmd_32_for_64
;
3303 #endif /* _MULTI_DATAMODEL */
3305 if (drv_priv(cred_p
) != 0) {
3309 scmd
= (struct udcd_cmd
*)data
;
3311 #ifdef _MULTI_DATAMODEL
3312 switch (model
= ddi_model_convert_from(flag
& FMODELS
)) {
3313 case DDI_MODEL_ILP32
:
3314 if (ddi_copyin((caddr_t
)arg
, ucmd32
,
3315 sizeof (struct udcd_cmd32
), flag
)) {
3319 * Convert the ILP32 uscsi data from the
3320 * application to LP64 for internal use.
3322 udcd_cmd32toudcd_cmd(ucmd32
, scmd
);
3324 case DDI_MODEL_NONE
:
3325 if (ddi_copyin((caddr_t
)arg
, scmd
, sizeof (*scmd
),
3331 #else /* ! _MULTI_DATAMODEL */
3332 if (ddi_copyin((caddr_t
)arg
, (caddr_t
)scmd
,
3333 sizeof (*scmd
), flag
)) {
3336 #endif /* ! _MULTI_DATAMODEL */
3338 scmd
->udcd_flags
&= ~UDCD_NOINTR
;
3339 uioseg
= (flag
& FKIOCTL
)? UIO_SYSSPACE
: UIO_USERSPACE
;
3341 i
= dcdioctl_cmd(dev
, scmd
, uioseg
, uioseg
);
3342 #ifdef _MULTI_DATAMODEL
3344 case DDI_MODEL_ILP32
:
3346 * Convert back to ILP32 before copyout to the
3349 udcd_cmdtoudcd_cmd32(scmd
, ucmd32
);
3350 if (ddi_copyout(ucmd32
, (caddr_t
)arg
,
3351 sizeof (*ucmd32
), flag
)) {
3356 case DDI_MODEL_NONE
:
3357 if (ddi_copyout(scmd
, (caddr_t
)arg
, sizeof (*scmd
),
3364 #else /* ! _MULTI_DATAMODE */
3365 if (ddi_copyout((caddr_t
)scmd
, (caddr_t
)arg
,
3366 sizeof (*scmd
), flag
)) {
3373 case DKIOCFLUSHWRITECACHE
: {
3374 struct dk_callback
*dkc
= (struct dk_callback
*)arg
;
3375 struct dcd_pkt
*pkt
;
3379 mutex_enter(DCD_MUTEX
);
3380 if (un
->un_flush_not_supported
||
3381 ! un
->un_write_cache_enabled
) {
3382 i
= un
->un_flush_not_supported
? ENOTSUP
: 0;
3383 mutex_exit(DCD_MUTEX
);
3385 * If a callback was requested: a callback will
3386 * always be done if the caller saw the
3387 * DKIOCFLUSHWRITECACHE ioctl return 0, and
3388 * never done if the caller saw the ioctl return
3391 if ((flag
& FKIOCTL
) && dkc
!= NULL
&&
3392 dkc
->dkc_callback
!= NULL
) {
3393 (*dkc
->dkc_callback
)(dkc
->dkc_cookie
, i
);
3395 * Did callback and reported error.
3396 * Since we did a callback, ioctl
3405 * Get the special buffer
3407 while (un
->un_sbuf_busy
) {
3408 cv_wait(&un
->un_sbuf_cv
, DCD_MUTEX
);
3410 un
->un_sbuf_busy
= 1;
3412 mutex_exit(DCD_MUTEX
);
3414 pkt
= dcd_init_pkt(ROUTE
, NULL
,
3415 NULL
, (uint32_t)sizeof (struct dcd_cmd
),
3416 2, PP_LEN
, PKT_CONSISTENT
, SLEEP_FUNC
, (caddr_t
)un
);
3417 ASSERT(pkt
!= NULL
);
3419 makecommand(pkt
, un
->un_cmd_flags
| FLAG_SILENT
,
3420 ATA_FLUSH_CACHE
, 0, ADD_LBA_MODE
, 0, NO_DATA_XFER
, 0);
3422 pkt
->pkt_comp
= dcdintr
;
3423 pkt
->pkt_time
= DCD_FLUSH_TIME
;
3424 PKT_SET_BP(pkt
, bp
);
3426 bp
->av_back
= (struct buf
*)pkt
;
3428 bp
->b_flags
= B_BUSY
;
3431 bp
->b_dev
= cmpdev(dev
);
3434 bp
->b_un
.b_addr
= 0;
3435 bp
->b_iodone
= NULL
;
3437 bp
->b_private
= NULL
;
3439 if ((flag
& FKIOCTL
) && dkc
!= NULL
&&
3440 dkc
->dkc_callback
!= NULL
) {
3441 struct dk_callback
*dkc2
= (struct dk_callback
*)
3442 kmem_zalloc(sizeof (*dkc2
), KM_SLEEP
);
3443 bcopy(dkc
, dkc2
, sizeof (*dkc2
));
3445 bp
->b_private
= dkc2
;
3446 bp
->b_iodone
= dcdflushdone
;
3450 (void) dcdstrategy(bp
);
3455 (void) dcdflushdone(bp
);
3468 dcdflushdone(struct buf
*bp
)
3470 struct dcd_disk
*un
= ddi_get_soft_state(dcd_state
,
3471 DCDUNIT(bp
->b_edev
));
3472 struct dcd_pkt
*pkt
= BP_PKT(bp
);
3473 struct dk_callback
*dkc
= bp
->b_private
;
3476 ASSERT(bp
== un
->un_sbufp
);
3477 ASSERT(pkt
!= NULL
);
3479 dcd_destroy_pkt(pkt
);
3480 bp
->av_back
= NO_PKT_ALLOCATED
;
3483 ASSERT(bp
->b_iodone
!= NULL
);
3484 (*dkc
->dkc_callback
)(dkc
->dkc_cookie
, geterror(bp
));
3485 kmem_free(dkc
, sizeof (*dkc
));
3486 bp
->b_iodone
= NULL
;
3487 bp
->b_private
= NULL
;
3491 * Tell anybody who cares that the buffer is now free
3493 mutex_enter(DCD_MUTEX
);
3494 un
->un_sbuf_busy
= 0;
3495 cv_signal(&un
->un_sbuf_cv
);
3496 mutex_exit(DCD_MUTEX
);
3502 * the callback function for resource allocation
3504 * XXX it would be preferable that dcdrunout() scans the whole
3505 * list for possible candidates for dcdstart(); this avoids
3506 * that a bp at the head of the list whose request cannot be
3507 * satisfied is retried again and again
3511 dcdrunout(caddr_t arg
)
3514 struct dcd_disk
*un
;
3517 TRACE_1(TR_FAC_DADA
, TR_DCDRUNOUT_START
, "dcdrunout_start: arg 0x%p",
3521 un
= (struct dcd_disk
*)arg
;
3525 * We now support passing a structure to the callback
3529 mutex_enter(DCD_MUTEX
);
3530 if ((un
->un_ncmds
< un
->un_throttle
) && (dp
->b_forw
== NULL
)) {
3533 if (un
->un_state
== DCD_STATE_RWAIT
) {
3536 mutex_exit(DCD_MUTEX
);
3537 TRACE_1(TR_FAC_DADA
, TR_DCDRUNOUT_END
,
3538 "dcdrunout_end: serviced %d", serviced
);
3544 * This routine called to see whether unit is (still) there. Must not
3545 * be called when un->un_sbufp is in use, and must not be called with
3546 * an unattached disk. Soft state of disk is restored to what it was
3547 * upon entry- up to caller to set the correct state.
3549 * We enter with the disk mutex held.
3554 dcd_unit_ready(dev_t dev
)
3556 auto struct udcd_cmd dcmd
, *com
= &dcmd
;
3557 auto struct dcd_cmd cmdblk
;
3559 GET_SOFT_STATE(dev
);
3562 * Now that we protect the special buffer with
3563 * a mutex, we could probably do a mutex_tryenter
3564 * on it here and return failure if it were held...
3573 dcdioctl_cmd(dev_t devp
, struct udcd_cmd
*in
, enum uio_seg cdbspace
,
3574 enum uio_seg dataspace
)
3578 struct udcd_cmd
*scmd
;
3579 struct dcd_pkt
*pkt
;
3584 GET_SOFT_STATE(devp
);
3588 * Is this a request to reset the bus?
3589 * if so, we need to do reseting.
3592 if (in
->udcd_flags
& UDCD_RESET
) {
3593 int flag
= RESET_TARGET
;
3594 err
= dcd_reset(ROUTE
, flag
) ? 0: EIO
;
3601 /* Do some sanity checks */
3602 if (scmd
->udcd_buflen
<= 0) {
3603 if (scmd
->udcd_flags
& (UDCD_READ
| UDCD_WRITE
)) {
3606 scmd
->udcd_buflen
= 0;
3610 /* Make a copy of the dcd_cmd passed */
3611 cdb
= kmem_zalloc(sizeof (struct dcd_cmd
), KM_SLEEP
);
3612 if (cdbspace
== UIO_SYSSPACE
) {
3616 if (ddi_copyin((void *)scmd
->udcd_cmd
, cdb
, sizeof (struct dcd_cmd
),
3618 kmem_free(cdb
, sizeof (struct dcd_cmd
));
3621 scmd
= kmem_alloc(sizeof (*scmd
), KM_SLEEP
);
3622 bcopy((caddr_t
)in
, (caddr_t
)scmd
, sizeof (*scmd
));
3623 scmd
->udcd_cmd
= (struct dcd_cmd
*)cdb
;
3624 rw
= (scmd
->udcd_flags
& UDCD_READ
) ? B_READ
: B_WRITE
;
3628 * Get the special buffer
3631 mutex_enter(DCD_MUTEX
);
3632 while (un
->un_sbuf_busy
) {
3633 if (cv_wait_sig(&un
->un_sbuf_cv
, DCD_MUTEX
) == 0) {
3634 kmem_free(scmd
->udcd_cmd
, sizeof (struct dcd_cmd
));
3635 kmem_free((caddr_t
)scmd
, sizeof (*scmd
));
3636 mutex_exit(DCD_MUTEX
);
3641 un
->un_sbuf_busy
= 1;
3643 mutex_exit(DCD_MUTEX
);
3647 * If we are going to do actual I/O, let physio do all the
3650 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
3651 "dcdioctl_cmd : buflen %x\n", scmd
->udcd_buflen
);
3653 if (scmd
->udcd_buflen
) {
3654 auto struct iovec aiov
;
3655 auto struct uio auio
;
3656 struct uio
*uio
= &auio
;
3658 bzero((caddr_t
)&auio
, sizeof (struct uio
));
3659 bzero((caddr_t
)&aiov
, sizeof (struct iovec
));
3661 aiov
.iov_base
= scmd
->udcd_bufaddr
;
3662 aiov
.iov_len
= scmd
->udcd_buflen
;
3664 uio
->uio_iov
= &aiov
;
3665 uio
->uio_iovcnt
= 1;
3666 uio
->uio_resid
= scmd
->udcd_buflen
;
3667 uio
->uio_segflg
= dataspace
;
3670 * Let physio do the rest...
3672 bp
->av_back
= NO_PKT_ALLOCATED
;
3673 bp
->b_forw
= (struct buf
*)scmd
;
3674 err
= physio(dcdstrategy
, bp
, devp
, rw
, dcdudcdmin
, uio
);
3677 * We have to mimic what physio would do here.
3679 bp
->av_back
= NO_PKT_ALLOCATED
;
3680 bp
->b_forw
= (struct buf
*)scmd
;
3681 bp
->b_flags
= B_BUSY
| rw
;
3683 bp
->b_dev
= cmpdev(devp
);
3684 bp
->b_bcount
= bp
->b_blkno
= 0;
3685 (void) dcdstrategy(bp
);
3690 if ((pkt
= BP_PKT(bp
)) != NULL
) {
3691 bp
->av_back
= NO_PKT_ALLOCATED
;
3692 /* we need to update the completion status of udcd command */
3693 in
->udcd_resid
= bp
->b_resid
;
3694 in
->udcd_status_reg
= SCBP_C(pkt
);
3695 /* XXX: we need to give error_reg also */
3696 dcd_destroy_pkt(pkt
);
3699 * Tell anybody who cares that the buffer is now free
3701 mutex_enter(DCD_MUTEX
);
3702 un
->un_sbuf_busy
= 0;
3703 cv_signal(&un
->un_sbuf_cv
);
3704 mutex_exit(DCD_MUTEX
);
3706 kmem_free(scmd
->udcd_cmd
, sizeof (struct dcd_cmd
));
3707 kmem_free((caddr_t
)scmd
, sizeof (*scmd
));
3712 dcdudcdmin(struct buf
*bp
)
3719 * restart a cmd from timeout() context
3721 * the cmd is expected to be in un_utab.b_forw. If this pointer is non-zero
3722 * a restart timeout request has been issued and no new timeouts should
3723 * be requested. b_forw is reset when the cmd eventually completes in
3724 * dcddone_and_mutex_exit()
3727 dcdrestart(void *arg
)
3729 struct dcd_disk
*un
= (struct dcd_disk
*)arg
;
3733 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "dcdrestart\n");
3735 mutex_enter(DCD_MUTEX
);
3736 bp
= un
->un_utab
.b_forw
;
3739 DCD_DO_KSTATS(un
, kstat_waitq_to_runq
, bp
);
3744 struct dcd_pkt
*pkt
= BP_PKT(bp
);
3746 mutex_exit(DCD_MUTEX
);
3750 if ((status
= dcd_transport(pkt
)) != TRAN_ACCEPT
) {
3751 mutex_enter(DCD_MUTEX
);
3752 DCD_DO_KSTATS(un
, kstat_runq_back_to_waitq
, bp
);
3754 if (status
== TRAN_BUSY
) {
3755 /* XXX : To be checked */
3757 * if (un->un_throttle > 1) {
3758 * ASSERT(un->un_ncmds >= 0);
3759 * un->un_throttle = un->un_ncmds;
3762 un
->un_reissued_timeid
=
3763 timeout(dcdrestart
, (caddr_t
)un
,
3764 DCD_BSY_TIMEOUT
/500);
3765 mutex_exit(DCD_MUTEX
);
3768 DCD_DO_ERRSTATS(un
, dcd_transerrs
);
3769 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
3770 "dcdrestart transport failed (%x)\n", status
);
3771 bp
->b_resid
= bp
->b_bcount
;
3772 SET_BP_ERROR(bp
, EIO
);
3774 DCD_DO_KSTATS(un
, kstat_waitq_exit
, bp
);
3775 un
->un_reissued_timeid
= 0L;
3776 dcddone_and_mutex_exit(un
, bp
);
3779 mutex_enter(DCD_MUTEX
);
3781 un
->un_reissued_timeid
= 0L;
3782 mutex_exit(DCD_MUTEX
);
3783 DAD_DEBUG(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "dcdrestart done\n");
3787 * This routine gets called to reset the throttle to its saved
3788 * value wheneven we lower the throttle.
3791 dcd_reset_throttle(caddr_t arg
)
3793 struct dcd_disk
*un
= (struct dcd_disk
*)arg
;
3796 mutex_enter(DCD_MUTEX
);
3800 * start any commands that didn't start while throttling.
3802 if (dp
->b_actf
&& (un
->un_ncmds
< un
->un_throttle
) &&
3803 (dp
->b_forw
== NULL
)) {
3806 mutex_exit(DCD_MUTEX
);
3811 * This routine handles the case when a TRAN_BUSY is
3814 * If there are some commands already in the transport, the
3815 * bp can be put back on queue and it will
3816 * be retried when the queue is emptied after command
3817 * completes. But if there is no command in the tranport
3818 * and it still return busy, we have to retry the command
3819 * after some time like 10ms.
3823 dcd_handle_tran_busy(struct buf
*bp
, struct diskhd
*dp
, struct dcd_disk
*un
)
3825 ASSERT(mutex_owned(DCD_MUTEX
));
3828 if (dp
->b_forw
== NULL
|| dp
->b_forw
== bp
) {
3830 } else if (dp
->b_forw
!= bp
) {
3831 bp
->b_actf
= dp
->b_actf
;
3835 if (!un
->un_reissued_timeid
) {
3836 un
->un_reissued_timeid
=
3837 timeout(dcdrestart
, (caddr_t
)un
, DCD_BSY_TIMEOUT
/500);
3842 dcd_write_deviceid(struct dcd_disk
*un
)
3847 struct udcd_cmd ucmd
;
3849 struct dk_devid
*dkdevid
;
3854 mutex_exit(DCD_MUTEX
);
3855 if (cmlb_get_devid_block(un
->un_dklbhandle
, &blk
, 0)) {
3856 mutex_enter(DCD_MUTEX
);
3859 mutex_enter(DCD_MUTEX
);
3861 /* Allocate the buffer */
3862 dkdevid
= kmem_zalloc(un
->un_secsize
, KM_SLEEP
);
3864 /* Fill in the revision */
3865 dkdevid
->dkd_rev_hi
= DK_DEVID_REV_MSB
;
3866 dkdevid
->dkd_rev_lo
= DK_DEVID_REV_LSB
;
3868 /* Copy in the device id */
3869 bcopy(un
->un_devid
, &dkdevid
->dkd_devid
,
3870 ddi_devid_sizeof(un
->un_devid
));
3872 /* Calculate the chksum */
3874 ip
= (uint_t
*)dkdevid
;
3875 for (i
= 0; i
< ((un
->un_secsize
- sizeof (int))/sizeof (int)); i
++)
3878 /* Fill in the checksum */
3879 DKD_FORMCHKSUM(chksum
, dkdevid
);
3881 (void) bzero((caddr_t
)&ucmd
, sizeof (ucmd
));
3882 (void) bzero((caddr_t
)&cdb
, sizeof (struct dcd_cmd
));
3884 if ((un
->un_dp
->options
& DMA_SUPPORTTED
) == DMA_SUPPORTTED
) {
3885 cdb
.cmd
= ATA_WRITE_DMA
;
3887 if (un
->un_dp
->options
& BLOCK_MODE
)
3888 cdb
.cmd
= ATA_WRITE_MULTIPLE
;
3890 cdb
.cmd
= ATA_WRITE
;
3892 cdb
.size
= un
->un_secsize
;
3893 cdb
.sector_num
.lba_num
= blk
;
3894 cdb
.address_mode
= ADD_LBA_MODE
;
3895 cdb
.direction
= DATA_WRITE
;
3897 ucmd
.udcd_flags
= UDCD_WRITE
;
3898 ucmd
.udcd_cmd
= &cdb
;
3899 ucmd
.udcd_bufaddr
= (caddr_t
)dkdevid
;
3900 ucmd
.udcd_buflen
= un
->un_secsize
;
3901 ucmd
.udcd_flags
|= UDCD_SILENT
;
3902 dev
= makedevice(ddi_driver_major(DCD_DEVINFO
),
3903 ddi_get_instance(DCD_DEVINFO
) << DCDUNIT_SHIFT
);
3904 mutex_exit(DCD_MUTEX
);
3905 status
= dcdioctl_cmd(dev
, &ucmd
, UIO_SYSSPACE
, UIO_SYSSPACE
);
3906 mutex_enter(DCD_MUTEX
);
3908 kmem_free(dkdevid
, un
->un_secsize
);
3913 dcd_read_deviceid(struct dcd_disk
*un
)
3917 struct udcd_cmd ucmd
;
3919 struct dk_devid
*dkdevid
;
3925 mutex_exit(DCD_MUTEX
);
3926 if (cmlb_get_devid_block(un
->un_dklbhandle
, &blk
, 0)) {
3927 mutex_enter(DCD_MUTEX
);
3930 mutex_enter(DCD_MUTEX
);
3932 dkdevid
= kmem_alloc(un
->un_secsize
, KM_SLEEP
);
3934 (void) bzero((caddr_t
)&ucmd
, sizeof (ucmd
));
3935 (void) bzero((caddr_t
)&cdb
, sizeof (cdb
));
3937 if ((un
->un_dp
->options
& DMA_SUPPORTTED
) == DMA_SUPPORTTED
) {
3938 cdb
.cmd
= ATA_READ_DMA
;
3940 if (un
->un_dp
->options
& BLOCK_MODE
)
3941 cdb
.cmd
= ATA_READ_MULTIPLE
;
3945 cdb
.size
= un
->un_secsize
;
3946 cdb
.sector_num
.lba_num
= blk
;
3947 cdb
.address_mode
= ADD_LBA_MODE
;
3948 cdb
.direction
= DATA_READ
;
3950 ucmd
.udcd_flags
= UDCD_READ
;
3951 ucmd
.udcd_cmd
= &cdb
;
3952 ucmd
.udcd_bufaddr
= (caddr_t
)dkdevid
;
3953 ucmd
.udcd_buflen
= un
->un_secsize
;
3954 ucmd
.udcd_flags
|= UDCD_SILENT
;
3955 dev
= makedevice(ddi_driver_major(DCD_DEVINFO
),
3956 ddi_get_instance(DCD_DEVINFO
) << DCDUNIT_SHIFT
);
3957 mutex_exit(DCD_MUTEX
);
3958 status
= dcdioctl_cmd(dev
, &ucmd
, UIO_SYSSPACE
, UIO_SYSSPACE
);
3959 mutex_enter(DCD_MUTEX
);
3962 kmem_free((caddr_t
)dkdevid
, un
->un_secsize
);
3966 /* Validate the revision */
3968 if ((dkdevid
->dkd_rev_hi
!= DK_DEVID_REV_MSB
) ||
3969 (dkdevid
->dkd_rev_lo
!= DK_DEVID_REV_LSB
)) {
3970 kmem_free((caddr_t
)dkdevid
, un
->un_secsize
);
3974 /* Calculate the checksum */
3976 ip
= (uint_t
*)dkdevid
;
3977 for (i
= 0; i
< ((un
->un_secsize
- sizeof (int))/sizeof (int)); i
++)
3980 /* Compare the checksums */
3982 if (DKD_GETCHKSUM(dkdevid
) != chksum
) {
3983 kmem_free((caddr_t
)dkdevid
, un
->un_secsize
);
3987 /* VAlidate the device id */
3988 if (ddi_devid_valid((ddi_devid_t
)&dkdevid
->dkd_devid
) != DDI_SUCCESS
) {
3989 kmem_free((caddr_t
)dkdevid
, un
->un_secsize
);
3993 /* return a copy of the device id */
3994 sz
= ddi_devid_sizeof((ddi_devid_t
)&dkdevid
->dkd_devid
);
3995 un
->un_devid
= (ddi_devid_t
)kmem_alloc(sz
, KM_SLEEP
);
3996 bcopy(&dkdevid
->dkd_devid
, un
->un_devid
, sz
);
3997 kmem_free((caddr_t
)dkdevid
, un
->un_secsize
);
4003 * Return the device id for the device.
4004 * 1. If the device ID exists then just return it - nothing to do in that case.
4005 * 2. Build one from the drives model number and serial number.
4006 * 3. If there is a problem in building it from serial/model #, then try
4007 * to read it from the acyl region of the disk.
4008 * Note: If this function is unable to return a valid ID then the calling
4009 * point will invoke the routine to create a fabricated ID ans stor it on the
4010 * acyl region of the disk.
4013 dcd_get_devid(struct dcd_disk
*un
)
4017 /* If already registered, return that value */
4018 if (un
->un_devid
!= NULL
)
4019 return (un
->un_devid
);
4021 /* Build a devid from model and serial number, if present */
4022 rc
= dcd_make_devid_from_serial(un
);
4024 if (rc
!= DDI_SUCCESS
) {
4025 /* Read the devid from the disk. */
4026 if (dcd_read_deviceid(un
))
4030 (void) ddi_devid_register(DCD_DEVINFO
, un
->un_devid
);
4031 return (un
->un_devid
);
4036 dcd_create_devid(struct dcd_disk
*un
)
4038 if (ddi_devid_init(DCD_DEVINFO
, DEVID_FAB
, 0, NULL
, (ddi_devid_t
*)
4039 &un
->un_devid
) == DDI_FAILURE
)
4042 if (dcd_write_deviceid(un
)) {
4043 ddi_devid_free(un
->un_devid
);
4044 un
->un_devid
= NULL
;
4048 (void) ddi_devid_register(DCD_DEVINFO
, un
->un_devid
);
4049 return (un
->un_devid
);
4053 * Build a devid from the model and serial number, if present
4054 * Return DDI_SUCCESS or DDI_FAILURE.
4057 dcd_make_devid_from_serial(struct dcd_disk
*un
)
4059 int rc
= DDI_SUCCESS
;
4067 /* initialize the model and serial number information */
4068 model
= un
->un_dcd
->dcd_ident
->dcd_model
;
4069 model_len
= DCD_MODEL_NUMBER_LENGTH
;
4070 serno
= un
->un_dcd
->dcd_ident
->dcd_drvser
;
4071 serno_len
= DCD_SERIAL_NUMBER_LENGTH
;
4073 /* Verify the model and serial number */
4074 dcd_validate_model_serial(model
, &model_len
, model_len
);
4075 if (model_len
== 0) {
4079 dcd_validate_model_serial(serno
, &serno_len
, serno_len
);
4080 if (serno_len
== 0) {
4086 * The device ID will be concatenation of the model number,
4087 * the '=' separator, the serial number. Allocate
4088 * the string and concatenate the components.
4090 total_len
= model_len
+ 1 + serno_len
;
4091 hwid
= kmem_alloc(total_len
, KM_SLEEP
);
4092 bcopy((caddr_t
)model
, (caddr_t
)hwid
, model_len
);
4093 bcopy((caddr_t
)"=", (caddr_t
)&hwid
[model_len
], 1);
4094 bcopy((caddr_t
)serno
, (caddr_t
)&hwid
[model_len
+ 1], serno_len
);
4096 /* Initialize the device ID, trailing NULL not included */
4097 rc
= ddi_devid_init(DCD_DEVINFO
, DEVID_ATA_SERIAL
, total_len
,
4098 hwid
, (ddi_devid_t
*)&un
->un_devid
);
4100 /* Free the allocated string */
4101 kmem_free(hwid
, total_len
);
4107 * Test for a valid model or serial number. Assume that a valid representation
4108 * contains at least one character that is neither a space, 0 digit, or NULL.
4109 * Trim trailing blanks and NULLS from returned length.
4112 dcd_validate_model_serial(char *str
, int *retlen
, int totallen
)
4115 boolean_t ret
= B_FALSE
;
4119 for (i
= 0, tb
= 0; i
< totallen
; i
++) {
4121 if ((ch
!= ' ') && (ch
!= '\0') && (ch
!= '0'))
4123 if ((ch
== ' ') || (ch
== '\0'))
4129 if (ret
== B_TRUE
) {
4130 /* Atleast one non 0 or blank character. */
4131 *retlen
= totallen
- tb
;
4138 clean_print(dev_info_t
*dev
, char *label
, uint_t level
,
4139 char *title
, char *data
, int len
)
4144 (void) sprintf(buf
, "%s:", title
);
4145 for (i
= 0; i
< len
; i
++) {
4146 (void) sprintf(&buf
[strlen(buf
)], "0x%x ", (data
[i
] & 0xff));
4148 (void) sprintf(&buf
[strlen(buf
)], "\n");
4150 dcd_log(dev
, label
, level
, "%s", buf
);
4154 * Print a piece of inquiry data- cleaned up for non-printable characters
4155 * and stopping at the first space character after the beginning of the
4160 inq_fill(char *p
, int l
, char *s
)
4166 if ((c
= *p
++) < ' ' || c
>= 0177) {
4168 } else if (i
!= 1 && c
== ' ') {
4177 dcd_sname(uchar_t status
)
4179 switch (status
& STATUS_ATA_MASK
) {
4181 return ("good status");
4183 case STATUS_ATA_BUSY
:
4187 return ("<unknown status>");
4193 dcd_rname(int reason
)
4195 static char *rnames
[] = {
4205 if (reason
> CMD_DATA_OVR
) {
4206 return ("<unknown reason>");
4208 return (rnames
[reason
]);
4216 dcd_check_wp(dev_t dev
)
4223 * Create device error kstats
4226 dcd_create_errstats(struct dcd_disk
*un
, int instance
)
4229 char kstatname
[KSTAT_STRLEN
];
4231 if (un
->un_errstats
== (kstat_t
*)0) {
4232 (void) sprintf(kstatname
, "dad%d,error", instance
);
4233 un
->un_errstats
= kstat_create("daderror", instance
, kstatname
,
4234 "device_error", KSTAT_TYPE_NAMED
,
4235 sizeof (struct dcd_errstats
)/ sizeof (kstat_named_t
),
4236 KSTAT_FLAG_PERSISTENT
);
4238 if (un
->un_errstats
) {
4239 struct dcd_errstats
*dtp
;
4241 dtp
= (struct dcd_errstats
*)un
->un_errstats
->ks_data
;
4242 kstat_named_init(&dtp
->dcd_softerrs
, "Soft Errors",
4244 kstat_named_init(&dtp
->dcd_harderrs
, "Hard Errors",
4246 kstat_named_init(&dtp
->dcd_transerrs
,
4247 "Transport Errors", KSTAT_DATA_UINT32
);
4248 kstat_named_init(&dtp
->dcd_model
, "Model",
4250 kstat_named_init(&dtp
->dcd_revision
, "Revision",
4252 kstat_named_init(&dtp
->dcd_serial
, "Serial No",
4254 kstat_named_init(&dtp
->dcd_capacity
, "Size",
4255 KSTAT_DATA_ULONGLONG
);
4256 kstat_named_init(&dtp
->dcd_rq_media_err
, "Media Error",
4258 kstat_named_init(&dtp
->dcd_rq_ntrdy_err
,
4259 "Device Not Ready", KSTAT_DATA_UINT32
);
4260 kstat_named_init(&dtp
->dcd_rq_nodev_err
, " No Device",
4262 kstat_named_init(&dtp
->dcd_rq_recov_err
, "Recoverable",
4264 kstat_named_init(&dtp
->dcd_rq_illrq_err
,
4265 "Illegal Request", KSTAT_DATA_UINT32
);
4267 un
->un_errstats
->ks_private
= un
;
4268 un
->un_errstats
->ks_update
= nulldev
;
4269 kstat_install(un
->un_errstats
);
4271 (void) strncpy(&dtp
->dcd_model
.value
.c
[0],
4272 un
->un_dcd
->dcd_ident
->dcd_model
, 16);
4273 (void) strncpy(&dtp
->dcd_serial
.value
.c
[0],
4274 un
->un_dcd
->dcd_ident
->dcd_drvser
, 16);
4275 (void) strncpy(&dtp
->dcd_revision
.value
.c
[0],
4276 un
->un_dcd
->dcd_ident
->dcd_fw
, 8);
4277 dtp
->dcd_capacity
.value
.ui64
=
4278 (uint64_t)((uint64_t)un
->un_diskcapacity
*
4279 (uint64_t)un
->un_lbasize
);
4287 * This has been moved from DADA layer as this does not do anything other than
4288 * retrying the command when it is busy or it does not complete
4291 dcd_poll(struct dcd_pkt
*pkt
)
4293 int busy_count
, rval
= -1, savef
;
4301 savef
= pkt
->pkt_flags
;
4302 savec
= pkt
->pkt_comp
;
4303 savet
= pkt
->pkt_time
;
4305 pkt
->pkt_flags
|= FLAG_NOINTR
;
4309 * Set the Pkt_comp to NULL
4315 * Set the Pkt time for the polled command
4317 if (pkt
->pkt_time
== 0) {
4318 pkt
->pkt_time
= DCD_POLL_TIMEOUT
;
4322 /* Now transport the command */
4323 for (busy_count
= 0; busy_count
< dcd_poll_busycnt
; busy_count
++) {
4324 if ((rval
= dcd_transport(pkt
)) == TRAN_ACCEPT
) {
4325 if (pkt
->pkt_reason
== CMD_INCOMPLETE
&&
4326 pkt
->pkt_state
== 0) {
4328 } else if (pkt
->pkt_reason
== CMD_CMPLT
) {
4333 if (rval
== TRAN_BUSY
) {
4339 pkt
->pkt_flags
= savef
;
4340 pkt
->pkt_comp
= savec
;
4341 pkt
->pkt_time
= savet
;
4347 dcd_translate(struct dadkio_status32
*statp
, struct udcd_cmd
*cmdp
)
4349 if (cmdp
->udcd_status_reg
& STATUS_ATA_BUSY
)
4350 statp
->status
= DADKIO_STAT_NOT_READY
;
4351 else if (cmdp
->udcd_status_reg
& STATUS_ATA_DWF
)
4352 statp
->status
= DADKIO_STAT_HARDWARE_ERROR
;
4353 else if (cmdp
->udcd_status_reg
& STATUS_ATA_CORR
)
4354 statp
->status
= DADKIO_STAT_SOFT_ERROR
;
4355 else if (cmdp
->udcd_status_reg
& STATUS_ATA_ERR
) {
4357 * The error register is valid only when BSY and DRQ not set
4358 * Assumed that HBA has checked this before it gives the data
4360 if (cmdp
->udcd_error_reg
& ERR_AMNF
)
4361 statp
->status
= DADKIO_STAT_NOT_FORMATTED
;
4362 else if (cmdp
->udcd_error_reg
& ERR_TKONF
)
4363 statp
->status
= DADKIO_STAT_NOT_FORMATTED
;
4364 else if (cmdp
->udcd_error_reg
& ERR_ABORT
)
4365 statp
->status
= DADKIO_STAT_ILLEGAL_REQUEST
;
4366 else if (cmdp
->udcd_error_reg
& ERR_IDNF
)
4367 statp
->status
= DADKIO_STAT_NOT_FORMATTED
;
4368 else if (cmdp
->udcd_error_reg
& ERR_UNC
)
4369 statp
->status
= DADKIO_STAT_BUS_ERROR
;
4370 else if (cmdp
->udcd_error_reg
& ERR_BBK
)
4371 statp
->status
= DADKIO_STAT_MEDIUM_ERROR
;
4373 statp
->status
= DADKIO_STAT_NO_ERROR
;
4377 dcd_flush_cache(struct dcd_disk
*un
)
4379 struct dcd_pkt
*pkt
;
4383 if ((pkt
= dcd_init_pkt(ROUTE
, NULL
, NULL
,
4384 (uint32_t)sizeof (struct dcd_cmd
), 2, PP_LEN
,
4385 PKT_CONSISTENT
, NULL_FUNC
, NULL
)) == NULL
) {
4389 makecommand(pkt
, 0, ATA_FLUSH_CACHE
, 0, ADD_LBA_MODE
, 0,
4393 * Send the command. There are chances it might fail on some
4394 * disks since it is not a mandatory command as per ata-4. Try
4395 * 3 times if it fails. The retry count has been randomly selected.
4396 * There is a need for retry since as per the spec FLUSH CACHE can fail
4397 * as a result of unrecoverable error encountered during execution
4398 * of writing data and subsequent command should continue flushing
4401 for (retry_count
= 0; retry_count
< 3; retry_count
++) {
4403 * Set the packet fields.
4406 pkt
->pkt_time
= DCD_POLL_TIMEOUT
;
4407 pkt
->pkt_flags
|= FLAG_FORCENOINTR
;
4408 pkt
->pkt_flags
|= FLAG_NOINTR
;
4409 if (dcd_transport(pkt
) == TRAN_ACCEPT
) {
4410 if (pkt
->pkt_reason
== CMD_CMPLT
) {
4415 * Note the wait time value of 100ms is same as in the
4418 drv_usecwait(1000000);
4420 (void) dcd_destroy_pkt(pkt
);
4424 dcd_send_lb_rw_cmd(dev_info_t
*devi
, void *bufaddr
,
4425 diskaddr_t start_block
, size_t reqlength
, uchar_t cmd
)
4427 struct dcd_pkt
*pkt
;
4429 diskaddr_t real_addr
= start_block
;
4430 size_t buffer_size
= reqlength
;
4431 uchar_t command
, tmp
;
4433 struct dcd_disk
*un
;
4435 un
= ddi_get_soft_state(dcd_state
, ddi_get_instance(devi
));
4439 bp
= dcd_alloc_consistent_buf(ROUTE
, NULL
,
4440 buffer_size
, B_READ
, NULL_FUNC
, NULL
);
4442 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
4443 "no bp for disk label\n");
4447 pkt
= dcd_init_pkt(ROUTE
, NULL
,
4448 bp
, (uint32_t)sizeof (struct dcd_cmd
), 2, PP_LEN
,
4449 PKT_CONSISTENT
, NULL_FUNC
, NULL
);
4452 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
4453 "no memory for disk label\n");
4454 dcd_free_consistent_buf(bp
);
4458 if (cmd
== TG_READ
) {
4459 bzero(bp
->b_un
.b_addr
, buffer_size
);
4462 bcopy((caddr_t
)bufaddr
, bp
->b_un
.b_addr
, buffer_size
);
4466 mutex_enter(DCD_MUTEX
);
4467 if ((un
->un_dp
->options
& DMA_SUPPORTTED
) == DMA_SUPPORTTED
) {
4468 if (cmd
== TG_READ
) {
4469 command
= ATA_READ_DMA
;
4471 command
= ATA_WRITE_DMA
;
4474 if (cmd
== TG_READ
) {
4475 if (un
->un_dp
->options
& BLOCK_MODE
)
4476 command
= ATA_READ_MULTIPLE
;
4480 if (un
->un_dp
->options
& BLOCK_MODE
)
4481 command
= ATA_READ_MULTIPLE
;
4483 command
= ATA_WRITE
;
4486 mutex_exit(DCD_MUTEX
);
4487 (void) makecommand(pkt
, 0, command
, real_addr
, ADD_LBA_MODE
,
4488 buffer_size
, tmp
, 0);
4490 for (i
= 0; i
< 3; i
++) {
4491 if (dcd_poll(pkt
) || SCBP_C(pkt
) != STATUS_GOOD
||
4492 (pkt
->pkt_state
& STATE_XFERRED_DATA
) == 0 ||
4493 (pkt
->pkt_resid
!= 0)) {
4494 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
4495 "Status %x, state %x, resid %lx\n",
4496 SCBP_C(pkt
), pkt
->pkt_state
, pkt
->pkt_resid
);
4504 dcd_destroy_pkt(pkt
);
4505 dcd_free_consistent_buf(bp
);
4509 if (cmd
== TG_READ
) {
4510 bcopy(bp
->b_un
.b_addr
, bufaddr
, reqlength
);
4514 dcd_destroy_pkt(pkt
);
4515 dcd_free_consistent_buf(bp
);
4519 static int dcd_compute_dk_capacity(struct dcd_device
*devp
,
4520 diskaddr_t
*capacity
)
4523 diskaddr_t no_of_lbasec
;
4525 cap
= devp
->dcd_ident
->dcd_fixcyls
*
4526 devp
->dcd_ident
->dcd_heads
*
4527 devp
->dcd_ident
->dcd_sectors
;
4528 no_of_lbasec
= devp
->dcd_ident
->dcd_addrsec
[1];
4529 no_of_lbasec
= no_of_lbasec
<< 16;
4530 no_of_lbasec
= no_of_lbasec
| devp
->dcd_ident
->dcd_addrsec
[0];
4532 if (no_of_lbasec
> cap
) {
4536 if (cap
!= ((uint32_t)-1))
4545 dcd_lb_rdwr(dev_info_t
*devi
, uchar_t cmd
, void *bufaddr
,
4546 diskaddr_t start_block
, size_t reqlength
, void *tg_cookie
)
4548 if (cmd
!= TG_READ
&& cmd
!= TG_WRITE
)
4551 return (dcd_send_lb_rw_cmd(devi
, bufaddr
, start_block
,
4556 dcd_lb_getphygeom(dev_info_t
*devi
, cmlb_geom_t
*phygeomp
)
4558 struct dcd_device
*devp
;
4559 uint32_t no_of_lbasec
, capacity
, calculated_cylinders
;
4561 devp
= ddi_get_driver_private(devi
);
4563 if ((devp
->dcd_ident
->dcd_config
& ATAPI_DEVICE
) == 0) {
4564 if (devp
->dcd_ident
->dcd_config
& ATANON_REMOVABLE
) {
4565 phygeomp
->g_ncyl
= devp
->dcd_ident
->dcd_fixcyls
- 2;
4566 phygeomp
->g_acyl
= 2;
4567 phygeomp
->g_nhead
= devp
->dcd_ident
->dcd_heads
;
4568 phygeomp
->g_nsect
= devp
->dcd_ident
->dcd_sectors
;
4570 no_of_lbasec
= devp
->dcd_ident
->dcd_addrsec
[1];
4571 no_of_lbasec
= no_of_lbasec
<< 16;
4572 no_of_lbasec
= no_of_lbasec
|
4573 devp
->dcd_ident
->dcd_addrsec
[0];
4574 capacity
= devp
->dcd_ident
->dcd_fixcyls
*
4575 devp
->dcd_ident
->dcd_heads
*
4576 devp
->dcd_ident
->dcd_sectors
;
4577 if (no_of_lbasec
> capacity
) {
4578 capacity
= no_of_lbasec
;
4579 if (capacity
> NUM_SECTORS_32G
) {
4581 * if the capacity is greater than 32G,
4582 * then 255 is the sectors per track.
4583 * This should be good until 128G disk
4584 * capacity, which is the current ATA-4
4587 phygeomp
->g_nsect
= 255;
4591 * If the disk capacity is >= 128GB then no. of
4592 * addressable sectors will be set to 0xfffffff
4593 * in the IDENTIFY info. In that case set the
4594 * no. of pcyl to the Max. 16bit value.
4597 calculated_cylinders
= (capacity
) /
4598 (phygeomp
->g_nhead
* phygeomp
->g_nsect
);
4599 if (calculated_cylinders
>= USHRT_MAX
) {
4600 phygeomp
->g_ncyl
= USHRT_MAX
- 2;
4603 calculated_cylinders
- 2;
4607 phygeomp
->g_capacity
= capacity
;
4608 phygeomp
->g_intrlv
= 0;
4609 phygeomp
->g_rpm
= 5400;
4610 phygeomp
->g_secsize
= devp
->dcd_ident
->dcd_secsiz
;
4623 dcd_lb_getinfo(dev_info_t
*devi
, int cmd
, void *arg
, void *tg_cookie
)
4625 struct dcd_disk
*un
;
4627 un
= ddi_get_soft_state(dcd_state
, ddi_get_instance(devi
));
4634 return (dcd_lb_getphygeom(devi
, (cmlb_geom_t
*)arg
));
4636 case TG_GETVIRTGEOM
:
4639 case TG_GETCAPACITY
:
4640 case TG_GETBLOCKSIZE
:
4641 mutex_enter(DCD_MUTEX
);
4642 if (un
->un_diskcapacity
<= 0) {
4643 mutex_exit(DCD_MUTEX
);
4644 dcd_log(DCD_DEVINFO
, dcd_label
, CE_WARN
,
4645 "invalid disk capacity\n");
4648 if (cmd
== TG_GETCAPACITY
)
4649 *(diskaddr_t
*)arg
= un
->un_diskcapacity
;
4651 *(uint32_t *)arg
= DEV_BSIZE
;
4653 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
, "capacity %x\n",
4654 un
->un_diskcapacity
);
4655 mutex_exit(DCD_MUTEX
);
4659 mutex_enter(DCD_MUTEX
);
4660 *(tg_attribute_t
*)arg
= un
->un_tgattribute
;
4661 DAD_DEBUG2(DCD_DEVINFO
, dcd_label
, DCD_DEBUG
,
4662 "media_is_writable %x\n",
4663 un
->un_tgattribute
.media_is_writable
);
4664 mutex_exit(DCD_MUTEX
);