/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Direct Attached disk driver for SPARC machines.
 */

/*
 * Includes, Declarations and Local Data
 */
#include <sys/dada/dada.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/vtoc.h>
#include <sys/dada/targets/daddef.h>
#include <sys/dada/targets/dadpriv.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/aio_req.h>
#include <sys/note.h>
#include <sys/cmlb.h>
/*
 * Global Error Levels for Error Reporting
 */
int dcd_error_level = DCD_ERR_RETRYABLE;

/*
 * Local Static Data
 */
static int dcd_io_time = DCD_IO_TIME;
static int dcd_retry_count = DCD_RETRY_COUNT;
static int dcd_report_pfa = 1;
static int dcd_rot_delay = 4;
static int dcd_poll_busycnt = DCD_POLL_TIMEOUT;
/*
 * Local Function Prototypes
 */
static int dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int dcdstrategy(struct buf *bp);
static int dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int dcdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int dcdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int dcd_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int,
    char *, caddr_t, int *);
static int dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static void dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi);
static int dcd_doattach(dev_info_t *devi, int (*f)());
static int dcd_validate_geometry(struct dcd_disk *un);
static ddi_devid_t dcd_get_devid(struct dcd_disk *un);
static ddi_devid_t dcd_create_devid(struct dcd_disk *un);
static int dcd_make_devid_from_serial(struct dcd_disk *un);
static void dcd_validate_model_serial(char *str, int *retlen, int totallen);
static int dcd_read_deviceid(struct dcd_disk *un);
static int dcd_write_deviceid(struct dcd_disk *un);
static int dcd_poll(struct dcd_pkt *pkt);
static char *dcd_rname(int reason);
static void dcd_flush_cache(struct dcd_disk *un);

static int dcd_compute_dk_capacity(struct dcd_device *devp,
    diskaddr_t *capacity);
static int dcd_send_lb_rw_cmd(dev_info_t *devinfo, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, uchar_t cmd);

static void dcdmin(struct buf *bp);

static int dcdioctl_cmd(dev_t, struct udcd_cmd *,
    enum uio_seg, enum uio_seg);

static void dcdstart(struct dcd_disk *un);
static void dcddone_and_mutex_exit(struct dcd_disk *un, struct buf *bp);
static void make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*f)());
static void dcdudcdmin(struct buf *bp);

static int dcdrunout(caddr_t);
static int dcd_check_wp(dev_t dev);
static int dcd_unit_ready(dev_t dev);
static void dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp,
    struct dcd_disk *un);
static void dcdintr(struct dcd_pkt *pkt);
static int dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp);
static void dcd_offline(struct dcd_disk *un, int bechatty);
static int dcd_ready_and_valid(dev_t dev, struct dcd_disk *un);
static void dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt);
static void dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp);
static int dcdflushdone(struct buf *bp);

/* Function prototypes for cmlb */

static int dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp);
static int dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg,
    void *tg_cookie);
static cmlb_tg_ops_t dcd_lb_ops = {
	TG_DK_OPS_VERSION_1,
	dcd_lb_rdwr,
	dcd_lb_getinfo
};
/*
 * Error and Logging Functions
 */
static void clean_print(dev_info_t *dev, char *label, uint_t level,
    char *title, char *data, int len);
static void dcdrestart(void *arg);

static int dcd_check_error(struct dcd_disk *un, struct buf *bp);

/*
 * Error statistics create/update functions
 */
static int dcd_create_errstats(struct dcd_disk *, int);

/*PRINTFLIKE4*/
extern void dcd_log(dev_info_t *, char *, uint_t, const char *, ...)
    __KPRINTFLIKE(4);
extern void makecommand(struct dcd_pkt *, int, uchar_t, uint32_t,
    uchar_t, uint32_t, uchar_t, uchar_t);
/*
 * Configuration Routines
 */
static int dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int dcdprobe(dev_info_t *devi);
static int dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd);
static int dcd_dr_detach(dev_info_t *devi);
static int dcdpower(dev_info_t *devi, int component, int level);

static void *dcd_state;
static int dcd_max_instance;
static char *dcd_label = "dad";

static char *diskokay = "disk okay\n";

#if DEBUG || lint
#define	DCDDEBUG
#endif
int dcd_test_flag = 0;

/*
 * Debugging macros
 */
#ifdef DCDDEBUG
static int dcddebug = 0;
#define	DEBUGGING	(dcddebug > 1)
#define	DAD_DEBUG	if (dcddebug == 1) dcd_log
#define	DAD_DEBUG2	if (dcddebug > 1) dcd_log
#else	/* DCDDEBUG */
#define	dcddebug	(0)
#define	DEBUGGING	(0)
#define	DAD_DEBUG	if (0) dcd_log
#define	DAD_DEBUG2	if (0) dcd_log
#endif
/*
 * We use the pkt_private area for storing the bp and the retry counts.
 * XXX: Is this really useful?
 */
struct dcd_pkt_private {
	struct buf	*dcdpp_bp;
	short		 dcdpp_retry_count;
	short		 dcdpp_victim_retry_count;
};

_NOTE(SCHEME_PROTECTS_DATA("Unique per pkt", dcd_pkt_private buf))

#define	PP_LEN	(sizeof (struct dcd_pkt_private))
#define	PKT_SET_BP(pkt, bp)	\
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp = bp
#define	PKT_GET_BP(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp)

#define	PKT_SET_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count = n

#define	PKT_GET_RETRY_CNT(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count)

#define	PKT_INCR_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count += n

#define	PKT_SET_VICTIM_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
	    = n

#define	PKT_GET_VICTIM_RETRY_CNT(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count)
#define	PKT_INCR_VICTIM_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
	    += n
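
/*
 * Usage sketch (illustrative, not part of the original source): a
 * freshly allocated packet whose pkt_private area is PP_LEN bytes
 * would typically be initialized with the accessors above.  The guard
 * macro is hypothetical and never defined, so this is not compiled.
 */
#ifdef DCD_PKT_PRIVATE_EXAMPLE
static void
dcd_pkt_private_init(struct dcd_pkt *pkt, struct buf *bp)
{
	PKT_SET_BP(pkt, bp);			/* remember originating buf */
	PKT_SET_RETRY_CNT(pkt, 0);		/* no retries yet */
	PKT_SET_VICTIM_RETRY_CNT(pkt, 0);	/* no victim retries yet */
}
#endif	/* DCD_PKT_PRIVATE_EXAMPLE */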
#define	DISK_NOT_READY_RETRY_COUNT	(dcd_retry_count / 2)

/*
 * Urk!
 */
#define	SET_BP_ERROR(bp, err)	\
	bioerror(bp, err);

#define	IOSP			KSTAT_IO_PTR(un->un_stats)
#define	IO_PARTITION_STATS	un->un_pstats[DCDPART(bp->b_edev)]
#define	IOSP_PARTITION		KSTAT_IO_PTR(IO_PARTITION_STATS)
#define	DCD_DO_KSTATS(un, kstat_function, bp) \
	ASSERT(mutex_owned(DCD_MUTEX)); \
	if (bp != un->un_sbufp) { \
		if (un->un_stats) { \
			kstat_function(IOSP); \
		} \
		if (IO_PARTITION_STATS) { \
			kstat_function(IOSP_PARTITION); \
		} \
	}

#define	DCD_DO_ERRSTATS(un, x) \
	if (un->un_errstats) { \
		struct dcd_errstats *dtp; \
		dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
		dtp->x.value.ui32++; \
	}
#define	GET_SOFT_STATE(dev) \
	struct dcd_disk *un; \
	int instance, part; \
	minor_t minor = getminor(dev); \
\
	part = minor & DCDPART_MASK; \
	instance = minor >> DCDUNIT_SHIFT; \
	if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL) \
		return (ENXIO);

#define	LOGICAL_BLOCK_ALIGN(blkno, blknoshift) \
	(((blkno) & ((1 << (blknoshift)) - 1)) == 0)
/*
 * After the following number of sectors, the cylinder number spills over
 * 0xFFFF if sectors = 63 and heads = 16.
 */
#define	NUM_SECTORS_32G	0x3EFFC10
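
/*
 * Arithmetic sketch (not from the original source): with 16 heads and
 * 63 sectors per track, one cylinder holds 16 * 63 == 1008 sectors, so
 * the last LBA whose cylinder number still fits in 16 bits is
 * 0xFFFF * 1008 == 0x3EFFC10.  A hypothetical compile-time check using
 * the classic negative-array-size trick; the guard is never defined:
 */
#ifdef DCD_GEOM_CHECK_EXAMPLE
typedef char dcd_num_sectors_32g_check[
    (0xFFFF * 16 * 63 == NUM_SECTORS_32G) ? 1 : -1];
#endif	/* DCD_GEOM_CHECK_EXAMPLE */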
/*
 * Configuration Data
 */

/*
 * Device driver ops vector
 */
static struct cb_ops dcd_cb_ops = {
	dcdopen,		/* open */
	dcdclose,		/* close */
	dcdstrategy,		/* strategy */
	nodev,			/* print */
	dcddump,		/* dump */
	dcdread,		/* read */
	dcdwrite,		/* write */
	dcdioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	dcd_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	dcdaread,		/* async I/O read entry point */
	dcdawrite		/* async I/O write entry point */
};
static struct dev_ops dcd_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	dcdinfo,		/* info */
	nulldev,		/* identify */
	dcdprobe,		/* probe */
	dcdattach,		/* attach */
	dcddetach,		/* detach */
	dcdreset,		/* reset */
	&dcd_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	dcdpower,		/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"DAD Disk Driver",	/* Name of the module. */
	&dcd_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
/*
 * The dcd_attach_mutex only protects dcd_max_instance in multi-threaded
 * attach situations.
 */
static kmutex_t dcd_attach_mutex;
int
_init(void)
{
	int e;

	if ((e = ddi_soft_state_init(&dcd_state, sizeof (struct dcd_disk),
	    DCD_MAXUNIT)) != 0)
		return (e);

	mutex_init(&dcd_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	e = mod_install(&modlinkage);
	if (e != 0) {
		mutex_destroy(&dcd_attach_mutex);
		ddi_soft_state_fini(&dcd_state);
		return (e);
	}

	return (e);
}

int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);

	ddi_soft_state_fini(&dcd_state);
	mutex_destroy(&dcd_attach_mutex);

	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
static int
dcdprobe(dev_info_t *devi)
{
	struct dcd_device *devp;
	int rval = DDI_PROBE_PARTIAL;
	int instance;

	devp = ddi_get_driver_private(devi);
	instance = ddi_get_instance(devi);

	/*
	 * Keep a count of how many disks (i.e. highest instance no.) we have.
	 * XXX currently not used, but may be useful later again.
	 */
	mutex_enter(&dcd_attach_mutex);
	if (instance > dcd_max_instance)
		dcd_max_instance = instance;
	mutex_exit(&dcd_attach_mutex);

	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "dcdprobe:\n");

	if (ddi_get_soft_state(dcd_state, instance) != NULL)
		return (DDI_PROBE_PARTIAL);

	/*
	 * Turn around and call the utility probe routine
	 * to see whether we actually have a disk at this address.
	 */
	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
	    "dcdprobe: %x\n", dcd_probe(devp, NULL_FUNC));

	switch (dcd_probe(devp, NULL_FUNC)) {
	default:
	case DCDPROBE_NORESP:
	case DCDPROBE_NONCCS:
	case DCDPROBE_NOMEM:
	case DCDPROBE_FAILURE:
	case DCDPROBE_BUSY:
		break;

	case DCDPROBE_EXISTS:
		/*
		 * Check whether it is an ATA device and then
		 * return SUCCESS.
		 */
		DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
		    "config %x\n", devp->dcd_ident->dcd_config);
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				rval = DDI_PROBE_SUCCESS;
			} else
				rval = DDI_PROBE_FAILURE;
		} else {
			rval = DDI_PROBE_FAILURE;
		}
		break;
	}
	dcd_unprobe(devp);

	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
	    "dcdprobe returns %x\n", rval);

	return (rval);
}
/*ARGSUSED*/
static int
dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance, rval;
	struct dcd_device *devp;
	struct dcd_disk *un;
	struct diskhd *dp;
	char *pm_comp[] =
	    { "NAME=ide-disk", "0=standby", "1=idle", "2=active" };

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	devp = ddi_get_driver_private(devi);
	instance = ddi_get_instance(devi);
	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "Attach Started\n");

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if (!(un = ddi_get_soft_state(dcd_state, instance)))
			return (DDI_FAILURE);
		mutex_enter(DCD_MUTEX);
		Restore_state(un);
		/*
		 * Restore the state which was saved, so that
		 * un_last_state holds the right state.
		 */
		un->un_last_state = un->un_save_state;
		un->un_throttle = 2;
		cv_broadcast(&un->un_suspend_cv);
		/*
		 * Raise the power level of the device to active.
		 */
		mutex_exit(DCD_MUTEX);
		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
		mutex_enter(DCD_MUTEX);

		/*
		 * Start the unit - if this is a low-activity device,
		 * commands in the queue will have to wait until new
		 * commands come in, which may take a while.
		 * Also, we specifically don't check un_ncmds
		 * because we know that there really are no
		 * commands in progress after the unit was suspended,
		 * and we could have reached the throttle level, been
		 * suspended, and have no new commands coming in for
		 * a while. Highly unlikely, but so is the low-
		 * activity disk scenario.
		 */
		dp = &un->un_utab;
		if (dp->b_actf && (dp->b_forw == NULL)) {
			dcdstart(un);
		}

		mutex_exit(DCD_MUTEX);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (dcd_doattach(devi, SLEEP_FUNC) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (!(un = (struct dcd_disk *)
	    ddi_get_soft_state(dcd_state, instance))) {
		return (DDI_FAILURE);
	}
	devp->dcd_private = (ataopaque_t)un;

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers).
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);

	/*
	 * Since the dad device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * does need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", (caddr_t)"needs-suspend-resume");

	/*
	 * Initialize power management bookkeeping;
	 * Create components - in the IDE case there are 3 levels and one
	 * component, the levels being: active, idle, standby.
	 */
	rval = ddi_prop_update_string_array(DDI_DEV_T_NONE,
	    devi, "pm-components", pm_comp, 4);
	if (rval == DDI_PROP_SUCCESS) {
		/*
		 * Ignore the return value of pm_raise_power.
		 * Even if we check the return values and
		 * remove the property created above, the PM
		 * framework will not honour the change after the
		 * first call to pm_raise_power. Hence, the
		 * removal of that property does not help if
		 * pm_raise_power fails.
		 */
		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
	}

	ddi_report_dev(devi);

	cmlb_alloc_handle(&un->un_dklbhandle);

	if (cmlb_attach(devi,
	    &dcd_lb_ops,
	    0,
	    B_FALSE,
	    B_FALSE,
	    DDI_NT_BLOCK_CHAN,
	    CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8,
	    un->un_dklbhandle,
	    0) != 0) {
		cmlb_free_handle(&un->un_dklbhandle);
		dcd_free_softstate(un, devi);
		return (DDI_FAILURE);
	}

	mutex_enter(DCD_MUTEX);
	(void) dcd_validate_geometry(un);

	/* Get devid; create a devid ONLY IF we could not get the ID */
	if (dcd_get_devid(un) == NULL) {
		/* Create the fab'd devid */
		(void) dcd_create_devid(un);
	}
	mutex_exit(DCD_MUTEX);

	return (DDI_SUCCESS);
}
static void
dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi)
{
	struct dcd_device *devp;
	int instance = ddi_get_instance(devi);

	devp = ddi_get_driver_private(devi);

	if (un) {
		sema_destroy(&un->un_semoclose);
		cv_destroy(&un->un_sbuf_cv);
		cv_destroy(&un->un_state_cv);
		cv_destroy(&un->un_disk_busy_cv);
		cv_destroy(&un->un_suspend_cv);

		/*
		 * Deallocate command packet resources.
		 */
		if (un->un_sbufp)
			freerbuf(un->un_sbufp);
		if (un->un_dp) {
			kmem_free((caddr_t)un->un_dp, sizeof (*un->un_dp));
		}

		/*
		 * Unregister the devid and free devid resources allocated.
		 */
		ddi_devid_unregister(DCD_DEVINFO);
		if (un->un_devid) {
			ddi_devid_free(un->un_devid);
			un->un_devid = NULL;
		}

		/*
		 * Delete kstats. Kstats for non-CD devices are deleted
		 * in dcdclose.
		 */
		if (un->un_stats) {
			kstat_delete(un->un_stats);
		}
	}

	/*
	 * Cleanup scsi_device resources.
	 */
	ddi_soft_state_free(dcd_state, instance);
	devp->dcd_private = (ataopaque_t)0;
	/* unprobe scsi device */
	dcd_unprobe(devp);

	/* Remove properties created during attach */
	ddi_prop_remove_all(devi);
}
static int
dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance;
	struct dcd_disk *un;
	clock_t wait_cmds_complete;

	instance = ddi_get_instance(devi);

	if (!(un = ddi_get_soft_state(dcd_state, instance)))
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		return (dcd_dr_detach(devi));

	case DDI_SUSPEND:
		mutex_enter(DCD_MUTEX);
		if (un->un_state == DCD_STATE_SUSPENDED) {
			mutex_exit(DCD_MUTEX);
			return (DDI_SUCCESS);
		}
		un->un_throttle = 0;
		/*
		 * Save the last state first.
		 */
		un->un_save_state = un->un_last_state;

		New_state(un, DCD_STATE_SUSPENDED);

		/*
		 * Wait until the current operation has completed. If we
		 * are in the resource-wait state (with an intr outstanding),
		 * then we need to wait until the intr completes and
		 * starts the next cmd. We wait for
		 * DCD_WAIT_CMDS_COMPLETE seconds before failing the
		 * DDI_SUSPEND.
		 */
		wait_cmds_complete = ddi_get_lbolt();
		wait_cmds_complete +=
		    DCD_WAIT_CMDS_COMPLETE * drv_usectohz(1000000);

		while (un->un_ncmds) {
			if (cv_timedwait(&un->un_disk_busy_cv,
			    DCD_MUTEX, wait_cmds_complete) == -1) {
				/*
				 * Commands didn't finish in the
				 * specified time; fail the DDI_SUSPEND.
				 */
				DAD_DEBUG2(DCD_DEVINFO, dcd_label,
				    DCD_DEBUG, "dcddetach: SUSPEND "
				    "failed due to outstanding cmds\n");
				Restore_state(un);
				mutex_exit(DCD_MUTEX);
				return (DDI_FAILURE);
			}
		}
		mutex_exit(DCD_MUTEX);
		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}
/*
 * The reset entry point is invoked at system shutdown time, or through
 * the CPR code at system suspend.
 * It flushes the cache; this is expected to be the last I/O operation to
 * the disk before system reset/power off.
 */
/*ARGSUSED*/
static int
dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	struct dcd_disk *un;
	int instance;

	instance = ddi_get_instance(dip);

	if (!(un = ddi_get_soft_state(dcd_state, instance)))
		return (DDI_FAILURE);

	dcd_flush_cache(un);

	return (DDI_SUCCESS);
}
static int
dcd_dr_detach(dev_info_t *devi)
{
	struct dcd_device *devp;
	struct dcd_disk *un;

	/*
	 * Get scsi_device structure for this instance.
	 */
	if ((devp = ddi_get_driver_private(devi)) == NULL)
		return (DDI_FAILURE);

	/*
	 * Get dcd_disk structure containing target 'private' information.
	 */
	un = (struct dcd_disk *)devp->dcd_private;

	/*
	 * Verify there are NO outstanding commands issued to this device,
	 * i.e. un_ncmds == 0.
	 * It's possible to have outstanding commands through the physio
	 * code path, even though everything's closed.
	 */
	_NOTE(COMPETING_THREADS_NOW);
	mutex_enter(DCD_MUTEX);
	if (un->un_ncmds) {
		mutex_exit(DCD_MUTEX);
		_NOTE(NO_COMPETING_THREADS_NOW);
		return (DDI_FAILURE);
	}

	mutex_exit(DCD_MUTEX);

	cmlb_detach(un->un_dklbhandle, 0);
	cmlb_free_handle(&un->un_dklbhandle);

	/*
	 * Lower the power state of the device,
	 * i.e. the minimum power consumption state - sleep.
	 */
	(void) pm_lower_power(DCD_DEVINFO, 0, DCD_DEVICE_STANDBY);

	_NOTE(NO_COMPETING_THREADS_NOW);

	/*
	 * At this point there are no competing threads anymore;
	 * release active MT locks and all device resources.
	 */
	dcd_free_softstate(un, devi);

	return (DDI_SUCCESS);
}
static int
dcdpower(dev_info_t *devi, int component, int level)
{
	struct dcd_pkt *pkt;
	struct dcd_disk *un;
	int instance;
	uchar_t cmd;

	instance = ddi_get_instance(devi);

	if (!(un = ddi_get_soft_state(dcd_state, instance)) ||
	    (DCD_DEVICE_STANDBY > level) || (level > DCD_DEVICE_ACTIVE) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	mutex_enter(DCD_MUTEX);
	/*
	 * If there are active commands for the device, or the device will
	 * be active soon, while at the same time there is a request to
	 * lower power, return failure.
	 */
	if ((un->un_ncmds) && (level != DCD_DEVICE_ACTIVE)) {
		mutex_exit(DCD_MUTEX);
		return (DDI_FAILURE);
	}

	if ((un->un_state == DCD_STATE_OFFLINE) ||
	    (un->un_state == DCD_STATE_FATAL)) {
		mutex_exit(DCD_MUTEX);
		return (DDI_FAILURE);
	}

	if (level == DCD_DEVICE_ACTIVE) {
		/*
		 * No need to fire any command; just set the state structure
		 * to indicate the previous state and set the level to active.
		 */
		un->un_power_level = DCD_DEVICE_ACTIVE;
		if (un->un_state == DCD_STATE_PM_SUSPENDED)
			Restore_state(un);
		mutex_exit(DCD_MUTEX);
	} else {
		pkt = dcd_init_pkt(ROUTE, NULL,
		    NULL, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
		    PKT_CONSISTENT, NULL_FUNC, NULL);

		if (pkt == NULL) {
			mutex_exit(DCD_MUTEX);
			return (DDI_FAILURE);
		}

		switch (level) {
		case DCD_DEVICE_IDLE:
			cmd = ATA_IDLE_IMMEDIATE;
			break;

		case DCD_DEVICE_STANDBY:
			cmd = ATA_STANDBY_IMMEDIATE;
			break;
		}

		makecommand(pkt, 0, cmd, 0, 0, 0, NO_DATA_XFER, 0);
		mutex_exit(DCD_MUTEX);
		/*
		 * Issue the appropriate command.
		 */
		if ((dcd_poll(pkt)) || (SCBP_C(pkt) != STATUS_GOOD)) {
			dcd_destroy_pkt(pkt);
			return (DDI_FAILURE);
		}
		dcd_destroy_pkt(pkt);
		mutex_enter(DCD_MUTEX);
		if (un->un_state != DCD_STATE_PM_SUSPENDED)
			New_state(un, DCD_STATE_PM_SUSPENDED);
		un->un_power_level = level;
		mutex_exit(DCD_MUTEX);
	}

	return (DDI_SUCCESS);
}
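
/*
 * Illustrative mapping (inferred from the "pm-components" strings set
 * up in dcdattach, not stated explicitly in the original source):
 * component 0 has levels 0=standby, 1=idle, 2=active, matching
 * DCD_DEVICE_STANDBY/IDLE/ACTIVE above; dcdpower() translates the two
 * lower levels into ATA_STANDBY_IMMEDIATE / ATA_IDLE_IMMEDIATE commands.
 */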
static int
dcd_doattach(dev_info_t *devi, int (*canwait)())
{
	struct dcd_device *devp;
	struct dcd_disk *un = NULL;
	int instance;
	int km_flags = (canwait != NULL_FUNC)? KM_SLEEP : KM_NOSLEEP;
	int rval;
	char *prop_template = "target%x-dcd-options";
	int options;
	char prop_str[32];
	int target;
	diskaddr_t capacity;

	devp = ddi_get_driver_private(devi);

	/*
	 * Call the routine dcd_probe to do some of the dirty work.
	 * If the INQUIRY command succeeds, the field dcd_inq in the
	 * device structure will be filled in. The dcd_sense structure
	 * will also be allocated.
	 */
	switch (dcd_probe(devp, canwait)) {
	default:
		return (DDI_FAILURE);

	case DCDPROBE_EXISTS:
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				rval = DDI_SUCCESS;
			} else {
				rval = DDI_FAILURE;
				goto error;
			}
		} else {
			rval = DDI_FAILURE;
			goto error;
		}
	}

	instance = ddi_get_instance(devp->dcd_dev);

	if (ddi_soft_state_zalloc(dcd_state, instance) != DDI_SUCCESS) {
		rval = DDI_FAILURE;
		goto error;
	}

	un = ddi_get_soft_state(dcd_state, instance);

	un->un_sbufp = getrbuf(km_flags);
	if (un->un_sbufp == NULL) {
		rval = DDI_FAILURE;
		goto error;
	}

	un->un_dcd = devp;
	un->un_power_level = -1;
	un->un_tgattribute.media_is_writable = 1;

	sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
	cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	/* Initialize the power management condition variable */
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);

	if (un->un_dp == 0) {
		/*
		 * Assume CCS drive, assume parity, but call
		 * it a CDROM if it is a RODIRECT device.
		 */
		un->un_dp = (struct dcd_drivetype *)
		    kmem_zalloc(sizeof (struct dcd_drivetype), km_flags);
		if (!un->un_dp) {
			rval = DDI_FAILURE;
			goto error;
		}
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				un->un_dp->ctype = CTYPE_DISK;
			}
		} else {
			rval = DDI_FAILURE;
			goto error;
		}
		un->un_dp->name = "CCS";
		un->un_dp->options = 0;
	}
	/*
	 * Allow I/O requests at un_secsize offset in multiples of un_secsize.
	 */
	un->un_secsize = DEV_BSIZE;

	/*
	 * If the device is not a removable-media device, it should already
	 * be ready, so issuing another identify is not needed. Get the
	 * capacity from the identify data and store it here.
	 */
	if (dcd_compute_dk_capacity(devp, &capacity) == 0) {
		un->un_diskcapacity = capacity;
		un->un_lbasize = DEV_BSIZE;
	}

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Geometry Data\n");
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "cyls %x, heads %x",
	    devp->dcd_ident->dcd_fixcyls,
	    devp->dcd_ident->dcd_heads);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "sectors %x,",
	    devp->dcd_ident->dcd_sectors);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %llx\n",
	    capacity);

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcdprobe: drive selected\n");
	/*
	 * Check for the property target<n>-dcd-options to find the options
	 * set by the HBA driver for this target, so that we can set the
	 * unit-structure variable and send commands accordingly.
	 */
	target = devp->dcd_address->da_target;
	(void) sprintf(prop_str, prop_template, target);
	options = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_NOTPROM,
	    prop_str, -1);
	if (options < 0) {
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "No per target properties");
	} else {
		if ((options & DCD_DMA_MODE) == DCD_DMA_MODE) {
			un->un_dp->options |= DMA_SUPPORTTED;
			un->un_dp->dma_mode = (options >> 3) & 0x03;
			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "mode %x\n", un->un_dp->dma_mode);
		} else {
			un->un_dp->options &= ~DMA_SUPPORTTED;
			un->un_dp->pio_mode = options & 0x7;
			if (options & DCD_BLOCK_MODE)
				un->un_dp->options |= BLOCK_MODE;
			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "mode %x\n", un->un_dp->pio_mode);
		}
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "options %x,", un->un_dp->options);
	}
	un->un_throttle = 2;
	/*
	 * Set the default max_xfer_size. This should depend on whether
	 * block mode is supported by the device or not.
	 */
	un->un_max_xfer_size = MAX_ATA_XFER_SIZE;

	/*
	 * Set the write-cache-enable soft state.
	 *
	 * WCE is only supported in ATAPI-4 or higher; for
	 * lower-rev devices, we must assume the write cache is
	 * enabled.
	 */
	mutex_enter(DCD_MUTEX);
	un->un_write_cache_enabled = (devp->dcd_ident->dcd_majvers == 0xffff) ||
	    ((devp->dcd_ident->dcd_majvers & IDENTIFY_80_ATAPI_4) == 0) ||
	    (devp->dcd_ident->dcd_features85 & IDENTIFY_85_WCE) != 0;
	mutex_exit(DCD_MUTEX);
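
	/*
	 * Illustrative truth table for the expression above (not part
	 * of the original source):
	 *   dcd_majvers == 0xffff            -> identify data invalid,
	 *                                       assume WCE on
	 *   ATAPI-4 bit clear in dcd_majvers -> pre-ATAPI-4 device,
	 *                                       assume WCE on
	 *   otherwise                        -> trust the WCE bit in
	 *                                       identify word 85
	 */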
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcd_doattach returns good\n");

	return (rval);

error:
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcd_doattach failed\n");
	dcd_free_softstate(un, devi);
	return (rval);
}
#ifdef NOTNEEDED
/*
 * This routine is used to set the block mode of operation by issuing the
 * Set Block Mode ATA command with the maximum block mode possible.
 */
static int
dcd_set_multiple(struct dcd_disk *un)
{
	int status;
	struct udcd_cmd ucmd;
	struct dcd_cmd cdb;
	dev_t dev;

	/* Zero out the required structures */
	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

	cdb.cmd = ATA_SET_MULTIPLE;
	/*
	 * Here we should pass what needs to go into the sector count
	 * register. Even though this field normally indicates the number
	 * of bytes to read, we need to specify the block factor in terms
	 * of bytes, so that it will be programmed by the HBA driver into
	 * the sector count register.
	 */
	cdb.size = un->un_lbasize * un->un_dp->block_factor;

	cdb.sector_num.lba_num = 0;
	cdb.address_mode = ADD_LBA_MODE;
	cdb.direction = NO_DATA_XFER;

	ucmd.udcd_flags = 0;
	ucmd.udcd_cmd = &cdb;
	ucmd.udcd_bufaddr = NULL;
	ucmd.udcd_buflen = 0;
	ucmd.udcd_flags |= UDCD_SILENT;

	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);

	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);

	return (status);
}

/*
 * The following routine is used only for setting the transfer mode;
 * it is not designed for transferring any other features subcommand.
 */
static int
dcd_set_features(struct dcd_disk *un, uchar_t mode)
{
	int status;
	struct udcd_cmd ucmd;
	struct dcd_cmd cdb;
	dev_t dev;

	/* Zero out the required structures */
	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

	cdb.cmd = ATA_SET_FEATURES;
	/*
	 * Here we need to pass what needs to go into the sector count
	 * register, but for the SET FEATURES command the value taken by
	 * the sector count register depends on what type of subcommand is
	 * passed in the features register. Since we have defined size to
	 * be the size in bytes, in this context it does not indicate
	 * bytes; instead it indicates the mode to be programmed.
	 */
	cdb.size = un->un_lbasize * mode;

	cdb.sector_num.lba_num = 0;
	cdb.address_mode = ADD_LBA_MODE;
	cdb.direction = NO_DATA_XFER;
	cdb.features = ATA_FEATURE_SET_MODE;
	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "size %x, features %x, cmd %x\n",
	    cdb.size, cdb.features, cdb.cmd);

	ucmd.udcd_flags = 0;
	ucmd.udcd_cmd = &cdb;
	ucmd.udcd_bufaddr = NULL;
	ucmd.udcd_buflen = 0;
	ucmd.udcd_flags |= UDCD_SILENT;

	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);

	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);

	return (status);
}
#endif
/*
 * Validate the geometry for this disk, e.g.,
 * see whether it has a valid label.
 */
static int
dcd_validate_geometry(struct dcd_disk *un)
{
	int secsize = 0;
	struct dcd_device *devp;
	int secdiv;
	int rval;

	ASSERT(mutex_owned(DCD_MUTEX));
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcd_validate_geometry: started\n");

	if (un->un_lbasize < 0) {
		return (DCD_BAD_LABEL);
	}

	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
		mutex_exit(DCD_MUTEX);
		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE) !=
		    DDI_SUCCESS) {
			mutex_enter(DCD_MUTEX);
			return (DCD_BAD_LABEL);
		}
		mutex_enter(DCD_MUTEX);
	}

	secsize = un->un_secsize;

	/*
	 * take a log base 2 of sector size (sorry)
	 */
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_secdiv = secdiv;
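
	/*
	 * Worked example (illustrative, not in the original source):
	 * for the default 512-byte sector size the loop above runs
	 * nine times (512 -> 256 -> ... -> 1 -> 0), leaving
	 * secdiv == 9, since 512 == 1 << 9.
	 */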
	/*
	 * Only DIRECT ACCESS devices will have Sun labels.
	 * CDs supposedly have a Sun label, too.
	 */
	devp = un->un_dcd;

	if (((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) &&
	    (devp->dcd_ident->dcd_config & ATANON_REMOVABLE)) {
		mutex_exit(DCD_MUTEX);
		rval = cmlb_validate(un->un_dklbhandle, 0, 0);
		mutex_enter(DCD_MUTEX);
		if (rval == ENOMEM)
			return (DCD_NO_MEM_FOR_LABEL);
		else if (rval != 0)
			return (DCD_BAD_LABEL);
	} else {
		/* it should never get here. */
		return (DCD_BAD_LABEL);
	}

	/*
	 * take a log base 2 of logical block size
	 */
	secsize = un->un_lbasize;
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_lbadiv = secdiv;

	/*
	 * take a log base 2 of the multiple of DEV_BSIZE blocks that
	 * make up one logical block
	 */
	secsize = un->un_lbasize >> DEV_BSHIFT;
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_blknoshift = secdiv;
	return (0);
}
/*
 * Unix Entry Points
 */

/* ARGSUSED3 */
static int
dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	dev_t dev = *dev_p;
	int rval = EIO;
	int partmask;
	int nodelay = (flag & (FNDELAY | FNONBLOCK));
	int i;
	char kstatname[KSTAT_STRLEN];
	diskaddr_t lblocks;
	char *partname;

	GET_SOFT_STATE(dev);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Inside Open flag %x, otyp %x\n", flag, otyp);

	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(DCD_MUTEX);

	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) {
		rval = ENXIO;
		goto done;
	}

	while (un->un_state == DCD_STATE_SUSPENDED) {
		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
	}

	if ((un->un_state == DCD_STATE_PM_SUSPENDED) && (!nodelay)) {
		mutex_exit(DCD_MUTEX);
		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
		    != DDI_SUCCESS) {
			mutex_enter(DCD_MUTEX);
			rval = EIO;
			goto done;
		}
		mutex_enter(DCD_MUTEX);
	}

	/*
	 * Set make_dcd_cmd() flags and stat_size here, since these
	 * are unlikely to change.
	 */
	un->un_cmd_flags = 0;
	un->un_cmd_stat_size = 2;

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdopen un=0x%p\n",
	    (void *)un);
	/*
	 * Check for a previous exclusive open.
	 */
	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Exclusive open flag %x, partmask %x\n",
	    un->un_exclopen, partmask);

	if (un->un_exclopen & (partmask)) {
failed_exclusive:
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "exclusive open fails\n");
		rval = EBUSY;
		goto done;
	}

	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto failed_exclusive;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto failed_exclusive;
			}
		}
	}

	if (flag & FWRITE) {
		mutex_exit(DCD_MUTEX);
		if (dcd_check_wp(dev)) {
			sema_v(&un->un_semoclose);
			return (EROFS);
		}
		mutex_enter(DCD_MUTEX);
	}

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Check Write Protect handled\n");

	if (!nodelay) {
		mutex_exit(DCD_MUTEX);
		if ((rval = dcd_ready_and_valid(dev, un)) != 0) {
			rval = EIO;
		}
		(void) pm_idle_component(DCD_DEVINFO, 0);
		/*
		 * Fail if the device is not ready, or if the number of disk
		 * blocks is zero or negative for non-CD devices.
		 */
		if (rval || cmlb_partinfo(un->un_dklbhandle,
		    part, &lblocks, NULL, &partname, NULL, 0) ||
		    lblocks <= 0) {
			rval = EIO;
			mutex_enter(DCD_MUTEX);
			goto done;
		}
		mutex_enter(DCD_MUTEX);
	}
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/*
	 * Set up open and exclusive open flags.
	 */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "open of part %d type %d\n",
	    part, otyp);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Kstats getting updated\n");
	/*
	 * Only create kstats for disks; CD kstats are created in dcdattach.
	 */
	_NOTE(NO_COMPETING_THREADS_NOW);
	mutex_exit(DCD_MUTEX);
	if (un->un_stats == (kstat_t *)0) {
		un->un_stats = kstat_create("dad", instance,
		    NULL, "disk", KSTAT_TYPE_IO, 1,
		    KSTAT_FLAG_PERSISTENT);
		if (un->un_stats) {
			un->un_stats->ks_lock = DCD_MUTEX;
			kstat_install(un->un_stats);
		}
	}

	/*
	 * Set up partition statistics for each partition
	 * with number of blocks > 0.
	 */
	if (!nodelay) {
		for (i = 0; i < NDKMAP; i++) {
			if ((un->un_pstats[i] == (kstat_t *)0) &&
			    (cmlb_partinfo(un->un_dklbhandle,
			    i, &lblocks, NULL, &partname,
			    NULL, 0) == 0) && lblocks > 0) {
				(void) sprintf(kstatname, "dad%d,%s",
				    instance, partname);
				un->un_pstats[i] = kstat_create("dad",
				    instance,
				    kstatname,
				    "partition",
				    KSTAT_TYPE_IO,
				    1,
				    KSTAT_FLAG_PERSISTENT);
				if (un->un_pstats[i]) {
					un->un_pstats[i]->ks_lock =
					    DCD_MUTEX;
					kstat_install(un->un_pstats[i]);
				}
			}
		}
	}

	/*
	 * Set up error kstats.
	 */
	(void) dcd_create_errstats(un, instance);

	_NOTE(COMPETING_THREADS_NOW);

	sema_v(&un->un_semoclose);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Open success\n");
	return (0);

done:
	mutex_exit(DCD_MUTEX);
	sema_v(&un->un_semoclose);
	return (rval);
}
/*
 * Test if the disk is ready and has a valid geometry.
 */
static int
dcd_ready_and_valid(dev_t dev, struct dcd_disk *un)
{
	int rval = 1;
	int g_error = 0;

	mutex_enter(DCD_MUTEX);
	/*
	 * Cmds outstanding.
	 */
	if (un->un_ncmds == 0) {
		(void) dcd_unit_ready(dev);
	}

	/*
	 * If the device is not yet ready here, mark it offline.
	 */
	if (un->un_state == DCD_STATE_NORMAL) {
		rval = dcd_unit_ready(dev);
		if (rval != 0 && rval != EACCES) {
			dcd_offline(un, 1);
			goto done;
		}
	}

	if (un->un_format_in_progress == 0) {
		g_error = dcd_validate_geometry(un);
	}

	/*
	 * Check if the geometry was valid. We don't check the validity of
	 * the geometry for CDROMs.
	 */
	if (g_error == DCD_BAD_LABEL) {
		rval = 1;
		goto done;
	}

	/*
	 * The state has changed; inform the media watch routines.
	 */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = 0;

done:
	mutex_exit(DCD_MUTEX);
	return (rval);
}
/*ARGSUSED*/
static int
dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	uchar_t *cp;
	int i;

	GET_SOFT_STATE(dev);

	if (otyp >= OTYPCNT)
		return (ENXIO);

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "close of part %d type %d\n",
	    part, otyp);
	sema_p(&un->un_semoclose);

	mutex_enter(DCD_MUTEX);

	if (un->un_exclopen & (1<<part)) {
		un->un_exclopen &= ~(1<<part);
	}

	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1<<part);
	}

	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != (uchar_t)0) {
			break;
		}
		cp++;
	}

	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "last close\n");
		if (un->un_state == DCD_STATE_OFFLINE) {
			dcd_offline(un, 1);
		}

		mutex_exit(DCD_MUTEX);
		(void) cmlb_close(un->un_dklbhandle, 0);

		_NOTE(NO_COMPETING_THREADS_NOW);
		if (un->un_stats) {
			kstat_delete(un->un_stats);
			un->un_stats = 0;
		}
		for (i = 0; i < NDKMAP; i++) {
			if (un->un_pstats[i]) {
				kstat_delete(un->un_pstats[i]);
				un->un_pstats[i] = (kstat_t *)0;
			}
		}

		if (un->un_errstats) {
			kstat_delete(un->un_errstats);
			un->un_errstats = (kstat_t *)0;
		}
		mutex_enter(DCD_MUTEX);

		_NOTE(COMPETING_THREADS_NOW);
	}

	mutex_exit(DCD_MUTEX);
	sema_v(&un->un_semoclose);
	return (0);
}
static void
dcd_offline(struct dcd_disk *un, int bechatty)
{
	if (bechatty)
		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "offline\n");

	mutex_exit(DCD_MUTEX);
	cmlb_invalidate(un->un_dklbhandle, 0);
	mutex_enter(DCD_MUTEX);
}
/*
 * Given the device number, return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t dev;
	struct dcd_disk *un;
	int instance, error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		dev = (dev_t)arg;
		instance = DCDUNIT(dev);
		if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)
			return (DDI_FAILURE);
		*result = (void *) DCD_DEVINFO;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		dev = (dev_t)arg;
		instance = DCDUNIT(dev);
		*result = (void *)(uintptr_t)instance;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
/*
 * Property operation routine. Return the number of blocks for the partition
 * in question, or forward the request to the property facilities.
 */
static int
dcd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct dcd_disk *un;

	if ((un = ddi_get_soft_state(dcd_state, ddi_get_instance(dip))) == NULL)
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(un->un_dklbhandle,
	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
	    DCDPART(dev), NULL));
}
/*
 * These routines perform raw i/o operations.
 */
/*ARGSUSED*/
void
dcduscsimin(struct buf *bp)
{
}

static void
dcdmin(struct buf *bp)
{
	struct dcd_disk *un;
	int instance;
	minor_t minor = getminor(bp->b_edev);

	instance = minor >> DCDUNIT_SHIFT;
	un = ddi_get_soft_state(dcd_state, instance);

	if (bp->b_bcount > un->un_max_xfer_size)
		bp->b_bcount = un->un_max_xfer_size;
}
/* ARGSUSED2 */
static int
dcdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	int secmask;
	GET_SOFT_STATE(dev);
	secmask = un->un_secsize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "file offset not modulo %d\n",
		    un->un_secsize);
		return (EINVAL);
	} else if (uio->uio_iov->iov_len & (secmask)) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "transfer length not modulo %d\n", un->un_secsize);
		return (EINVAL);
	}
	return (physio(dcdstrategy, NULL, dev, B_READ, dcdmin, uio));
}

/* ARGSUSED2 */
static int
dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	int secmask;
	struct uio *uio = aio->aio_uio;
	GET_SOFT_STATE(dev);
	secmask = un->un_secsize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "file offset not modulo %d\n",
		    un->un_secsize);
		return (EINVAL);
	} else if (uio->uio_iov->iov_len & (secmask)) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "transfer length not modulo %d\n", un->un_secsize);
		return (EINVAL);
	}
	return (aphysio(dcdstrategy, anocancel, dev, B_READ, dcdmin, aio));
}

/* ARGSUSED2 */
static int
dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	int secmask;
	GET_SOFT_STATE(dev);
	secmask = un->un_secsize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "file offset not modulo %d\n",
		    un->un_secsize);
		return (EINVAL);
	} else if (uio->uio_iov->iov_len & (secmask)) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "transfer length not modulo %d\n", un->un_secsize);
		return (EINVAL);
	}
	return (physio(dcdstrategy, NULL, dev, B_WRITE, dcdmin,
	    uio));
}

/* ARGSUSED2 */
static int
dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	int secmask;
	struct uio *uio = aio->aio_uio;
	GET_SOFT_STATE(dev);
	secmask = un->un_secsize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "file offset not modulo %d\n",
		    un->un_secsize);
		return (EINVAL);
	} else if (uio->uio_iov->iov_len & (secmask)) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "transfer length not modulo %d\n", un->un_secsize);
		return (EINVAL);
	}
	return (aphysio(dcdstrategy, anocancel, dev, B_WRITE, dcdmin, aio));
}
/*
 * strategy routine
 */
static int
dcdstrategy(struct buf *bp)
{
	struct dcd_disk *un;
	struct diskhd *dp;
	int i;
	minor_t minor = getminor(bp->b_edev);
	diskaddr_t p_lblksrt;
	diskaddr_t lblocks;
	diskaddr_t bn;

	if ((un = ddi_get_soft_state(dcd_state,
	    minor >> DCDUNIT_SHIFT)) == NULL ||
	    un->un_state == DCD_STATE_DUMPING ||
	    ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)) {
		SET_BP_ERROR(bp, ((un) ? ENXIO : EIO));
error:
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/*
	 * If the request size (buf->b_bcount) is greater than the size
	 * (un->un_max_xfer_size) supported by the target driver, fail
	 * the request with the EINVAL error code.
	 *
	 * We are not supposed to receive requests exceeding
	 * un->un_max_xfer_size, because the caller is expected to
	 * check what maximum size is supported by this
	 * driver, either through an ioctl or the dcdmin routine (which
	 * is private to this driver).
	 * But we have seen cases (like the meta driver (md)) where
	 * dcdstrategy was called with more than the supported size,
	 * causing data corruption.
	 */
	if (bp->b_bcount > un->un_max_xfer_size) {
		SET_BP_ERROR(bp, EINVAL);
		goto error;
	}

	TRACE_2(TR_FAC_DADA, TR_DCDSTRATEGY_START,
	    "dcdstrategy_start: bp 0x%p un 0x%p", bp, un);

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND; we should block new commands.
	 */
	mutex_enter(DCD_MUTEX);
	while (un->un_state == DCD_STATE_SUSPENDED) {
		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
	}

	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
		mutex_exit(DCD_MUTEX);
		(void) pm_idle_component(DCD_DEVINFO, 0);
		if (pm_raise_power(DCD_DEVINFO, 0,
		    DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
			SET_BP_ERROR(bp, EIO);
			goto error;
		}
		mutex_enter(DCD_MUTEX);
	}
	mutex_exit(DCD_MUTEX);

	/*
	 * Map-in the buffer in case the starting address is not word
	 * aligned.
	 */
	if (((uintptr_t)bp->b_un.b_addr) & 0x1)
		bp_mapin(bp);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_forw = 0;

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "bp->b_bcount %lx\n", bp->b_bcount);
	if (bp != un->un_sbufp) {
validated:	if (cmlb_partinfo(un->un_dklbhandle,
		    minor & DCDPART_MASK,
		    &lblocks,
		    &p_lblksrt,
		    NULL,
		    NULL,
		    0) == 0) {

			bn = dkblock(bp);

			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "dkblock(bp) is %llu\n", bn);

			i = 0;
			if (bn < 0) {
				i = -1;
			} else if (bn >= lblocks) {
				/*
				 * For a proper comparison, the file system
				 * block number has to be scaled to the actual
				 * CD transfer size.
				 * Since all the CDROM operations
				 * that have Sun labels are in the correct
				 * block size, this will work for CDs. This
				 * will have to change when we have different
				 * sector sizes.
				 *
				 * If bn == lblocks,
				 * it is not an error; resid == count.
				 */
				if (bn > lblocks) {
					i = -1;
				} else {
					i = 1;
				}
			} else if (bp->b_bcount & (un->un_secsize-1)) {
				/*
				 * This should really be:
				 *
				 * ... if (bp->b_bcount & (un->un_lbasize-1))
				 */
				i = -1;
			} else {
				if (!bp->b_bcount) {
					printf("Warning: zero read or write\n");
					goto error;
				}
				/*
				 * Sort by absolute block number.
				 */
				bp->b_resid = bn;
				bp->b_resid += p_lblksrt;
				/*
				 * Zero out av_back - this will be a signal
				 * to dcdstart to go and fetch the resources.
				 */
				bp->av_back = NO_PKT_ALLOCATED;
			}

			/*
			 * Check to see whether or not we are done
			 * (with or without errors).
			 */
			if (i != 0) {
				if (i < 0) {
					bp->b_flags |= B_ERROR;
				}
				goto error;
			}
		} else {
			/*
			 * Opened in NDELAY/NONBLOCK mode?
			 * Check if the disk is ready and has a valid geometry.
			 */
			if (dcd_ready_and_valid(bp->b_edev, un) == 0) {
				goto validated;
			} else {
				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
				    "i/o to invalid geometry\n");
				SET_BP_ERROR(bp, EIO);
				goto error;
			}
		}
	} else if (BP_HAS_NO_PKT(bp)) {
		struct udcd_cmd *tscmdp;
		struct dcd_cmd *tcmdp;

		/*
		 * This indicates that it is a special buffer.
		 * It could be a udcd cmd, so call bp_mapin just
		 * in case it is a PIO command that was issued.
		 */
		tscmdp = (struct udcd_cmd *)bp->b_forw;
		tcmdp = tscmdp->udcd_cmd;
		if ((tcmdp->cmd != ATA_READ_DMA) && (tcmdp->cmd != 0xc9) &&
		    (tcmdp->cmd != ATA_WRITE_DMA) && (tcmdp->cmd != 0xcb) &&
		    (tcmdp->cmd != IDENTIFY_DMA) &&
		    (tcmdp->cmd != ATA_FLUSH_CACHE)) {
			bp_mapin(bp);
		}
	}

	/*
	 * We are doing it a bit non-standard. That is, the
	 * head of the b_actf chain is *not* the active command -
	 * it is just the head of the wait queue. The reason
	 * we do this is that the head of the b_actf chain is
	 * guaranteed to not be moved by disksort(), so that
	 * our restart command (pointed to by
	 * b_forw) and the head of the wait queue (b_actf) can
	 * have resources granted without it getting lost in
	 * the queue at some later point (where we would have
	 * to go and look for it).
	 */
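
	/*
	 * Illustrative queue layout (a sketch, not from the original
	 * source):
	 *
	 *	dp->b_forw -> restart command, if any (never sorted)
	 *	dp->b_actf -> head of wait queue (stable under disksort())
	 *	dp->b_actl -> tail of wait queue
	 */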
	mutex_enter(DCD_MUTEX);

	DCD_DO_KSTATS(un, kstat_waitq_enter, bp);

	dp = &un->un_utab;

	if (dp->b_actf == NULL) {
		dp->b_actf = bp;
		dp->b_actl = bp;
	} else if ((un->un_state == DCD_STATE_SUSPENDED) &&
	    bp == un->un_sbufp) {
		bp->b_actf = dp->b_actf;
		dp->b_actf = bp;
	} else {
		TRACE_3(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_START,
		    "dcdstrategy_disksort_start: dp 0x%p bp 0x%p un 0x%p",
		    dp, bp, un);
		disksort(dp, bp);
		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_END,
		    "dcdstrategy_disksort_end");
	}

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "ncmd %x , throttle %x, forw 0x%p\n",
	    un->un_ncmds, un->un_throttle, (void *)dp->b_forw);
	ASSERT(un->un_ncmds >= 0);
	ASSERT(un->un_throttle >= 0);
	if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
		dcdstart(un);
	} else if (BP_HAS_NO_PKT(dp->b_actf)) {
		struct buf *cmd_bp;

		cmd_bp = dp->b_actf;
		cmd_bp->av_back = ALLOCATING_PKT;
		mutex_exit(DCD_MUTEX);
		/*
		 * try and map this one
		 */
		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_START,
		    "dcdstrategy_small_window_call (begin)");

		make_dcd_cmd(un, cmd_bp, NULL_FUNC);

		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_END,
		    "dcdstrategy_small_window_call (end)");

		/*
		 * there is a small window where the active cmd
		 * completes before make_dcd_cmd returns.
		 * consequently, this cmd never gets started so
		 * we start it from here
		 */
		mutex_enter(DCD_MUTEX);
		if ((un->un_ncmds < un->un_throttle) &&
		    (dp->b_forw == NULL)) {
			dcdstart(un);
		}
	}
	mutex_exit(DCD_MUTEX);

done:
	TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_END, "dcdstrategy_end");
	return (0);
}
/*
 * Unit start and Completion
 * NOTE: we assume that the caller has at least checked for:
 *		(un->un_ncmds < un->un_throttle)
 * if not, there is no real harm done, dcd_transport() will
 * return BUSY
 */
static void
dcdstart(struct dcd_disk *un)
{
	int status, sort_key;
	struct buf *bp;
	struct diskhd *dp;
	uchar_t state = un->un_last_state;

	TRACE_1(TR_FAC_DADA, TR_DCDSTART_START, "dcdstart_start: un 0x%p", un);

retry:
	ASSERT(mutex_owned(DCD_MUTEX));

	dp = &un->un_utab;
	if (((bp = dp->b_actf) == NULL) || (bp->av_back == ALLOCATING_PKT) ||
	    (dp->b_forw != NULL)) {
		TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_WORK_END,
		    "dcdstart_end (no work)");
		return;
	}

	/*
	 * remove from active queue
	 */
	dp->b_actf = bp->b_actf;
	bp->b_actf = 0;

	/*
	 * increment ncmds before calling dcd_transport because dcdintr
	 * may be called before we return from dcd_transport!
	 */
	un->un_ncmds++;

	/*
	 * If measuring stats, mark exit from wait queue and
	 * entrance into run 'queue' if and only if we are
	 * going to actually start a command.
	 * Normally the bp already has a packet at this point
	 */
	DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);

	mutex_exit(DCD_MUTEX);

	if (BP_HAS_NO_PKT(bp)) {
		make_dcd_cmd(un, bp, dcdrunout);
		if (BP_HAS_NO_PKT(bp) && !(bp->b_flags & B_ERROR)) {
			mutex_enter(DCD_MUTEX);
			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);

			bp->b_actf = dp->b_actf;
			dp->b_actf = bp;
			New_state(un, DCD_STATE_RWAIT);
			un->un_ncmds--;
			TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_RESOURCES_END,
			    "dcdstart_end (No Resources)");
			goto done;
		} else if (bp->b_flags & B_ERROR) {
			mutex_enter(DCD_MUTEX);
			DCD_DO_KSTATS(un, kstat_runq_exit, bp);

			un->un_ncmds--;
			bp->b_resid = bp->b_bcount;
			if (bp->b_error == 0) {
				SET_BP_ERROR(bp, EIO);
			}

			/*
			 * restore old state
			 */
			un->un_state = un->un_last_state;
			un->un_last_state = state;

			mutex_exit(DCD_MUTEX);

			biodone(bp);
			mutex_enter(DCD_MUTEX);
			if (un->un_state == DCD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}

			if ((un->un_ncmds < un->un_throttle) &&
			    (dp->b_forw == NULL)) {
				goto retry;
			} else {
				goto done;
			}
		}
	}

	/*
	 * Restore resid from the packet; b_resid had been the
	 * disksort key.
	 */
	sort_key = bp->b_resid;
	bp->b_resid = BP_PKT(bp)->pkt_resid;
	BP_PKT(bp)->pkt_resid = 0;

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "bp->b_resid %lx, pkt_resid %lx\n",
	    bp->b_resid, BP_PKT(bp)->pkt_resid);
	/*
	 * We used to check whether or not to try and link commands here.
	 * Since we have found that there is no performance improvement
	 * for linked commands, this has not made much sense.
	 */
	if ((status = dcd_transport((struct dcd_pkt *)BP_PKT(bp)))
	    != TRAN_ACCEPT) {
		mutex_enter(DCD_MUTEX);
		un->un_ncmds--;
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "transport returned %x\n", status);
		if (status == TRAN_BUSY) {
			DCD_DO_ERRSTATS(un, dcd_transerrs);
			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
			dcd_handle_tran_busy(bp, dp, un);
			if (un->un_ncmds > 0) {
				bp->b_resid = sort_key;
			}
		} else {
			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
			mutex_exit(DCD_MUTEX);

			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "transport rejected (%d)\n",
			    status);
			SET_BP_ERROR(bp, EIO);
			bp->b_resid = bp->b_bcount;
			if (bp != un->un_sbufp) {
				dcd_destroy_pkt(BP_PKT(bp));
			}
			biodone(bp);

			mutex_enter(DCD_MUTEX);
			if (un->un_state == DCD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}
			if ((un->un_ncmds < un->un_throttle) &&
			    (dp->b_forw == NULL)) {
				goto retry;
			}
		}
	} else {
		mutex_enter(DCD_MUTEX);

		if (dp->b_actf && BP_HAS_NO_PKT(dp->b_actf)) {
			struct buf *cmd_bp;

			cmd_bp = dp->b_actf;
			cmd_bp->av_back = ALLOCATING_PKT;
			mutex_exit(DCD_MUTEX);
			/*
			 * try and map this one
			 */
			TRACE_0(TR_FAC_DADA, TR_DCASTART_SMALL_WINDOW_START,
			    "dcdstart_small_window_start");

			make_dcd_cmd(un, cmd_bp, NULL_FUNC);

			TRACE_0(TR_FAC_DADA, TR_DCDSTART_SMALL_WINDOW_END,
			    "dcdstart_small_window_end");
			/*
			 * there is a small window where the active cmd
			 * completes before make_dcd_cmd returns.
			 * consequently, this cmd never gets started so
			 * we start it from here
			 */
			mutex_enter(DCD_MUTEX);
			if ((un->un_ncmds < un->un_throttle) &&
			    (dp->b_forw == NULL)) {
				goto retry;
			}
		}
	}

done:
	ASSERT(mutex_owned(DCD_MUTEX));
	TRACE_0(TR_FAC_DADA, TR_DCDSTART_END, "dcdstart_end");
}
2205 * make_dcd_cmd: create a pkt
2207 static void
2208 make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*func)())
2210 auto int count, com, direction;
2211 struct dcd_pkt *pkt;
2212 int flags, tval;
2214 _NOTE(DATA_READABLE_WITHOUT_LOCK(dcd_disk::un_dp))
2215 TRACE_3(TR_FAC_DADA, TR_MAKE_DCD_CMD_START,
2216 "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un, bp, un);
2219 flags = un->un_cmd_flags;
2221 if (bp != un->un_sbufp) {
2222 int partition = DCDPART(bp->b_edev);
2223 diskaddr_t p_lblksrt;
2224 diskaddr_t lblocks;
2225 long secnt;
2226 uint32_t blkno;
2227 int dkl_nblk, delta;
2228 long resid;
2230 if (cmlb_partinfo(un->un_dklbhandle,
2231 partition,
2232 &lblocks,
2233 &p_lblksrt,
2234 NULL,
2235 NULL,
2236 0) != NULL) {
2237 lblocks = 0;
2238 p_lblksrt = 0;
2241 dkl_nblk = (int)lblocks;
2244 * Make sure we don't run off the end of a partition.
2246 * Put this test here so that we can adjust b_count
2247 * to accurately reflect the actual amount we are
2248 * going to transfer.
2252 * First, compute partition-relative block number
2254 blkno = dkblock(bp);
2255 secnt = (bp->b_bcount + (un->un_secsize - 1)) >> un->un_secdiv;
2256 count = MIN(secnt, dkl_nblk - blkno);
2257 if (count != secnt) {
2259 * We have an overrun
2261 resid = (secnt - count) << un->un_secdiv;
2262 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2263 "overrun by %ld sectors\n",
2264 secnt - count);
2265 bp->b_bcount -= resid;
2266 } else {
2267 resid = 0;
2271 * Adjust block number to absolute
2273 delta = (int)p_lblksrt;
2274 blkno += delta;
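/*
 * A hedged worked example of the clamp above, assuming 512-byte
 * sectors (un_secsize = 512, un_secdiv = 9): on a 1000-block
 * partition (dkl_nblk = 1000), a 2048-byte request at blkno 998
 * gives secnt = 4 and count = MIN(4, 2) = 2, so resid =
 * (4 - 2) << 9 = 1024 and b_bcount is trimmed from 2048 to 1024
 * before the command is built.
 */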
2276 mutex_enter(DCD_MUTEX);
2278 * This is for devices having block size different from
2279 * DEV_BSIZE (e.g. 2K CDROMs).
2281 if (un->un_lbasize != un->un_secsize) {
2282 blkno >>= un->un_blknoshift;
2283 count >>= un->un_blknoshift;
2285 mutex_exit(DCD_MUTEX);
2287 TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_START,
2288 "make_dcd_cmd_init_pkt_call (begin)");
2289 pkt = dcd_init_pkt(ROUTE, NULL, bp,
2290 (uint32_t)sizeof (struct dcd_cmd),
2291 un->un_cmd_stat_size, PP_LEN, PKT_CONSISTENT,
2292 func, (caddr_t)un);
2293 TRACE_1(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_END,
2294 "make_dcd_cmd_init_pkt_call (end): pkt 0x%p", pkt);
2295 if (!pkt) {
2296 bp->b_bcount += resid;
2297 bp->av_back = NO_PKT_ALLOCATED;
2298 TRACE_0(TR_FAC_DADA,
2299 TR_MAKE_DCD_CMD_NO_PKT_ALLOCATED1_END,
2300 "make_dcd_cmd_end (NO_PKT_ALLOCATED1)");
2301 return;
2303 if (bp->b_flags & B_READ) {
2304 if ((un->un_dp->options & DMA_SUPPORTTED) ==
2305 DMA_SUPPORTTED) {
2306 com = ATA_READ_DMA;
2307 } else {
2308 if (un->un_dp->options & BLOCK_MODE)
2309 com = ATA_READ_MULTIPLE;
2310 else
2311 com = ATA_READ;
2313 direction = DATA_READ;
2314 } else {
2315 if ((un->un_dp->options & DMA_SUPPORTTED) ==
2316 DMA_SUPPORTTED) {
2317 com = ATA_WRITE_DMA;
2318 } else {
2319 if (un->un_dp->options & BLOCK_MODE)
2320 com = ATA_WRITE_MULTIPLE;
2321 else
2322 com = ATA_WRITE;
2324 direction = DATA_WRITE;
2328 * Save the resid in the packet, temporarily until
2329 * we transport the command.
2331 pkt->pkt_resid = resid;
2333 makecommand(pkt, flags, com, blkno, ADD_LBA_MODE,
2334 bp->b_bcount, direction, 0);
2335 tval = dcd_io_time;
2336 } else {
2338 struct udcd_cmd *scmd = (struct udcd_cmd *)bp->b_forw;
2341 * set options
2343 if ((scmd->udcd_flags & UDCD_SILENT) && !(DEBUGGING)) {
2344 flags |= FLAG_SILENT;
2346 if (scmd->udcd_flags & UDCD_DIAGNOSE)
2347 flags |= FLAG_DIAGNOSE;
2349 if (scmd->udcd_flags & UDCD_NOINTR)
2350 flags |= FLAG_NOINTR;
2352 pkt = dcd_init_pkt(ROUTE, NULL,
2353 (bp->b_bcount)? bp: NULL,
2354 (uint32_t)sizeof (struct dcd_cmd),
2355 2, PP_LEN, PKT_CONSISTENT, func, (caddr_t)un);
2357 if (!pkt) {
2358 bp->av_back = NO_PKT_ALLOCATED;
2359 return;
2362 makecommand(pkt, 0, scmd->udcd_cmd->cmd,
2363 scmd->udcd_cmd->sector_num.lba_num,
2364 scmd->udcd_cmd->address_mode,
2365 scmd->udcd_cmd->size,
2366 scmd->udcd_cmd->direction, scmd->udcd_cmd->features);
2368 pkt->pkt_flags = flags;
2369 if (scmd->udcd_timeout == 0)
2370 tval = dcd_io_time;
2371 else
2372 tval = scmd->udcd_timeout;
2373 /* UDAD interface should be decided. */
2374 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2375 "udcd interface\n");
2378 pkt->pkt_comp = dcdintr;
2379 pkt->pkt_time = tval;
2380 PKT_SET_BP(pkt, bp);
2381 bp->av_back = (struct buf *)pkt;
2383 TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_END, "make_dcd_cmd_end");
2387 * Command completion processing
2389 static void
2390 dcdintr(struct dcd_pkt *pkt)
2392 struct dcd_disk *un;
2393 struct buf *bp;
2394 int action;
2395 int status;
2397 bp = PKT_GET_BP(pkt);
2398 un = ddi_get_soft_state(dcd_state, DCDUNIT(bp->b_edev));
2400 TRACE_1(TR_FAC_DADA, TR_DCDINTR_START, "dcdintr_start: un 0x%p", un);
2401 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdintr\n");
2403 mutex_enter(DCD_MUTEX);
2404 un->un_ncmds--;
2405 DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2406 ASSERT(un->un_ncmds >= 0);
2408 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2409 "reason %x and Status %x\n", pkt->pkt_reason, SCBP_C(pkt));
2412 * do most common case first
2414 if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == 0)) {
2415 int com = GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp);
2417 if (un->un_state == DCD_STATE_OFFLINE) {
2418 un->un_state = un->un_last_state;
2419 dcd_log(DCD_DEVINFO, dcd_label, CE_NOTE,
2420 (const char *) diskokay);
2423 * If the command is a read or a write, and we have
2424 * a non-zero pkt_resid, that is an error. We should
2425 * attempt to retry the operation if possible.
2427 action = COMMAND_DONE;
2428 if (pkt->pkt_resid && (com == ATA_READ || com == ATA_WRITE)) {
2429 DCD_DO_ERRSTATS(un, dcd_harderrs);
2430 if ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count) {
2431 PKT_INCR_RETRY_CNT(pkt, 1);
2432 action = QUE_COMMAND;
2433 } else {
2435 * If we have exhausted retries, a command
2436 * with a residual is treated as an error in
2437 * this case.
2439 action = COMMAND_DONE_ERROR;
2441 dcd_log(DCD_DEVINFO, dcd_label,
2442 CE_WARN, "incomplete %s- %s\n",
2443 (bp->b_flags & B_READ)? "read" : "write",
2444 (action == QUE_COMMAND)? "retrying" :
2445 "giving up");
2449 * At this point pkt_resid reflects the number of
2450 * bytes the actual command left untransferred.
2451 * Add this to b_resid, i.e. the amount this driver
2452 * could not arrange to transfer, to get the
2453 * total number of bytes not transferred.
2455 if (action != QUE_COMMAND) {
2456 bp->b_resid += pkt->pkt_resid;
2459 } else if (pkt->pkt_reason != CMD_CMPLT) {
2460 action = dcd_handle_incomplete(un, bp);
2464 * If we are in the middle of syncing or dumping, we have got
2465 * here because dcd_transport has called us explicitly after
2466 * completing the command in a polled mode. We don't want to
2467 * have a recursive call into dcd_transport again.
2469 if (ddi_in_panic() && (action == QUE_COMMAND)) {
2470 action = COMMAND_DONE_ERROR;
2474 * Save the pkt reason; consecutive failures are not reported
2475 * unless fatal.
2476 * Do not reset last_pkt_reason when the cmd was retried and
2477 * succeeded, because
2478 * there may be more commands coming back with last_pkt_reason.
2480 if ((un->un_last_pkt_reason != pkt->pkt_reason) &&
2481 ((pkt->pkt_reason != CMD_CMPLT) ||
2482 (PKT_GET_RETRY_CNT(pkt) == 0))) {
2483 un->un_last_pkt_reason = pkt->pkt_reason;
2486 switch (action) {
2487 case COMMAND_DONE_ERROR:
2488 error:
2489 if (bp->b_resid == 0) {
2490 bp->b_resid = bp->b_bcount;
2492 if (bp->b_error == 0) {
2493 struct dcd_cmd *cdbp = (struct dcd_cmd *)pkt->pkt_cdbp;
2494 if (cdbp->cmd == ATA_FLUSH_CACHE &&
2495 (pkt->pkt_scbp[0] & STATUS_ATA_ERR) &&
2496 (pkt->pkt_scbp[1] & ERR_ABORT)) {
2497 SET_BP_ERROR(bp, ENOTSUP);
2498 un->un_flush_not_supported = 1;
2499 } else {
2500 SET_BP_ERROR(bp, EIO);
2503 bp->b_flags |= B_ERROR;
2504 /*FALLTHROUGH*/
2505 case COMMAND_DONE:
2506 dcddone_and_mutex_exit(un, bp);
2508 TRACE_0(TR_FAC_DADA, TR_DCDINTR_COMMAND_DONE_END,
2509 "dcdintr_end (COMMAND_DONE)");
2510 return;
2512 case QUE_COMMAND:
2513 if (un->un_ncmds >= un->un_throttle) {
2514 struct diskhd *dp = &un->un_utab;
2516 bp->b_actf = dp->b_actf;
2517 dp->b_actf = bp;
2519 DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2521 mutex_exit(DCD_MUTEX);
2522 goto exit;
2525 un->un_ncmds++;
2526 /* reset the pkt reason again */
2527 pkt->pkt_reason = 0;
2528 DCD_DO_KSTATS(un, kstat_runq_enter, bp);
2529 mutex_exit(DCD_MUTEX);
2530 if ((status = dcd_transport(BP_PKT(bp))) != TRAN_ACCEPT) {
2531 struct diskhd *dp = &un->un_utab;
2533 mutex_enter(DCD_MUTEX);
2534 un->un_ncmds--;
2535 if (status == TRAN_BUSY) {
2536 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2537 dcd_handle_tran_busy(bp, dp, un);
2538 mutex_exit(DCD_MUTEX);
2539 goto exit;
2541 DCD_DO_ERRSTATS(un, dcd_transerrs);
2542 DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2544 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2545 "requeue of command fails (%x)\n", status);
2546 SET_BP_ERROR(bp, EIO);
2547 bp->b_resid = bp->b_bcount;
2549 dcddone_and_mutex_exit(un, bp);
2550 goto exit;
2552 break;
2554 case JUST_RETURN:
2555 default:
2556 DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2557 mutex_exit(DCD_MUTEX);
2558 break;
2561 exit:
2562 TRACE_0(TR_FAC_DADA, TR_DCDINTR_END, "dcdintr_end");
2567 * Done with a command.
2569 static void
2570 dcddone_and_mutex_exit(struct dcd_disk *un, register struct buf *bp)
2572 struct diskhd *dp;
2574 TRACE_1(TR_FAC_DADA, TR_DCDONE_START, "dcddone_start: un 0x%p", un);
2576 _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un->un_dcd->dcd_mutex));
2578 dp = &un->un_utab;
2579 if (bp == dp->b_forw) {
2580 dp->b_forw = NULL;
2583 if (un->un_stats) {
2584 ulong_t n_done = bp->b_bcount - bp->b_resid;
2585 if (bp->b_flags & B_READ) {
2586 IOSP->reads++;
2587 IOSP->nread += n_done;
2588 } else {
2589 IOSP->writes++;
2590 IOSP->nwritten += n_done;
2593 if (IO_PARTITION_STATS) {
2594 ulong_t n_done = bp->b_bcount - bp->b_resid;
2595 if (bp->b_flags & B_READ) {
2596 IOSP_PARTITION->reads++;
2597 IOSP_PARTITION->nread += n_done;
2598 } else {
2599 IOSP_PARTITION->writes++;
2600 IOSP_PARTITION->nwritten += n_done;
2605 * Start the next one before releasing resources on this one
2607 if (un->un_state == DCD_STATE_SUSPENDED) {
2608 cv_broadcast(&un->un_disk_busy_cv);
2609 } else if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
2610 (dp->b_forw == NULL && un->un_state != DCD_STATE_SUSPENDED)) {
2611 dcdstart(un);
2614 mutex_exit(DCD_MUTEX);
2616 if (bp != un->un_sbufp) {
2617 dcd_destroy_pkt(BP_PKT(bp));
2618 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2619 "regular done: resid %ld\n", bp->b_resid);
2620 } else {
2621 ASSERT(un->un_sbuf_busy);
2623 TRACE_0(TR_FAC_DADA, TR_DCDDONE_BIODONE_CALL, "dcddone_biodone_call");
2625 biodone(bp);
2627 (void) pm_idle_component(DCD_DEVINFO, 0);
2629 TRACE_0(TR_FAC_DADA, TR_DCDDONE_END, "dcddone end");
2634 * reset the disk unless the transport layer has already
2635 * cleared the problem
2637 #define C1 (STAT_ATA_BUS_RESET|STAT_ATA_DEV_RESET|STAT_ATA_ABORTED)
2638 static void
2639 dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt)
2642 if ((pkt->pkt_statistics & C1) == 0) {
2643 mutex_exit(DCD_MUTEX);
2644 if (!dcd_reset(ROUTE, RESET_ALL)) {
2645 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2646 "Reset failed");
2648 mutex_enter(DCD_MUTEX);
2652 static int
2653 dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp)
2655 static char *fail = "ATA transport failed: reason '%s': %s\n";
2656 static char *notresp = "disk not responding to selection\n";
2657 int rval = COMMAND_DONE_ERROR;
2658 int action = COMMAND_SOFT_ERROR;
2659 struct dcd_pkt *pkt = BP_PKT(bp);
2660 int be_chatty = (un->un_state != DCD_STATE_SUSPENDED) &&
2661 (bp != un->un_sbufp || !(pkt->pkt_flags & FLAG_SILENT));
2663 ASSERT(mutex_owned(DCD_MUTEX));
2665 switch (pkt->pkt_reason) {
2667 case CMD_TIMEOUT:
2669 * This indicates that the HBA has already reset
2670 * the device, so just retry the command.
2672 break;
2674 case CMD_INCOMPLETE:
2675 action = dcd_check_error(un, bp);
2676 DCD_DO_ERRSTATS(un, dcd_transerrs);
2677 if (action == COMMAND_HARD_ERROR) {
2678 (void) dcd_reset_disk(un, pkt);
2680 break;
2682 case CMD_FATAL:
2684 * Something drastic has gone wrong
2686 break;
2687 case CMD_DMA_DERR:
2688 case CMD_DATA_OVR:
2689 /* FALLTHROUGH */
2691 default:
2693 * the target may still be running the command,
2694 * so we should try and reset that target.
2696 DCD_DO_ERRSTATS(un, dcd_transerrs);
2697 if ((pkt->pkt_reason != CMD_RESET) &&
2698 (pkt->pkt_reason != CMD_ABORTED)) {
2699 (void) dcd_reset_disk(un, pkt);
2701 break;
2705 * If pkt_reason is CMD_RESET/ABORTED, chances are that this pkt got
2706 * reset/aborted because another disk on this bus caused it.
2707 * The disk that caused it should get CMD_TIMEOUT with pkt_statistics
2708 * of STAT_TIMEOUT/STAT_DEV_RESET
2710 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_reason == CMD_ABORTED)) {
2711 /* To be written : XXX */
2712 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2713 "Command aborted\n");
2716 if (bp == un->un_sbufp && (pkt->pkt_flags & FLAG_DIAGNOSE)) {
2717 rval = COMMAND_DONE_ERROR;
2718 } else {
2719 if ((rval == COMMAND_DONE_ERROR) &&
2720 (action == COMMAND_SOFT_ERROR) &&
2721 ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count)) {
2722 PKT_INCR_RETRY_CNT(pkt, 1);
2723 rval = QUE_COMMAND;
2727 if (pkt->pkt_reason == CMD_INCOMPLETE && rval == COMMAND_DONE_ERROR) {
2729 * Looks like someone turned off this shoebox.
2731 if (un->un_state != DCD_STATE_OFFLINE) {
2732 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2733 (const char *) notresp);
2734 New_state(un, DCD_STATE_OFFLINE);
2736 } else if (pkt->pkt_reason == CMD_FATAL) {
2738 * Suppressing the following message for the time being
2739 * dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2740 * (const char *) notresp);
2742 PKT_INCR_RETRY_CNT(pkt, 6);
2743 rval = COMMAND_DONE_ERROR;
2744 New_state(un, DCD_STATE_FATAL);
2745 } else if (be_chatty) {
2746 int in_panic = ddi_in_panic();
2747 if (!in_panic || (rval == COMMAND_DONE_ERROR)) {
2748 if (((pkt->pkt_reason != un->un_last_pkt_reason) &&
2749 (pkt->pkt_reason != CMD_RESET)) ||
2750 (rval == COMMAND_DONE_ERROR) ||
2751 (dcd_error_level == DCD_ERR_ALL)) {
2752 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2753 fail, dcd_rname(pkt->pkt_reason),
2754 (rval == COMMAND_DONE_ERROR) ?
2755 "giving up": "retrying command");
2756 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2757 "retrycount=%x\n",
2758 PKT_GET_RETRY_CNT(pkt));
2762 error:
2763 return (rval);
2766 static int
2767 dcd_check_error(struct dcd_disk *un, struct buf *bp)
2769 struct diskhd *dp = &un->un_utab;
2770 struct dcd_pkt *pkt = BP_PKT(bp);
2771 int rval = 0;
2772 unsigned char status;
2773 unsigned char error;
2775 TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_START, "dcd_check_error_start");
2776 ASSERT(mutex_owned(DCD_MUTEX));
2778 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2779 "Pkt: 0x%p dp: 0x%p\n", (void *)pkt, (void *)dp);
2782 * Here we need to check the status register first; if an error
2783 * is indicated, then check the error register.
2786 status = (pkt->pkt_scbp)[0];
2787 if ((status & STATUS_ATA_DWF) == STATUS_ATA_DWF) {
2789 * There has been a Device Fault; the reason for such an
2790 * error is vendor specific.
2791 * The action to take is to indicate the error and reset the device.
2794 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "Device Fault\n");
2795 rval = COMMAND_HARD_ERROR;
2796 } else if ((status & STATUS_ATA_CORR) == STATUS_ATA_CORR) {
2799 * The sector read or written is marginal and hence ECC
2800 * correction has been applied. Indicate this so it can be
2801 * repaired; here we should probably re-assign the sector
2802 * based on the badblock mapping.
2805 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2806 "Soft Error on block %x\n",
2807 ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num);
2808 rval = COMMAND_SOFT_ERROR;
2809 } else if ((status & STATUS_ATA_ERR) == STATUS_ATA_ERR) {
2810 error = pkt->pkt_scbp[1];
2812 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2813 "Command:0x%x,Error:0x%x,Status:0x%x\n",
2814 GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp),
2815 error, status);
2816 if ((error & ERR_AMNF) == ERR_AMNF) {
2817 /* Address mark not found */
2818 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2819 "Address Mark Not Found");
2820 } else if ((error & ERR_TKONF) == ERR_TKONF) {
2821 /* Track 0 Not found */
2822 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2823 "Track 0 Not found \n");
2824 } else if ((error & ERR_IDNF) == ERR_IDNF) {
2825 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2826 " ID not found \n");
2827 } else if ((error & ERR_UNC) == ERR_UNC) {
2828 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2829 "Uncorrectable data Error: Block %x\n",
2830 ((struct dcd_cmd *)pkt->pkt_cdbp)->
2831 sector_num.lba_num);
2832 } else if ((error & ERR_BBK) == ERR_BBK) {
2833 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2834 "Bad block detected: Block %x\n",
2835 ((struct dcd_cmd *)pkt->pkt_cdbp)->
2836 sector_num.lba_num);
2837 } else if ((error & ERR_ABORT) == ERR_ABORT) {
2838 /* Aborted Command */
2839 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2840 " Aborted Command \n");
2843 * Return the soft error so that the command
2844 * will be retried.
2846 rval = COMMAND_SOFT_ERROR;
2849 TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_END, "dcd_check_error_end");
2850 return (rval);
2855 * System Crash Dump routine
2858 #define NDUMP_RETRIES 5
2860 static int
2861 dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
2863 struct dcd_pkt *pkt;
2864 int i;
2865 struct buf local, *bp;
2866 int err;
2867 unsigned char com;
2868 diskaddr_t p_lblksrt;
2869 diskaddr_t lblocks;
2871 GET_SOFT_STATE(dev);
2873 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
2875 if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)
2876 return (ENXIO);
2878 if (cmlb_partinfo(un->un_dklbhandle, DCDPART(dev),
2879 &lblocks, &p_lblksrt, NULL, NULL, 0))
2880 return (ENXIO);
2882 if (blkno+nblk > lblocks) {
2883 return (EINVAL);
2887 if ((un->un_state == DCD_STATE_SUSPENDED) ||
2888 (un->un_state == DCD_STATE_PM_SUSPENDED)) {
2889 if (pm_raise_power(DCD_DEVINFO, 0,
2890 DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
2891 return (EIO);
2896 * When cpr calls dcddump, we know that dad is in a
2897 * good state, so no bus reset is required.
2899 un->un_throttle = 0;
2901 if ((un->un_state != DCD_STATE_SUSPENDED) &&
2902 (un->un_state != DCD_STATE_DUMPING)) {
2904 New_state(un, DCD_STATE_DUMPING);
2907 * Reset the bus. I'd like to not have to do this,
2908 * but this is the safest thing to do...
2911 if (dcd_reset(ROUTE, RESET_ALL) == 0) {
2912 return (EIO);
2917 blkno += p_lblksrt;
2920 * It should be safe to call the allocator here without
2921 * worrying about being locked for DVMA mapping because
2922 * the address we're passed is already a DVMA mapping.
2924 * We are also not going to worry about semaphore ownership
2925 * in the dump buffer. Dumping is single threaded at present.
2928 bp = &local;
2929 bzero((caddr_t)bp, sizeof (*bp));
2930 bp->b_flags = B_BUSY;
2931 bp->b_un.b_addr = addr;
2932 bp->b_bcount = nblk << DEV_BSHIFT;
2933 bp->b_resid = 0;
2935 for (i = 0; i < NDUMP_RETRIES; i++) {
2936 bp->b_flags &= ~B_ERROR;
2937 if ((pkt = dcd_init_pkt(ROUTE, NULL, bp,
2938 (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
2939 PKT_CONSISTENT, NULL_FUNC, NULL)) != NULL) {
2940 break;
2942 if (i == 0) {
2943 if (bp->b_flags & B_ERROR) {
2944 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2945 "no resources for dumping; "
2946 "error code: 0x%x, retrying",
2947 geterror(bp));
2948 } else {
2949 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2950 "no resources for dumping; retrying");
2952 } else if (i != (NDUMP_RETRIES - 1)) {
2953 if (bp->b_flags & B_ERROR) {
2954 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, "no "
2955 "resources for dumping; error code: 0x%x, "
2956 "retrying\n", geterror(bp));
2958 } else {
2959 if (bp->b_flags & B_ERROR) {
2960 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
2961 "no resources for dumping; "
2962 "error code: 0x%x, retries failed, "
2963 "giving up.\n", geterror(bp));
2964 } else {
2965 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
2966 "no resources for dumping; "
2967 "retries failed, giving up.\n");
2969 return (EIO);
2971 delay(10);
2973 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
2974 com = ATA_WRITE_DMA;
2975 } else {
2976 if (un->un_dp->options & BLOCK_MODE)
2977 com = ATA_WRITE_MULTIPLE;
2978 else
2979 com = ATA_WRITE;
2982 makecommand(pkt, 0, com, blkno, ADD_LBA_MODE,
2983 (int)nblk*un->un_secsize, DATA_WRITE, 0);
2985 for (err = EIO, i = 0; i < NDUMP_RETRIES && err == EIO; i++) {
2987 if (dcd_poll(pkt) == 0) {
2988 switch (SCBP_C(pkt)) {
2989 case STATUS_GOOD:
2990 if (pkt->pkt_resid == 0) {
2991 err = 0;
2993 break;
2994 case STATUS_ATA_BUSY:
2995 (void) dcd_reset(ROUTE, RESET_TARGET);
2996 break;
2997 default:
2998 mutex_enter(DCD_MUTEX);
2999 (void) dcd_reset_disk(un, pkt);
3000 mutex_exit(DCD_MUTEX);
3001 break;
3003 } else if (i > NDUMP_RETRIES/2) {
3004 (void) dcd_reset(ROUTE, RESET_ALL);
3008 dcd_destroy_pkt(pkt);
3009 return (err);
3013 * This routine implements the ioctl calls. It is called
3014 * from the device switch at normal priority.
3016 /* ARGSUSED3 */
3017 static int
3018 dcdioctl(dev_t dev, int cmd, intptr_t arg, int flag,
3019 cred_t *cred_p, int *rval_p)
3021 auto int32_t data[512 / (sizeof (int32_t))];
3022 struct dk_cinfo *info;
3023 struct dk_minfo media_info;
3024 struct udcd_cmd *scmd;
3025 int i, err;
3026 enum uio_seg uioseg = 0;
3027 enum dkio_state state = 0;
3028 #ifdef _MULTI_DATAMODEL
3029 struct dadkio_rwcmd rwcmd;
3030 #endif
3031 struct dadkio_rwcmd32 rwcmd32;
3032 struct dcd_cmd dcdcmd;
3034 GET_SOFT_STATE(dev);
3036 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3037 "dcd_ioctl : cmd %x, arg %lx\n", cmd, arg);
3039 bzero((caddr_t)data, sizeof (data));
3041 switch (cmd) {
3043 #ifdef DCDDEBUG
3045 * The following ioctls are for testing RESETs/ABORTs
3047 #define DKIOCRESET (DKIOC|14)
3048 #define DKIOCABORT (DKIOC|15)
3050 case DKIOCRESET:
3051 if (ddi_copyin((caddr_t)arg, (caddr_t)data, 4, flag))
3052 return (EFAULT);
3053 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3054 "DKIOCRESET: data = 0x%x\n", data[0]);
3055 if (dcd_reset(ROUTE, data[0])) {
3056 return (0);
3057 } else {
3058 return (EIO);
3060 case DKIOCABORT:
3061 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3062 "DKIOCABORT:\n");
3063 if (dcd_abort(ROUTE, NULL)) {
3064 return (0);
3065 } else {
3066 return (EIO);
3068 #endif
3070 case DKIOCINFO:
3072 * Controller Information
3074 info = (struct dk_cinfo *)data;
3076 mutex_enter(DCD_MUTEX);
3077 switch (un->un_dp->ctype) {
3078 default:
3079 info->dki_ctype = DKC_DIRECT;
3080 break;
3082 mutex_exit(DCD_MUTEX);
3083 info->dki_cnum = ddi_get_instance(ddi_get_parent(DCD_DEVINFO));
3084 (void) strcpy(info->dki_cname,
3085 ddi_get_name(ddi_get_parent(DCD_DEVINFO)));
3087 * Unit Information
3089 info->dki_unit = ddi_get_instance(DCD_DEVINFO);
3090 info->dki_slave = (Tgt(DCD_DCD_DEVP)<<3);
3091 (void) strcpy(info->dki_dname, ddi_driver_name(DCD_DEVINFO));
3092 info->dki_flags = DKI_FMTVOL;
3093 info->dki_partition = DCDPART(dev);
3096 * Max Transfer size of this device in blocks
3098 info->dki_maxtransfer = un->un_max_xfer_size / DEV_BSIZE;
3101 * We can't get from here to there yet
3103 info->dki_addr = 0;
3104 info->dki_space = 0;
3105 info->dki_prio = 0;
3106 info->dki_vec = 0;
3108 i = sizeof (struct dk_cinfo);
3109 if (ddi_copyout((caddr_t)data, (caddr_t)arg, i, flag))
3110 return (EFAULT);
3111 else
3112 return (0);
3114 case DKIOCGMEDIAINFO:
3116 * Since the dad target driver is used for IDE disks only,
3117 * the return value can be hardcoded to DK_FIXED_DISK.
3119 media_info.dki_media_type = DK_FIXED_DISK;
3121 mutex_enter(DCD_MUTEX);
3122 media_info.dki_lbsize = un->un_lbasize;
3123 media_info.dki_capacity = un->un_diskcapacity;
3124 mutex_exit(DCD_MUTEX);
3126 if (ddi_copyout(&media_info, (caddr_t)arg,
3127 sizeof (struct dk_minfo), flag))
3128 return (EFAULT);
3129 else
3130 return (0);
3132 case DKIOCGGEOM:
3133 case DKIOCGVTOC:
3134 case DKIOCGETEFI:
3136 mutex_enter(DCD_MUTEX);
3137 if (un->un_ncmds == 0) {
3138 if ((err = dcd_unit_ready(dev)) != 0) {
3139 mutex_exit(DCD_MUTEX);
3140 return (err);
3144 mutex_exit(DCD_MUTEX);
3145 err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3146 arg, flag, cred_p, rval_p, 0);
3147 return (err);
3149 case DKIOCGAPART:
3150 case DKIOCSAPART:
3151 case DKIOCSGEOM:
3152 case DKIOCSVTOC:
3153 case DKIOCSETEFI:
3154 case DKIOCPARTITION:
3155 case DKIOCPARTINFO:
3156 case DKIOCGMBOOT:
3157 case DKIOCSMBOOT:
3159 err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3160 arg, flag, cred_p, rval_p, 0);
3161 return (err);
3163 case DIOCTL_RWCMD:
3164 if (drv_priv(cred_p) != 0) {
3165 return (EPERM);
3168 #ifdef _MULTI_DATAMODEL
3169 switch (ddi_model_convert_from(flag & FMODELS)) {
3170 case DDI_MODEL_NONE:
3171 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd,
3172 sizeof (struct dadkio_rwcmd), flag)) {
3173 return (EFAULT);
3175 rwcmd32.cmd = rwcmd.cmd;
3176 rwcmd32.flags = rwcmd.flags;
3177 rwcmd32.blkaddr = rwcmd.blkaddr;
3178 rwcmd32.buflen = rwcmd.buflen;
3179 rwcmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmd.bufaddr;
3180 break;
3181 case DDI_MODEL_ILP32:
3182 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
3183 sizeof (struct dadkio_rwcmd32), flag)) {
3184 return (EFAULT);
3186 break;
3188 #else
3189 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
3190 sizeof (struct dadkio_rwcmd32), flag)) {
3191 return (EFAULT);
3193 #endif
3194 mutex_enter(DCD_MUTEX);
3196 uioseg = UIO_SYSSPACE;
3197 scmd = (struct udcd_cmd *)data;
3198 scmd->udcd_cmd = &dcdcmd;
3200 * Convert the dadkio_rwcmd structure to a udcd_cmd so that
3201 * it can take the normal path to get the I/O done.
3203 if (rwcmd32.cmd == DADKIO_RWCMD_READ) {
3204 if ((un->un_dp->options & DMA_SUPPORTTED) ==
3205 DMA_SUPPORTTED)
3206 scmd->udcd_cmd->cmd = ATA_READ_DMA;
3207 else
3208 scmd->udcd_cmd->cmd = ATA_READ;
3209 scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
3210 scmd->udcd_cmd->direction = DATA_READ;
3211 scmd->udcd_flags |= UDCD_READ|UDCD_SILENT;
3212 } else if (rwcmd32.cmd == DADKIO_RWCMD_WRITE) {
3213 if ((un->un_dp->options & DMA_SUPPORTTED) ==
3214 DMA_SUPPORTTED)
3215 scmd->udcd_cmd->cmd = ATA_WRITE_DMA;
3216 else
3217 scmd->udcd_cmd->cmd = ATA_WRITE;
3218 scmd->udcd_cmd->direction = DATA_WRITE;
3219 scmd->udcd_flags |= UDCD_WRITE|UDCD_SILENT;
3220 } else {
3221 mutex_exit(DCD_MUTEX);
3222 return (EINVAL);
3225 scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
3226 scmd->udcd_cmd->features = 0;
3227 scmd->udcd_cmd->size = rwcmd32.buflen;
3228 scmd->udcd_cmd->sector_num.lba_num = rwcmd32.blkaddr;
3229 scmd->udcd_bufaddr = (caddr_t)(uintptr_t)rwcmd32.bufaddr;
3230 scmd->udcd_buflen = rwcmd32.buflen;
3231 scmd->udcd_timeout = (ushort_t)dcd_io_time;
3232 scmd->udcd_resid = 0ULL;
3233 scmd->udcd_status = 0;
3234 scmd->udcd_error_reg = 0;
3235 scmd->udcd_status_reg = 0;
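/*
 * An illustrative example of the translation above (the values
 * are hypothetical): a DADKIO_RWCMD_READ with blkaddr = 100 and
 * buflen = 8192 becomes a udcd_cmd with cmd = ATA_READ (or
 * ATA_READ_DMA on DMA-capable drives), address_mode = ADD_LBA_MODE,
 * sector_num.lba_num = 100 and size = 8192, which then takes the
 * normal dcdioctl_cmd()/dcdstrategy() path below.
 */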
3237 mutex_exit(DCD_MUTEX);
3239 i = dcdioctl_cmd(dev, scmd, UIO_SYSSPACE, UIO_USERSPACE);
3240 mutex_enter(DCD_MUTEX);
3242 * After the command returns, convert the status from scmd to
3243 * dadkio_status.
3245 (void) dcd_translate(&(rwcmd32.status), scmd);
3246 rwcmd32.status.resid = scmd->udcd_resid;
3247 mutex_exit(DCD_MUTEX);
3249 #ifdef _MULTI_DATAMODEL
3250 switch (ddi_model_convert_from(flag & FMODELS)) {
3251 case DDI_MODEL_NONE: {
3252 int counter;
3253 rwcmd.status.status = rwcmd32.status.status;
3254 rwcmd.status.resid = rwcmd32.status.resid;
3255 rwcmd.status.failed_blk_is_valid =
3256 rwcmd32.status.failed_blk_is_valid;
3257 rwcmd.status.failed_blk = rwcmd32.status.failed_blk;
3258 rwcmd.status.fru_code_is_valid =
3259 rwcmd32.status.fru_code_is_valid;
3260 rwcmd.status.fru_code = rwcmd32.status.fru_code;
3261 for (counter = 0;
3262 counter < DADKIO_ERROR_INFO_LEN; counter++)
3263 rwcmd.status.add_error_info[counter] =
3264 rwcmd32.status.add_error_info[counter];
3266 /* Copy out the result back to the user program */
3267 if (ddi_copyout((caddr_t)&rwcmd, (caddr_t)arg,
3268 sizeof (struct dadkio_rwcmd), flag)) {
3269 if (i != 0) {
3270 i = EFAULT;
3273 break;
3274 case DDI_MODEL_ILP32:
3275 /* Copy out the result back to the user program */
3276 if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
3277 sizeof (struct dadkio_rwcmd32), flag)) {
3278 if (i != 0) {
3279 i = EFAULT;
3282 break;
3284 #else
3285 /* Copy out the result back to the user program */
3286 if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
3287 sizeof (struct dadkio_rwcmd32), flag)) {
3288 if (i != 0)
3289 i = EFAULT;
3291 #endif
3292 return (i);
3294 case UDCDCMD: {
3295 #ifdef _MULTI_DATAMODEL
3297 * For use when a 32-bit app makes a call into a
3298 * 64-bit ioctl
3300 struct udcd_cmd32 udcd_cmd_32_for_64;
3301 struct udcd_cmd32 *ucmd32 = &udcd_cmd_32_for_64;
3302 model_t model;
3303 #endif /* _MULTI_DATAMODEL */
3305 if (drv_priv(cred_p) != 0) {
3306 return (EPERM);
3309 scmd = (struct udcd_cmd *)data;
3311 #ifdef _MULTI_DATAMODEL
3312 switch (model = ddi_model_convert_from(flag & FMODELS)) {
3313 case DDI_MODEL_ILP32:
3314 if (ddi_copyin((caddr_t)arg, ucmd32,
3315 sizeof (struct udcd_cmd32), flag)) {
3316 return (EFAULT);
3319 * Convert the ILP32 udcd data from the
3320 * application to LP64 for internal use.
3322 udcd_cmd32toudcd_cmd(ucmd32, scmd);
3323 break;
3324 case DDI_MODEL_NONE:
3325 if (ddi_copyin((caddr_t)arg, scmd, sizeof (*scmd),
3326 flag)) {
3327 return (EFAULT);
3329 break;
3331 #else /* ! _MULTI_DATAMODEL */
3332 if (ddi_copyin((caddr_t)arg, (caddr_t)scmd,
3333 sizeof (*scmd), flag)) {
3334 return (EFAULT);
3336 #endif /* ! _MULTI_DATAMODEL */
3338 scmd->udcd_flags &= ~UDCD_NOINTR;
3339 uioseg = (flag & FKIOCTL)? UIO_SYSSPACE: UIO_USERSPACE;
3341 i = dcdioctl_cmd(dev, scmd, uioseg, uioseg);
3342 #ifdef _MULTI_DATAMODEL
3343 switch (model) {
3344 case DDI_MODEL_ILP32:
3346 * Convert back to ILP32 before copyout to the
3347 * application
3349 udcd_cmdtoudcd_cmd32(scmd, ucmd32);
3350 if (ddi_copyout(ucmd32, (caddr_t)arg,
3351 sizeof (*ucmd32), flag)) {
3352 if (i != 0)
3353 i = EFAULT;
3355 break;
3356 case DDI_MODEL_NONE:
3357 if (ddi_copyout(scmd, (caddr_t)arg, sizeof (*scmd),
3358 flag)) {
3359 if (i != 0)
3360 i = EFAULT;
3362 break;
3364 #else /* ! _MULTI_DATAMODE */
3365 if (ddi_copyout((caddr_t)scmd, (caddr_t)arg,
3366 sizeof (*scmd), flag)) {
3367 if (i != 0)
3368 i = EFAULT;
3370 #endif
3371 return (i);
3373 case DKIOCFLUSHWRITECACHE: {
3374 struct dk_callback *dkc = (struct dk_callback *)arg;
3375 struct dcd_pkt *pkt;
3376 struct buf *bp;
3377 int is_sync = 1;
3379 mutex_enter(DCD_MUTEX);
3380 if (un->un_flush_not_supported ||
3381 ! un->un_write_cache_enabled) {
3382 i = un->un_flush_not_supported ? ENOTSUP : 0;
3383 mutex_exit(DCD_MUTEX);
3385 * If a callback was requested: a callback will
3386 * always be done if the caller saw the
3387 * DKIOCFLUSHWRITECACHE ioctl return 0, and
3388 * never done if the caller saw the ioctl return
3389 * an error.
3391 if ((flag & FKIOCTL) && dkc != NULL &&
3392 dkc->dkc_callback != NULL) {
3393 (*dkc->dkc_callback)(dkc->dkc_cookie, i);
3395 * Did callback and reported error.
3396 * Since we did a callback, ioctl
3397 * should return 0.
3399 i = 0;
3401 return (i);
3405 * Get the special buffer
3407 while (un->un_sbuf_busy) {
3408 cv_wait(&un->un_sbuf_cv, DCD_MUTEX);
3410 un->un_sbuf_busy = 1;
3411 bp = un->un_sbufp;
3412 mutex_exit(DCD_MUTEX);
3414 pkt = dcd_init_pkt(ROUTE, NULL,
3415 NULL, (uint32_t)sizeof (struct dcd_cmd),
3416 2, PP_LEN, PKT_CONSISTENT, SLEEP_FUNC, (caddr_t)un);
3417 ASSERT(pkt != NULL);
3419 makecommand(pkt, un->un_cmd_flags | FLAG_SILENT,
3420 ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0, NO_DATA_XFER, 0);
3422 pkt->pkt_comp = dcdintr;
3423 pkt->pkt_time = DCD_FLUSH_TIME;
3424 PKT_SET_BP(pkt, bp);
3426 bp->av_back = (struct buf *)pkt;
3427 bp->b_forw = NULL;
3428 bp->b_flags = B_BUSY;
3429 bp->b_error = 0;
3430 bp->b_edev = dev;
3431 bp->b_dev = cmpdev(dev);
3432 bp->b_bcount = 0;
3433 bp->b_blkno = 0;
3434 bp->b_un.b_addr = 0;
3435 bp->b_iodone = NULL;
3436 bp->b_list = NULL;
3437 bp->b_private = NULL;
3439 if ((flag & FKIOCTL) && dkc != NULL &&
3440 dkc->dkc_callback != NULL) {
3441 struct dk_callback *dkc2 = (struct dk_callback *)
3442 kmem_zalloc(sizeof (*dkc2), KM_SLEEP);
3443 bcopy(dkc, dkc2, sizeof (*dkc2));
3445 bp->b_private = dkc2;
3446 bp->b_iodone = dcdflushdone;
3447 is_sync = 0;
3450 (void) dcdstrategy(bp);
3452 i = 0;
3453 if (is_sync) {
3454 i = biowait(bp);
3455 (void) dcdflushdone(bp);
3458 return (i);
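/*
 * A minimal sketch of how an in-kernel caller might request the
 * asynchronous form of this ioctl; the names my_done and my_state
 * are hypothetical, and only FKIOCTL callers may pass a
 * dk_callback:
 *
 *	struct dk_callback dkc;
 *	int rval;
 *
 *	dkc.dkc_callback = my_done;	(invoked as (cookie, error))
 *	dkc.dkc_cookie = my_state;
 *	(void) VOP_IOCTL(vp, DKIOCFLUSHWRITECACHE, (intptr_t)&dkc,
 *	    FKIOCTL | FWRITE, kcred, &rval, NULL);
 *
 * Once the callback has been made, the ioctl itself returns 0 even
 * if the flush failed; the error is reported through the callback.
 */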
3460 default:
3461 break;
3463 return (ENOTTY);
3467 static int
3468 dcdflushdone(struct buf *bp)
3470 struct dcd_disk *un = ddi_get_soft_state(dcd_state,
3471 DCDUNIT(bp->b_edev));
3472 struct dcd_pkt *pkt = BP_PKT(bp);
3473 struct dk_callback *dkc = bp->b_private;
3475 ASSERT(un != NULL);
3476 ASSERT(bp == un->un_sbufp);
3477 ASSERT(pkt != NULL);
3479 dcd_destroy_pkt(pkt);
3480 bp->av_back = NO_PKT_ALLOCATED;
3482 if (dkc != NULL) {
3483 ASSERT(bp->b_iodone != NULL);
3484 (*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
3485 kmem_free(dkc, sizeof (*dkc));
3486 bp->b_iodone = NULL;
3487 bp->b_private = NULL;
3491 * Tell anybody who cares that the buffer is now free
3493 mutex_enter(DCD_MUTEX);
3494 un->un_sbuf_busy = 0;
3495 cv_signal(&un->un_sbuf_cv);
3496 mutex_exit(DCD_MUTEX);
3497 return (0);
3501 * dcdrunout:
3502 * the callback function for resource allocation
3504 * XXX it would be preferable that dcdrunout() scans the whole
3505 * list for possible candidates for dcdstart(); this avoids
3506 * that a bp at the head of the list whose request cannot be
3507 * satisfied is retried again and again
3509 /*ARGSUSED*/
3510 static int
3511 dcdrunout(caddr_t arg)
3513 int serviced;
3514 struct dcd_disk *un;
3515 struct diskhd *dp;
3517 TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_START, "dcdrunout_start: arg 0x%p",
3518 arg);
3519 serviced = 1;
3521 un = (struct dcd_disk *)arg;
3522 dp = &un->un_utab;
3525 * We now support passing a structure to the callback
3526 * routine.
3528 ASSERT(un != NULL);
3529 mutex_enter(DCD_MUTEX);
3530 if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
3531 dcdstart(un);
3533 if (un->un_state == DCD_STATE_RWAIT) {
3534 serviced = 0;
3536 mutex_exit(DCD_MUTEX);
3537 TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_END,
3538 "dcdrunout_end: serviced %d", serviced);
3539 return (serviced);
3544 * This routine is called to see whether the unit is (still) there.
3545 * It must not be called when un->un_sbufp is in use, and must not be
3546 * called with an unattached disk. The soft state of the disk is
3547 * restored to what it was upon entry; the caller must set the correct state.
3549 * We enter with the disk mutex held.
3552 /* ARGSUSED0 */
3553 static int
3554 dcd_unit_ready(dev_t dev)
3556 auto struct udcd_cmd dcmd, *com = &dcmd;
3557 auto struct dcd_cmd cmdblk;
3558 int error;
3559 GET_SOFT_STATE(dev);
3562 * Now that we protect the special buffer with
3563 * a mutex, we could probably do a mutex_tryenter
3564 * on it here and return failure if it were held...
3567 error = 0;
3568 return (error);
3571 /* ARGSUSED0 */
3573 dcdioctl_cmd(dev_t devp, struct udcd_cmd *in, enum uio_seg cdbspace,
3574 enum uio_seg dataspace)
3577 struct buf *bp;
3578 struct udcd_cmd *scmd;
3579 struct dcd_pkt *pkt;
3580 int err, rw;
3581 caddr_t cdb;
3582 int flags = 0;
3584 GET_SOFT_STATE(devp);
3588 * Is this a request to reset the bus?
3589 * If so, we need to do the resetting.
3592 if (in->udcd_flags & UDCD_RESET) {
3593 int flag = RESET_TARGET;
3594 err = dcd_reset(ROUTE, flag) ? 0: EIO;
3595 return (err);
3598 scmd = in;
3601 /* Do some sanity checks */
3602 if (scmd->udcd_buflen <= 0) {
3603 if (scmd->udcd_flags & (UDCD_READ | UDCD_WRITE)) {
3604 return (EINVAL);
3605 } else {
3606 scmd->udcd_buflen = 0;
3610 /* Make a copy of the dcd_cmd passed */
3611 cdb = kmem_zalloc(sizeof (struct dcd_cmd), KM_SLEEP);
3612 if (cdbspace == UIO_SYSSPACE) {
3613 flags |= FKIOCTL;
3616 if (ddi_copyin((void *)scmd->udcd_cmd, cdb, sizeof (struct dcd_cmd),
3617 flags)) {
3618 kmem_free(cdb, sizeof (struct dcd_cmd));
3619 return (EFAULT);
3621 scmd = kmem_alloc(sizeof (*scmd), KM_SLEEP);
3622 bcopy((caddr_t)in, (caddr_t)scmd, sizeof (*scmd));
3623 scmd->udcd_cmd = (struct dcd_cmd *)cdb;
3624 rw = (scmd->udcd_flags & UDCD_READ) ? B_READ: B_WRITE;
3628 * Get the special buffer
3631 mutex_enter(DCD_MUTEX);
3632 while (un->un_sbuf_busy) {
3633 if (cv_wait_sig(&un->un_sbuf_cv, DCD_MUTEX) == 0) {
3634 kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
3635 kmem_free((caddr_t)scmd, sizeof (*scmd));
3636 mutex_exit(DCD_MUTEX);
3637 return (EINTR);
3641 un->un_sbuf_busy = 1;
3642 bp = un->un_sbufp;
3643 mutex_exit(DCD_MUTEX);
3647 * If we are going to do actual I/O, let physio do all the
3648 * work.
3650 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3651 "dcdioctl_cmd : buflen %x\n", scmd->udcd_buflen);
3653 if (scmd->udcd_buflen) {
3654 auto struct iovec aiov;
3655 auto struct uio auio;
3656 struct uio *uio = &auio;
3658 bzero((caddr_t)&auio, sizeof (struct uio));
3659 bzero((caddr_t)&aiov, sizeof (struct iovec));
3661 aiov.iov_base = scmd->udcd_bufaddr;
3662 aiov.iov_len = scmd->udcd_buflen;
3664 uio->uio_iov = &aiov;
3665 uio->uio_iovcnt = 1;
3666 uio->uio_resid = scmd->udcd_buflen;
3667 uio->uio_segflg = dataspace;
3670 * Let physio do the rest...
3672 bp->av_back = NO_PKT_ALLOCATED;
3673 bp->b_forw = (struct buf *)scmd;
3674 err = physio(dcdstrategy, bp, devp, rw, dcdudcdmin, uio);
3675 } else {
3677 * We have to mimic what physio would do here.
3679 bp->av_back = NO_PKT_ALLOCATED;
3680 bp->b_forw = (struct buf *)scmd;
3681 bp->b_flags = B_BUSY | rw;
3682 bp->b_edev = devp;
3683 bp->b_dev = cmpdev(devp);
3684 bp->b_bcount = bp->b_blkno = 0;
3685 (void) dcdstrategy(bp);
3686 err = biowait(bp);
3689 done:
3690 if ((pkt = BP_PKT(bp)) != NULL) {
3691 bp->av_back = NO_PKT_ALLOCATED;
3692 /* we need to update the completion status of udcd command */
3693 in->udcd_resid = bp->b_resid;
3694 in->udcd_status_reg = SCBP_C(pkt);
3695 /* XXX: we need to give error_reg also */
3696 dcd_destroy_pkt(pkt);
3699 * Tell anybody who cares that the buffer is now free
3701 mutex_enter(DCD_MUTEX);
3702 un->un_sbuf_busy = 0;
3703 cv_signal(&un->un_sbuf_cv);
3704 mutex_exit(DCD_MUTEX);
3706 kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
3707 kmem_free((caddr_t)scmd, sizeof (*scmd));
3708 return (err);
3711 static void
3712 dcdudcdmin(struct buf *bp)
3719 * restart a cmd from timeout() context
3721 * the cmd is expected to be in un_utab.b_forw. If this pointer is non-zero
3722 * a restart timeout request has been issued and no new timeouts should
3723 * be requested. b_forw is reset when the cmd eventually completes in
3724 * dcddone_and_mutex_exit()
3726 void
3727 dcdrestart(void *arg)
3729 struct dcd_disk *un = (struct dcd_disk *)arg;
3730 struct buf *bp;
3731 int status;
3733 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart\n");
3735 mutex_enter(DCD_MUTEX);
3736 bp = un->un_utab.b_forw;
3737 if (bp) {
3738 un->un_ncmds++;
3739 DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
3743 if (bp) {
3744 struct dcd_pkt *pkt = BP_PKT(bp);
3746 mutex_exit(DCD_MUTEX);
3748 pkt->pkt_flags = 0;
3750 if ((status = dcd_transport(pkt)) != TRAN_ACCEPT) {
3751 mutex_enter(DCD_MUTEX);
3752 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
3753 un->un_ncmds--;
3754 if (status == TRAN_BUSY) {
3755 /* XXX : To be checked */
3757 * if (un->un_throttle > 1) {
3758 * ASSERT(un->un_ncmds >= 0);
3759 * un->un_throttle = un->un_ncmds;
3762 un->un_reissued_timeid =
3763 timeout(dcdrestart, (caddr_t)un,
3764 DCD_BSY_TIMEOUT/500);
3765 mutex_exit(DCD_MUTEX);
3766 return;
3768 DCD_DO_ERRSTATS(un, dcd_transerrs);
3769 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
3770 "dcdrestart transport failed (%x)\n", status);
3771 bp->b_resid = bp->b_bcount;
3772 SET_BP_ERROR(bp, EIO);
3774 DCD_DO_KSTATS(un, kstat_waitq_exit, bp);
3775 un->un_reissued_timeid = 0L;
3776 dcddone_and_mutex_exit(un, bp);
3777 return;
3779 mutex_enter(DCD_MUTEX);
3781 un->un_reissued_timeid = 0L;
3782 mutex_exit(DCD_MUTEX);
3783 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart done\n");
3787 * This routine gets called to reset the throttle to its saved
3788 * value whenever we lower the throttle.
3790 void
3791 dcd_reset_throttle(caddr_t arg)
3793 struct dcd_disk *un = (struct dcd_disk *)arg;
3794 struct diskhd *dp;
3796 mutex_enter(DCD_MUTEX);
3797 dp = &un->un_utab;
3800 * start any commands that didn't start while throttling.
3802 if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
3803 (dp->b_forw == NULL)) {
3804 dcdstart(un);
3806 mutex_exit(DCD_MUTEX);
3811 * This routine handles the case when a TRAN_BUSY is
3812 * returned by HBA.
3814 * If there are some commands already in the transport, the
3815 * bp can be put back on the queue and it will
3816 * be retried when the queue is drained after a command
3817 * completes. But if there is no command in the transport
3818 * and it still returns busy, we have to retry the command
3819 * after some time, e.g. 10ms.
3821 /* ARGSUSED0 */
3822 static void
3823 dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp, struct dcd_disk *un)
3825 ASSERT(mutex_owned(DCD_MUTEX));
3828 if (dp->b_forw == NULL || dp->b_forw == bp) {
3829 dp->b_forw = bp;
3830 } else if (dp->b_forw != bp) {
3831 bp->b_actf = dp->b_actf;
3832 dp->b_actf = bp;
3835 if (!un->un_reissued_timeid) {
3836 un->un_reissued_timeid =
3837 timeout(dcdrestart, (caddr_t)un, DCD_BSY_TIMEOUT/500);
3841 static int
3842 dcd_write_deviceid(struct dcd_disk *un)
3845 int status;
3846 diskaddr_t blk;
3847 struct udcd_cmd ucmd;
3848 struct dcd_cmd cdb;
3849 struct dk_devid *dkdevid;
3850 uint_t *ip, chksum;
3851 int i;
3852 dev_t dev;
3854 mutex_exit(DCD_MUTEX);
3855 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3856 mutex_enter(DCD_MUTEX);
3857 return (EINVAL);
3859 mutex_enter(DCD_MUTEX);
3861 /* Allocate the buffer */
3862 dkdevid = kmem_zalloc(un->un_secsize, KM_SLEEP);
3864 /* Fill in the revision */
3865 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
3866 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
3868 /* Copy in the device id */
3869 bcopy(un->un_devid, &dkdevid->dkd_devid,
3870 ddi_devid_sizeof(un->un_devid));
3872 /* Calculate the chksum */
3873 chksum = 0;
3874 ip = (uint_t *)dkdevid;
3875 for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
3876 chksum ^= ip[i];
3878 /* Fill in the checksum */
3879 DKD_FORMCHKSUM(chksum, dkdevid);
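/*
 * The checksum is the XOR of every 32-bit word in the sector
 * except the last, where DKD_FORMCHKSUM() stores the result.
 * A reader verifies it with the mirror-image loop (a sketch,
 * assuming the same un_secsize):
 *
 *	chksum = 0;
 *	for (i = 0; i < (un->un_secsize / sizeof (int)) - 1; i++)
 *		chksum ^= ip[i];
 *	if (DKD_GETCHKSUM(dkdevid) != chksum)
 *		the block is stale or corrupt
 *
 * dcd_read_deviceid() below performs exactly this check.
 */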
3881 (void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3882 (void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
3884 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3885 cdb.cmd = ATA_WRITE_DMA;
3886 } else {
3887 if (un->un_dp->options & BLOCK_MODE)
3888 cdb.cmd = ATA_WRITE_MULTIPLE;
3889 else
3890 cdb.cmd = ATA_WRITE;
3892 cdb.size = un->un_secsize;
3893 cdb.sector_num.lba_num = blk;
3894 cdb.address_mode = ADD_LBA_MODE;
3895 cdb.direction = DATA_WRITE;
3897 ucmd.udcd_flags = UDCD_WRITE;
3898 ucmd.udcd_cmd = &cdb;
3899 ucmd.udcd_bufaddr = (caddr_t)dkdevid;
3900 ucmd.udcd_buflen = un->un_secsize;
3901 ucmd.udcd_flags |= UDCD_SILENT;
3902 dev = makedevice(ddi_driver_major(DCD_DEVINFO),
3903 ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
3904 mutex_exit(DCD_MUTEX);
3905 status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
3906 mutex_enter(DCD_MUTEX);
3908 kmem_free(dkdevid, un->un_secsize);
3909 return (status);
3912 static int
3913 dcd_read_deviceid(struct dcd_disk *un)
3915 int status;
3916 diskaddr_t blk;
3917 struct udcd_cmd ucmd;
3918 struct dcd_cmd cdb;
3919 struct dk_devid *dkdevid;
3920 uint_t *ip;
3921 int chksum;
3922 int i, sz;
3923 dev_t dev;
3925 mutex_exit(DCD_MUTEX);
3926 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3927 mutex_enter(DCD_MUTEX);
3928 return (EINVAL);
3930 mutex_enter(DCD_MUTEX);
3932 dkdevid = kmem_alloc(un->un_secsize, KM_SLEEP);
3934 (void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3935 (void) bzero((caddr_t)&cdb, sizeof (cdb));
3937 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3938 cdb.cmd = ATA_READ_DMA;
3939 } else {
3940 if (un->un_dp->options & BLOCK_MODE)
3941 cdb.cmd = ATA_READ_MULTIPLE;
3942 else
3943 cdb.cmd = ATA_READ;
3945 cdb.size = un->un_secsize;
3946 cdb.sector_num.lba_num = blk;
3947 cdb.address_mode = ADD_LBA_MODE;
3948 cdb.direction = DATA_READ;
3950 ucmd.udcd_flags = UDCD_READ;
3951 ucmd.udcd_cmd = &cdb;
3952 ucmd.udcd_bufaddr = (caddr_t)dkdevid;
3953 ucmd.udcd_buflen = un->un_secsize;
3954 ucmd.udcd_flags |= UDCD_SILENT;
3955 dev = makedevice(ddi_driver_major(DCD_DEVINFO),
3956 ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
3957 mutex_exit(DCD_MUTEX);
3958 status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
3959 mutex_enter(DCD_MUTEX);
3961 if (status != 0) {
3962 kmem_free((caddr_t)dkdevid, un->un_secsize);
3963 return (status);
3966 /* Validate the revision */
3968 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
3969 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
3970 kmem_free((caddr_t)dkdevid, un->un_secsize);
3971 return (EINVAL);
3974 /* Calculate the checksum */
3975 chksum = 0;
3976 ip = (uint_t *)dkdevid;
3977 for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
3978 chksum ^= ip[i];
3980 /* Compare the checksums */
3982 if (DKD_GETCHKSUM(dkdevid) != chksum) {
3983 kmem_free((caddr_t)dkdevid, un->un_secsize);
3984 return (EINVAL);
3987 /* Validate the device id */
3988 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
3989 kmem_free((caddr_t)dkdevid, un->un_secsize);
3990 return (EINVAL);
3993 /* return a copy of the device id */
3994 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
3995 un->un_devid = (ddi_devid_t)kmem_alloc(sz, KM_SLEEP);
3996 bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
3997 kmem_free((caddr_t)dkdevid, un->un_secsize);
3999 return (0);
4003 * Return the device id for the device.
4004 * 1. If the device ID exists then just return it; nothing to do in that case.
4005 * 2. Build one from the drive's model number and serial number.
4006 * 3. If there is a problem in building it from serial/model #, then try
4007 * to read it from the acyl region of the disk.
4008 * Note: If this function is unable to return a valid ID then the calling
4009 * point will invoke the routine to create a fabricated ID and store it on
4010 * the acyl region of the disk.
4012 static ddi_devid_t
4013 dcd_get_devid(struct dcd_disk *un)
4015 int rc;
4017 /* If already registered, return that value */
4018 if (un->un_devid != NULL)
4019 return (un->un_devid);
4021 /* Build a devid from model and serial number, if present */
4022 rc = dcd_make_devid_from_serial(un);
4024 if (rc != DDI_SUCCESS) {
4025 /* Read the devid from the disk. */
4026 if (dcd_read_deviceid(un))
4027 return (NULL);
4030 (void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4031 return (un->un_devid);
4035 static ddi_devid_t
4036 dcd_create_devid(struct dcd_disk *un)
4038 if (ddi_devid_init(DCD_DEVINFO, DEVID_FAB, 0, NULL, (ddi_devid_t *)
4039 &un->un_devid) == DDI_FAILURE)
4040 return (NULL);
4042 if (dcd_write_deviceid(un)) {
4043 ddi_devid_free(un->un_devid);
4044 un->un_devid = NULL;
4045 return (NULL);
4048 (void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4049 return (un->un_devid);
4053 * Build a devid from the model and serial number, if present
4054 * Return DDI_SUCCESS or DDI_FAILURE.
4056 static int
4057 dcd_make_devid_from_serial(struct dcd_disk *un)
4059 int rc = DDI_SUCCESS;
4060 char *hwid;
4061 char *model;
4062 int model_len;
4063 char *serno;
4064 int serno_len;
4065 int total_len;
4067 /* initialize the model and serial number information */
4068 model = un->un_dcd->dcd_ident->dcd_model;
4069 model_len = DCD_MODEL_NUMBER_LENGTH;
4070 serno = un->un_dcd->dcd_ident->dcd_drvser;
4071 serno_len = DCD_SERIAL_NUMBER_LENGTH;
4073 /* Verify the model and serial number */
4074 dcd_validate_model_serial(model, &model_len, model_len);
4075 if (model_len == 0) {
4076 rc = DDI_FAILURE;
4077 goto out;
4079 dcd_validate_model_serial(serno, &serno_len, serno_len);
4080 if (serno_len == 0) {
4081 rc = DDI_FAILURE;
4082 goto out;
4086 * The device ID will be the concatenation of the model number,
4087 * the '=' separator, and the serial number. Allocate
4088 * the string and concatenate the components.
4090 total_len = model_len + 1 + serno_len;
4091 hwid = kmem_alloc(total_len, KM_SLEEP);
4092 bcopy((caddr_t)model, (caddr_t)hwid, model_len);
4093 bcopy((caddr_t)"=", (caddr_t)&hwid[model_len], 1);
4094 bcopy((caddr_t)serno, (caddr_t)&hwid[model_len + 1], serno_len);
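/*
 * For example (hypothetical identify data), a model of "ST34342A"
 * (model_len 8) and a serial of "ABC123" (serno_len 6) yield the
 * 15-byte buffer "ST34342A=ABC123"; no trailing NUL is stored,
 * since total_len counts only the three components.
 */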
4096 /* Initialize the device ID, trailing NULL not included */
4097 rc = ddi_devid_init(DCD_DEVINFO, DEVID_ATA_SERIAL, total_len,
4098 hwid, (ddi_devid_t *)&un->un_devid);
4100 /* Free the allocated string */
4101 kmem_free(hwid, total_len);
4103 out: return (rc);
4107 * Test for a valid model or serial number. Assume that a valid representation
4108 * contains at least one character that is neither a space, a '0' digit, nor a
4109 * NUL. Trim trailing blanks and NULs from the returned length.
4111 static void
4112 dcd_validate_model_serial(char *str, int *retlen, int totallen)
4114 char ch;
4115 boolean_t ret = B_FALSE;
4116 int i;
4117 int tb;
4119 for (i = 0, tb = 0; i < totallen; i++) {
4120 ch = *str++;
4121 if ((ch != ' ') && (ch != '\0') && (ch != '0'))
4122 ret = B_TRUE;
4123 if ((ch == ' ') || (ch == '\0'))
4124 tb++;
4125 else
4126 tb = 0;
4129 if (ret == B_TRUE) {
4130 /* At least one non-'0', non-blank character. */
4131 *retlen = totallen - tb;
4132 } else {
4133 *retlen = 0;
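/*
 * For example, a model field of "ST34342A" followed by trailing
 * blanks trims to *retlen = 8, while a field of all '0' characters
 * or all blanks (some drives report exactly that) yields
 * *retlen = 0 and forces the DDI_FAILURE path in
 * dcd_make_devid_from_serial() above.
 */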
4137 void
4138 clean_print(dev_info_t *dev, char *label, uint_t level,
4139 char *title, char *data, int len)
4141 int i;
4142 char buf[256];
4144 (void) sprintf(buf, "%s:", title);
4145 for (i = 0; i < len; i++) {
4146 (void) sprintf(&buf[strlen(buf)], "0x%x ", (data[i] & 0xff));
4148 (void) sprintf(&buf[strlen(buf)], "\n");
4150 dcd_log(dev, label, level, "%s", buf);
4154 * Print a piece of inquiry data, cleaned up for non-printable characters
4155 * and stopping at the first space character after the beginning of the
4156 * passed string.
4159 void
4160 inq_fill(char *p, int l, char *s)
4162 unsigned i = 0;
4163 char c;
4165 while (i++ < l) {
4166 if ((c = *p++) < ' ' || c >= 0177) {
4167 c = '*';
4168 } else if (i != 1 && c == ' ') {
4169 break;
4171 *s++ = c;
4173 *s++ = 0;
4176 char *
4177 dcd_sname(uchar_t status)
4179 switch (status & STATUS_ATA_MASK) {
4180 case STATUS_GOOD:
4181 return ("good status");
4183 case STATUS_ATA_BUSY:
4184 return ("busy");
4186 default:
4187 return ("<unknown status>");
4191 /* ARGSUSED0 */
4192 char *
4193 dcd_rname(int reason)
4195 static char *rnames[] = {
4196 "cmplt",
4197 "incomplete",
4198 "dma_derr",
4199 "tran_err",
4200 "reset",
4201 "aborted",
4202 "timeout",
4203 "data_ovr",
4205 if (reason > CMD_DATA_OVR) {
4206 return ("<unknown reason>");
4207 } else {
4208 return (rnames[reason]);
4214 /* ARGSUSED0 */
4216 dcd_check_wp(dev_t dev)
4219 return (0);
4223 * Create device error kstats
4225 static int
4226 dcd_create_errstats(struct dcd_disk *un, int instance)
4229 char kstatname[KSTAT_STRLEN];
4231 if (un->un_errstats == (kstat_t *)0) {
4232 (void) sprintf(kstatname, "dad%d,error", instance);
4233 un->un_errstats = kstat_create("daderror", instance, kstatname,
4234 "device_error", KSTAT_TYPE_NAMED,
4235 sizeof (struct dcd_errstats)/ sizeof (kstat_named_t),
4236 KSTAT_FLAG_PERSISTENT);
4238 if (un->un_errstats) {
4239 struct dcd_errstats *dtp;
4241 dtp = (struct dcd_errstats *)un->un_errstats->ks_data;
4242 kstat_named_init(&dtp->dcd_softerrs, "Soft Errors",
4243 KSTAT_DATA_UINT32);
4244 kstat_named_init(&dtp->dcd_harderrs, "Hard Errors",
4245 KSTAT_DATA_UINT32);
4246 kstat_named_init(&dtp->dcd_transerrs,
4247 "Transport Errors", KSTAT_DATA_UINT32);
4248 kstat_named_init(&dtp->dcd_model, "Model",
4249 KSTAT_DATA_CHAR);
4250 kstat_named_init(&dtp->dcd_revision, "Revision",
4251 KSTAT_DATA_CHAR);
4252 kstat_named_init(&dtp->dcd_serial, "Serial No",
4253 KSTAT_DATA_CHAR);
4254 kstat_named_init(&dtp->dcd_capacity, "Size",
4255 KSTAT_DATA_ULONGLONG);
4256 kstat_named_init(&dtp->dcd_rq_media_err, "Media Error",
4257 KSTAT_DATA_UINT32);
4258 kstat_named_init(&dtp->dcd_rq_ntrdy_err,
4259 "Device Not Ready", KSTAT_DATA_UINT32);
4260 kstat_named_init(&dtp->dcd_rq_nodev_err, " No Device",
4261 KSTAT_DATA_UINT32);
4262 kstat_named_init(&dtp->dcd_rq_recov_err, "Recoverable",
4263 KSTAT_DATA_UINT32);
4264 kstat_named_init(&dtp->dcd_rq_illrq_err,
4265 "Illegal Request", KSTAT_DATA_UINT32);
4267 un->un_errstats->ks_private = un;
4268 un->un_errstats->ks_update = nulldev;
4269 kstat_install(un->un_errstats);
4271 (void) strncpy(&dtp->dcd_model.value.c[0],
4272 un->un_dcd->dcd_ident->dcd_model, 16);
4273 (void) strncpy(&dtp->dcd_serial.value.c[0],
4274 un->un_dcd->dcd_ident->dcd_drvser, 16);
4275 (void) strncpy(&dtp->dcd_revision.value.c[0],
4276 un->un_dcd->dcd_ident->dcd_fw, 8);
4277 dtp->dcd_capacity.value.ui64 =
4278 (uint64_t)((uint64_t)un->un_diskcapacity *
4279 (uint64_t)un->un_lbasize);
4282 return (0);
4287 * This has been moved from the DADA layer, as it does nothing other than
4288 * retry the command when it is busy or does not complete.
4291 dcd_poll(struct dcd_pkt *pkt)
4293 int busy_count, rval = -1, savef;
4294 clock_t savet;
4295 void (*savec)();
4299 * Save old flags
4301 savef = pkt->pkt_flags;
4302 savec = pkt->pkt_comp;
4303 savet = pkt->pkt_time;
4305 pkt->pkt_flags |= FLAG_NOINTR;
4309 * Set the Pkt_comp to NULL
4312 pkt->pkt_comp = 0;
4315 * Set the Pkt time for the polled command
4317 if (pkt->pkt_time == 0) {
4318 pkt->pkt_time = DCD_POLL_TIMEOUT;
4322 /* Now transport the command */
4323 for (busy_count = 0; busy_count < dcd_poll_busycnt; busy_count++) {
4324 if ((rval = dcd_transport(pkt)) == TRAN_ACCEPT) {
4325 if (pkt->pkt_reason == CMD_INCOMPLETE &&
4326 pkt->pkt_state == 0) {
4327 delay(100);
4328 } else if (pkt->pkt_reason == CMD_CMPLT) {
4329 rval = 0;
4330 break;
4333 if (rval == TRAN_BUSY) {
4334 delay(100);
4335 continue;
4339 pkt->pkt_flags = savef;
4340 pkt->pkt_comp = savec;
4341 pkt->pkt_time = savet;
4342 return (rval);
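/*
 * For example, if the transport keeps returning TRAN_BUSY, the loop
 * above retries up to dcd_poll_busycnt times with a delay(100) pause
 * between attempts, and the final TRAN_BUSY is handed back to the
 * caller as rval.
 */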
4346 void
4347 dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp)
4349 if (cmdp->udcd_status_reg & STATUS_ATA_BUSY)
4350 statp->status = DADKIO_STAT_NOT_READY;
4351 else if (cmdp->udcd_status_reg & STATUS_ATA_DWF)
4352 statp->status = DADKIO_STAT_HARDWARE_ERROR;
4353 else if (cmdp->udcd_status_reg & STATUS_ATA_CORR)
4354 statp->status = DADKIO_STAT_SOFT_ERROR;
4355 else if (cmdp->udcd_status_reg & STATUS_ATA_ERR) {
4357 * The error register is valid only when BSY and DRQ are not set.
4358 * It is assumed that the HBA has checked this before it returns the data.
4360 if (cmdp->udcd_error_reg & ERR_AMNF)
4361 statp->status = DADKIO_STAT_NOT_FORMATTED;
4362 else if (cmdp->udcd_error_reg & ERR_TKONF)
4363 statp->status = DADKIO_STAT_NOT_FORMATTED;
4364 else if (cmdp->udcd_error_reg & ERR_ABORT)
4365 statp->status = DADKIO_STAT_ILLEGAL_REQUEST;
4366 else if (cmdp->udcd_error_reg & ERR_IDNF)
4367 statp->status = DADKIO_STAT_NOT_FORMATTED;
4368 else if (cmdp->udcd_error_reg & ERR_UNC)
4369 statp->status = DADKIO_STAT_BUS_ERROR;
4370 else if (cmdp->udcd_error_reg & ERR_BBK)
4371 statp->status = DADKIO_STAT_MEDIUM_ERROR;
4372 } else
4373 statp->status = DADKIO_STAT_NO_ERROR;
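/*
 * The mapping above, summarized (status register first, then the
 * error register bits checked in this order once STATUS_ATA_ERR is
 * set):
 *
 *	BSY		-> DADKIO_STAT_NOT_READY
 *	DWF		-> DADKIO_STAT_HARDWARE_ERROR
 *	CORR		-> DADKIO_STAT_SOFT_ERROR
 *	ERR + AMNF	-> DADKIO_STAT_NOT_FORMATTED
 *	ERR + TKONF	-> DADKIO_STAT_NOT_FORMATTED
 *	ERR + ABORT	-> DADKIO_STAT_ILLEGAL_REQUEST
 *	ERR + IDNF	-> DADKIO_STAT_NOT_FORMATTED
 *	ERR + UNC	-> DADKIO_STAT_BUS_ERROR
 *	ERR + BBK	-> DADKIO_STAT_MEDIUM_ERROR
 *	none of these	-> DADKIO_STAT_NO_ERROR
 */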
4376 static void
4377 dcd_flush_cache(struct dcd_disk *un)
4379 struct dcd_pkt *pkt;
4380 int retry_count;
4383 if ((pkt = dcd_init_pkt(ROUTE, NULL, NULL,
4384 (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4385 PKT_CONSISTENT, NULL_FUNC, NULL)) == NULL) {
4386 return;
4389 makecommand(pkt, 0, ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0,
4390 NO_DATA_XFER, 0);
4393 * Send the command. There is a chance it might fail on some
4394 * disks, since it is not a mandatory command per ATA-4. Try
4395 * 3 times if it fails (the retry count was chosen arbitrarily).
4396 * The retry is needed because, per the spec, FLUSH CACHE can fail
4397 * as a result of an unrecoverable error encountered while writing
4398 * data, and a subsequent command should continue flushing the
4399 * cache.
4401 for (retry_count = 0; retry_count < 3; retry_count++) {
4403 * Set the packet fields.
4405 pkt->pkt_comp = 0;
4406 pkt->pkt_time = DCD_POLL_TIMEOUT;
4407 pkt->pkt_flags |= FLAG_FORCENOINTR;
4408 pkt->pkt_flags |= FLAG_NOINTR;
4409 if (dcd_transport(pkt) == TRAN_ACCEPT) {
4410 if (pkt->pkt_reason == CMD_CMPLT) {
4411 break;
4415 * Note: the wait between retries here is analogous to the
4416 * delay used in the dcd_poll routine.
4418 drv_usecwait(1000000);
4420 (void) dcd_destroy_pkt(pkt);
4423 static int
4424 dcd_send_lb_rw_cmd(dev_info_t *devi, void *bufaddr,
4425 diskaddr_t start_block, size_t reqlength, uchar_t cmd)
4427 struct dcd_pkt *pkt;
4428 struct buf *bp;
4429 diskaddr_t real_addr = start_block;
4430 size_t buffer_size = reqlength;
4431 uchar_t command, tmp;
4432 int i, rval = 0;
4433 struct dcd_disk *un;
4435 un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
4436 if (un == NULL)
4437 return (ENXIO);
4439 bp = dcd_alloc_consistent_buf(ROUTE, NULL,
4440 buffer_size, B_READ, NULL_FUNC, NULL);
4441 if (!bp) {
4442 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4443 "no bp for disk label\n");
4444 return (ENOMEM);
4447 pkt = dcd_init_pkt(ROUTE, NULL,
4448 bp, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4449 PKT_CONSISTENT, NULL_FUNC, NULL);
4451 if (!pkt) {
4452 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4453 "no memory for disk label\n");
4454 dcd_free_consistent_buf(bp);
4455 return (ENOMEM);
4458 if (cmd == TG_READ) {
4459 bzero(bp->b_un.b_addr, buffer_size);
4460 tmp = DATA_READ;
4461 } else {
4462 bcopy((caddr_t)bufaddr, bp->b_un.b_addr, buffer_size);
4463 tmp = DATA_WRITE;
	mutex_enter(DCD_MUTEX);
	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
		if (cmd == TG_READ) {
			command = ATA_READ_DMA;
		} else {
			command = ATA_WRITE_DMA;
		}
	} else {
		if (cmd == TG_READ) {
			if (un->un_dp->options & BLOCK_MODE)
				command = ATA_READ_MULTIPLE;
			else
				command = ATA_READ;
		} else {
			if (un->un_dp->options & BLOCK_MODE)
				command = ATA_WRITE_MULTIPLE;
			else
				command = ATA_WRITE;
		}
	}
	mutex_exit(DCD_MUTEX);
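	/*
	 * Issue the command in polled mode.  A transfer only counts as
	 * successful if the status is good, data was actually moved and
	 * nothing was left over; otherwise retry, up to three attempts.
	 */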
	(void) makecommand(pkt, 0, command, real_addr, ADD_LBA_MODE,
	    buffer_size, tmp, 0);

	for (i = 0; i < 3; i++) {
		if (dcd_poll(pkt) || SCBP_C(pkt) != STATUS_GOOD ||
		    (pkt->pkt_state & STATE_XFERRED_DATA) == 0 ||
		    (pkt->pkt_resid != 0)) {
			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "Status %x, state %x, resid %lx\n",
			    SCBP_C(pkt), pkt->pkt_state, pkt->pkt_resid);
			rval = EIO;
		} else {
			/* Success; clear any error from an earlier try. */
			rval = 0;
			break;
		}
	}

	if (rval != 0) {
		dcd_destroy_pkt(pkt);
		dcd_free_consistent_buf(bp);
		return (EIO);
	}

	if (cmd == TG_READ) {
		bcopy(bp->b_un.b_addr, bufaddr, reqlength);
		rval = 0;
	}

	dcd_destroy_pkt(pkt);
	dcd_free_consistent_buf(bp);
	return (rval);
}
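/*
 * Compute the capacity in sectors from the IDENTIFY data.  The 28-bit
 * LBA sector count (dcd_addrsec[1] << 16 | dcd_addrsec[0]) wins over
 * the CHS product when it is larger; a value of 0xffffffff is treated
 * as invalid.
 */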
static int
dcd_compute_dk_capacity(struct dcd_device *devp, diskaddr_t *capacity)
{
	diskaddr_t cap;
	diskaddr_t no_of_lbasec;

	cap = devp->dcd_ident->dcd_fixcyls *
	    devp->dcd_ident->dcd_heads *
	    devp->dcd_ident->dcd_sectors;
	no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
	no_of_lbasec = no_of_lbasec << 16;
	no_of_lbasec = no_of_lbasec | devp->dcd_ident->dcd_addrsec[0];

	if (no_of_lbasec > cap) {
		cap = no_of_lbasec;
	}

	if (cap != ((uint32_t)-1))
		*capacity = cap;
	else
		return (EINVAL);
	return (0);
}
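/*
 * cmlb label read/write entry point; validate the opcode and hand the
 * request to dcd_send_lb_rw_cmd().
 */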
/*ARGSUSED5*/
static int
dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	return (dcd_send_lb_rw_cmd(devi, bufaddr, start_block,
	    reqlength, cmd));
}
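/*
 * Report the physical geometry to cmlb.  Only non-removable, non-ATAPI
 * devices are handled: the LBA sector count overrides the CHS product
 * when larger, and the geometry is rescaled for disks beyond 32GB.
 */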
static int
dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp)
{
	struct dcd_device *devp;
	uint32_t no_of_lbasec, capacity, calculated_cylinders;

	devp = ddi_get_driver_private(devi);

	if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
		if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
			phygeomp->g_ncyl = devp->dcd_ident->dcd_fixcyls - 2;
			phygeomp->g_acyl = 2;
			phygeomp->g_nhead = devp->dcd_ident->dcd_heads;
			phygeomp->g_nsect = devp->dcd_ident->dcd_sectors;

			no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
			no_of_lbasec = no_of_lbasec << 16;
			no_of_lbasec = no_of_lbasec |
			    devp->dcd_ident->dcd_addrsec[0];
			capacity = devp->dcd_ident->dcd_fixcyls *
			    devp->dcd_ident->dcd_heads *
			    devp->dcd_ident->dcd_sectors;
			if (no_of_lbasec > capacity) {
				capacity = no_of_lbasec;
				if (capacity > NUM_SECTORS_32G) {
					/*
					 * If the capacity is greater than
					 * 32GB, use 255 sectors per track.
					 * That holds up to a 128GB disk,
					 * the limit of ATA-4 28-bit LBA
					 * addressing.
					 */
					phygeomp->g_nsect = 255;
				}

				/*
				 * If the disk capacity is >= 128GB, the
				 * number of addressable sectors is set to
				 * 0xfffffff in the IDENTIFY info.  In that
				 * case clamp the physical cylinder count
				 * to the maximum 16-bit value.
				 */
				calculated_cylinders = (capacity) /
				    (phygeomp->g_nhead * phygeomp->g_nsect);
				if (calculated_cylinders >= USHRT_MAX) {
					phygeomp->g_ncyl = USHRT_MAX - 2;
				} else {
					phygeomp->g_ncyl =
					    calculated_cylinders - 2;
				}
			}

			phygeomp->g_capacity = capacity;
			phygeomp->g_intrlv = 0;
			phygeomp->g_rpm = 5400;
			phygeomp->g_secsize = devp->dcd_ident->dcd_secsiz;

			return (0);
		} else
			return (ENOTSUP);
	} else {
		return (EINVAL);
	}
}
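/*
 * cmlb information callback: answer geometry, capacity, block-size and
 * attribute queries from the per-instance soft state.
 */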
/*ARGSUSED3*/
static int
dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct dcd_disk *un;

	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));

	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
		return (dcd_lb_getphygeom(devi, (cmlb_geom_t *)arg));

	case TG_GETVIRTGEOM:
		return (-1);

	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(DCD_MUTEX);
		if (un->un_diskcapacity <= 0) {
			mutex_exit(DCD_MUTEX);
			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
			    "invalid disk capacity\n");
			return (EIO);
		}
		if (cmd == TG_GETCAPACITY)
			*(diskaddr_t *)arg = un->un_diskcapacity;
		else
			*(uint32_t *)arg = DEV_BSIZE;

		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %x\n",
		    un->un_diskcapacity);
		mutex_exit(DCD_MUTEX);
		return (0);

	case TG_GETATTR:
		mutex_enter(DCD_MUTEX);
		*(tg_attribute_t *)arg = un->un_tgattribute;
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "media_is_writable %x\n",
		    un->un_tgattribute.media_is_writable);
		mutex_exit(DCD_MUTEX);
		return (0);
	default:
		return (ENOTTY);