/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */
/*
 * SCSI disk target driver.
 */
32 #include <sys/scsi/scsi.h>
33 #include <sys/dkbad.h>
34 #include <sys/dklabel.h>
40 #include <sys/dktp/fdisk.h>
41 #include <sys/kstat.h>
42 #include <sys/vtrace.h>
44 #include <sys/thread.h>
46 #include <sys/efi_partition.h>
48 #include <sys/aio_req.h>
55 #if (defined(__fibre))
56 /* Note: is there a leadville version of the following? */
57 #include <sys/fc4/fcal_linkapp.h>
59 #include <sys/taskq.h>
61 #include <sys/byteorder.h>
66 #include <sys/scsi/targets/sddef.h>
68 #include <sys/sysevent/eventdefs.h>
69 #include <sys/sysevent/dev.h>
71 #include <sys/fm/protocol.h>
/*
 * Loadable module info.
 *
 * The same source builds two modules: "sd" (parallel SCSI) and "ssd"
 * (fibre channel), selected by the __fibre compile-time flag.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb drv/fcp";
#else /* !__fibre */
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb";
#endif /* !__fibre */
/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect_type" property.
 */
/* Default interconnect assumed when the HBA does not report one. */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
119 * The name of the driver, established from the module name in _init.
121 static char *sd_label
= NULL
;
/*
 * Driver name is unfortunately prefixed on some driver.conf properties,
 * so the property-name strings differ between the sd and ssd builds.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size	ssd_max_xfer_size
#define	sd_config_list		ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif
/*
 * Driver global variables
 */
140 #if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
149 #define sd_state ssd_state
150 #define sd_io_time ssd_io_time
151 #define sd_failfast_enable ssd_failfast_enable
152 #define sd_ua_retry_count ssd_ua_retry_count
153 #define sd_report_pfa ssd_report_pfa
154 #define sd_max_throttle ssd_max_throttle
155 #define sd_min_throttle ssd_min_throttle
156 #define sd_rot_delay ssd_rot_delay
158 #define sd_retry_on_reservation_conflict \
159 ssd_retry_on_reservation_conflict
160 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay
161 #define sd_resv_conflict_name ssd_resv_conflict_name
163 #define sd_component_mask ssd_component_mask
164 #define sd_level_mask ssd_level_mask
165 #define sd_debug_un ssd_debug_un
166 #define sd_error_level ssd_error_level
168 #define sd_xbuf_active_limit ssd_xbuf_active_limit
169 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit
172 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout
173 #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
174 #define sd_qfull_throttle_enable ssd_qfull_throttle_enable
175 #define sd_check_media_time ssd_check_media_time
176 #define sd_wait_cmds_complete ssd_wait_cmds_complete
177 #define sd_label_mutex ssd_label_mutex
178 #define sd_detach_mutex ssd_detach_mutex
179 #define sd_log_buf ssd_log_buf
180 #define sd_log_mutex ssd_log_mutex
182 #define sd_disk_table ssd_disk_table
183 #define sd_disk_table_size ssd_disk_table_size
184 #define sd_sense_mutex ssd_sense_mutex
185 #define sd_cdbtab ssd_cdbtab
187 #define sd_cb_ops ssd_cb_ops
188 #define sd_ops ssd_ops
189 #define sd_additional_codes ssd_additional_codes
190 #define sd_tgops ssd_tgops
192 #define sd_minor_data ssd_minor_data
193 #define sd_minor_data_efi ssd_minor_data_efi
196 #define sd_wmr_tq ssd_wmr_tq
197 #define sd_taskq_name ssd_taskq_name
198 #define sd_wmr_taskq_name ssd_wmr_taskq_name
199 #define sd_taskq_minalloc ssd_taskq_minalloc
200 #define sd_taskq_maxalloc ssd_taskq_maxalloc
202 #define sd_dump_format_string ssd_dump_format_string
204 #define sd_iostart_chain ssd_iostart_chain
205 #define sd_iodone_chain ssd_iodone_chain
207 #define sd_pm_idletime ssd_pm_idletime
209 #define sd_force_pm_supported ssd_force_pm_supported
211 #define sd_dtype_optical_bind ssd_dtype_optical_bind
213 #define sd_ssc_init ssd_ssc_init
214 #define sd_ssc_send ssd_ssc_send
215 #define sd_ssc_fini ssd_ssc_fini
216 #define sd_ssc_assessment ssd_ssc_assessment
217 #define sd_ssc_post ssd_ssc_post
218 #define sd_ssc_print ssd_ssc_print
219 #define sd_ssc_ereport_post ssd_ssc_ereport_post
220 #define sd_ssc_set_info ssd_ssc_set_info
221 #define sd_ssc_extract_info ssd_ssc_extract_info
226 int sd_force_pm_supported
= 0;
229 void *sd_state
= NULL
;
230 int sd_io_time
= SD_IO_TIME
;
231 int sd_failfast_enable
= 1;
232 int sd_ua_retry_count
= SD_UA_RETRY_COUNT
;
233 int sd_report_pfa
= 1;
234 int sd_max_throttle
= SD_MAX_THROTTLE
;
235 int sd_min_throttle
= SD_MIN_THROTTLE
;
236 int sd_rot_delay
= 4; /* Default 4ms Rotation delay */
237 int sd_qfull_throttle_enable
= TRUE
;
239 int sd_retry_on_reservation_conflict
= 1;
240 int sd_reinstate_resv_delay
= SD_REINSTATE_RESV_DELAY
;
241 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay
))
243 static int sd_dtype_optical_bind
= -1;
245 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
246 static char *sd_resv_conflict_name
= "sd_retry_on_reservation_conflict";
/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined
 * in sddef.h.
 */
253 uint_t sd_component_mask
= 0x0;
254 uint_t sd_level_mask
= 0x0;
255 struct sd_lun
*sd_debug_un
= NULL
;
256 uint_t sd_error_level
= SCSI_ERR_RETRYABLE
;
258 /* Note: these may go away in the future... */
259 static uint32_t sd_xbuf_active_limit
= 512;
260 static uint32_t sd_xbuf_reserve_limit
= 16;
262 static struct sd_resv_reclaim_request sd_tr
= { NULL
, NULL
, NULL
, 0, 0, 0 };
265 * Timer value used to reset the throttle after it has been reduced
266 * (typically in response to TRAN_BUSY or STATUS_QFULL)
268 static int sd_reset_throttle_timeout
= SD_RESET_THROTTLE_TIMEOUT
;
269 static int sd_qfull_throttle_timeout
= SD_QFULL_THROTTLE_TIMEOUT
;
272 * Interval value associated with the media change scsi watch.
274 static int sd_check_media_time
= 3000000;
277 * Wait value used for in progress operations during a DDI_SUSPEND
279 static int sd_wait_cmds_complete
= SD_WAIT_CMDS_COMPLETE
;
282 * sd_label_mutex protects a static buffer used in the disk label
283 * component of the driver
285 static kmutex_t sd_label_mutex
;
288 * sd_detach_mutex protects un_layer_count, un_detach_count, and
289 * un_opens_in_progress in the sd_lun structure.
291 static kmutex_t sd_detach_mutex
;
293 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex
,
294 sd_lun::{un_layer_count un_detach_count un_opens_in_progress
}))
297 * Global buffer and mutex for debug logging
299 static char sd_log_buf
[1024];
300 static kmutex_t sd_log_mutex
;
303 * Structs and globals for recording attached lun information.
304 * This maintains a chain. Each node in the chain represents a SCSI controller.
305 * The structure records the number of luns attached to each target connected
306 * with the controller.
307 * For parallel scsi device only.
309 struct sd_scsi_hba_tgt_lun
{
310 struct sd_scsi_hba_tgt_lun
*next
;
312 int nlun
[NTARGETS_WIDE
];
316 * Flag to indicate the lun is attached or detached
318 #define SD_SCSI_LUN_ATTACH 0
319 #define SD_SCSI_LUN_DETACH 1
321 static kmutex_t sd_scsi_target_lun_mutex
;
322 static struct sd_scsi_hba_tgt_lun
*sd_scsi_target_lun_head
= NULL
;
324 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex
,
325 sd_scsi_hba_tgt_lun::next
sd_scsi_hba_tgt_lun::pdip
))
327 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex
,
328 sd_scsi_target_lun_head
))
331 * "Smart" Probe Caching structs, globals, #defines, etc.
332 * For parallel scsi and non-self-identify device only.
336 * The following resources and routines are implemented to support
337 * "smart" probing, which caches the scsi_probe() results in an array,
338 * in order to help avoid long probe times.
340 struct sd_scsi_probe_cache
{
341 struct sd_scsi_probe_cache
*next
;
343 int cache
[NTARGETS_WIDE
];
346 static kmutex_t sd_scsi_probe_cache_mutex
;
347 static struct sd_scsi_probe_cache
*sd_scsi_probe_cache_head
= NULL
;
350 * Really we only need protection on the head of the linked list, but
351 * better safe than sorry.
353 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex
,
354 sd_scsi_probe_cache::next
sd_scsi_probe_cache::pdip
))
356 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex
,
357 sd_scsi_probe_cache_head
))
360 * Power attribute table
362 static sd_power_attr_ss sd_pwr_ss
= {
363 { "NAME=spindle-motor", "0=off", "1=on", NULL
},
369 static sd_power_attr_pc sd_pwr_pc
= {
370 { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
374 {15000, 15000, 1000, 0}
378 * Power level to power condition
380 static int sd_pl2pc
[] = {
381 SD_TARGET_START_VALID
,
388 * Vendor specific data name property declarations
391 #if defined(__fibre) || defined(__i386) ||defined(__amd64)
393 static sd_tunables seagate_properties
= {
394 SEAGATE_THROTTLE_VALUE
,
406 static sd_tunables fujitsu_properties
= {
407 FUJITSU_THROTTLE_VALUE
,
418 static sd_tunables ibm_properties
= {
430 static sd_tunables purple_properties
= {
431 PURPLE_THROTTLE_VALUE
,
435 PURPLE_RESET_RETRY_COUNT
,
436 PURPLE_RESERVE_RELEASE_TIME
,
442 static sd_tunables sve_properties
= {
447 SVE_RESET_RETRY_COUNT
,
448 SVE_RESERVE_RELEASE_TIME
,
449 SVE_MIN_THROTTLE_VALUE
,
450 SVE_DISKSORT_DISABLED_FLAG
,
454 static sd_tunables maserati_properties
= {
462 MASERATI_DISKSORT_DISABLED_FLAG
,
463 MASERATI_LUN_RESET_ENABLED_FLAG
466 static sd_tunables pirus_properties
= {
467 PIRUS_THROTTLE_VALUE
,
471 PIRUS_RESET_RETRY_COUNT
,
473 PIRUS_MIN_THROTTLE_VALUE
,
474 PIRUS_DISKSORT_DISABLED_FLAG
,
475 PIRUS_LUN_RESET_ENABLED_FLAG
480 #if (defined(__sparc) && !defined(__fibre)) || \
481 (defined(__i386) || defined(__amd64))
484 static sd_tunables elite_properties
= {
485 ELITE_THROTTLE_VALUE
,
496 static sd_tunables st31200n_properties
= {
497 ST31200N_THROTTLE_VALUE
,
508 #endif /* Fibre or not */
510 static sd_tunables lsi_properties_scsi
= {
513 LSI_NOTREADY_RETRIES
,
522 static sd_tunables symbios_properties
= {
523 SYMBIOS_THROTTLE_VALUE
,
525 SYMBIOS_NOTREADY_RETRIES
,
534 static sd_tunables lsi_properties
= {
537 LSI_NOTREADY_RETRIES
,
546 static sd_tunables lsi_oem_properties
= {
549 LSI_OEM_NOTREADY_RETRIES
,
561 #if (defined(SD_PROP_TST))
563 #define SD_TST_CTYPE_VAL CTYPE_CDROM
564 #define SD_TST_THROTTLE_VAL 16
565 #define SD_TST_NOTREADY_VAL 12
566 #define SD_TST_BUSY_VAL 60
567 #define SD_TST_RST_RETRY_VAL 36
568 #define SD_TST_RSV_REL_TIME 60
570 static sd_tunables tst_properties
= {
575 SD_TST_RST_RETRY_VAL
,
/*
 * This is similar to the ANSI toupper implementation: map an ASCII
 * lowercase letter to uppercase, pass everything else through unchanged.
 * Note: C is evaluated more than once, so avoid side-effecting arguments.
 */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
613 static sd_disk_config_t sd_disk_table
[] = {
614 #if defined(__fibre) || defined(__i386) || defined(__amd64)
615 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
616 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
617 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
618 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
619 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
620 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
621 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
622 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
623 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
624 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
625 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
626 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
627 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
628 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE
, &seagate_properties
},
629 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
630 { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
631 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
632 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
633 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
634 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
635 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
636 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
637 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE
, &fujitsu_properties
},
638 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE
, &ibm_properties
},
639 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE
, &ibm_properties
},
640 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE
, &ibm_properties
},
641 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE
, &ibm_properties
},
642 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
643 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
644 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
645 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
646 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
647 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
648 { "IBM 3526", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
649 { "IBM 3542", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
650 { "IBM 3552", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
651 { "IBM 1722", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
652 { "IBM 1742", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
653 { "IBM 1815", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
654 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
655 { "IBM 1814", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
656 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
657 { "IBM 1818", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
658 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
659 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
660 { "LSI INF", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
661 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
662 { "SGI TP", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
663 { "SGI IS", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
664 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT
|
665 SD_CONF_BSET_CACHE_IS_NV
, &lsi_oem_properties
},
666 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT
|
667 SD_CONF_BSET_CACHE_IS_NV
, &lsi_oem_properties
},
668 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE
, &lsi_oem_properties
},
669 { "LSI", SD_CONF_BSET_NRR_COUNT
, &lsi_properties
},
670 { "SUN T3", SD_CONF_BSET_THROTTLE
|
671 SD_CONF_BSET_BSY_RETRY_COUNT
|
672 SD_CONF_BSET_RST_RETRIES
|
673 SD_CONF_BSET_RSV_REL_TIME
,
674 &purple_properties
},
675 { "SUN SESS01", SD_CONF_BSET_THROTTLE
|
676 SD_CONF_BSET_BSY_RETRY_COUNT
|
677 SD_CONF_BSET_RST_RETRIES
|
678 SD_CONF_BSET_RSV_REL_TIME
|
679 SD_CONF_BSET_MIN_THROTTLE
|
680 SD_CONF_BSET_DISKSORT_DISABLED
,
682 { "SUN T4", SD_CONF_BSET_THROTTLE
|
683 SD_CONF_BSET_BSY_RETRY_COUNT
|
684 SD_CONF_BSET_RST_RETRIES
|
685 SD_CONF_BSET_RSV_REL_TIME
,
686 &purple_properties
},
687 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED
|
688 SD_CONF_BSET_LUN_RESET_ENABLED
,
689 &maserati_properties
},
690 { "SUN SE6920", SD_CONF_BSET_THROTTLE
|
691 SD_CONF_BSET_NRR_COUNT
|
692 SD_CONF_BSET_BSY_RETRY_COUNT
|
693 SD_CONF_BSET_RST_RETRIES
|
694 SD_CONF_BSET_MIN_THROTTLE
|
695 SD_CONF_BSET_DISKSORT_DISABLED
|
696 SD_CONF_BSET_LUN_RESET_ENABLED
,
698 { "SUN SE6940", SD_CONF_BSET_THROTTLE
|
699 SD_CONF_BSET_NRR_COUNT
|
700 SD_CONF_BSET_BSY_RETRY_COUNT
|
701 SD_CONF_BSET_RST_RETRIES
|
702 SD_CONF_BSET_MIN_THROTTLE
|
703 SD_CONF_BSET_DISKSORT_DISABLED
|
704 SD_CONF_BSET_LUN_RESET_ENABLED
,
706 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE
|
707 SD_CONF_BSET_NRR_COUNT
|
708 SD_CONF_BSET_BSY_RETRY_COUNT
|
709 SD_CONF_BSET_RST_RETRIES
|
710 SD_CONF_BSET_MIN_THROTTLE
|
711 SD_CONF_BSET_DISKSORT_DISABLED
|
712 SD_CONF_BSET_LUN_RESET_ENABLED
,
714 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE
|
715 SD_CONF_BSET_NRR_COUNT
|
716 SD_CONF_BSET_BSY_RETRY_COUNT
|
717 SD_CONF_BSET_RST_RETRIES
|
718 SD_CONF_BSET_MIN_THROTTLE
|
719 SD_CONF_BSET_DISKSORT_DISABLED
|
720 SD_CONF_BSET_LUN_RESET_ENABLED
,
722 { "SUN PSX1000", SD_CONF_BSET_THROTTLE
|
723 SD_CONF_BSET_NRR_COUNT
|
724 SD_CONF_BSET_BSY_RETRY_COUNT
|
725 SD_CONF_BSET_RST_RETRIES
|
726 SD_CONF_BSET_MIN_THROTTLE
|
727 SD_CONF_BSET_DISKSORT_DISABLED
|
728 SD_CONF_BSET_LUN_RESET_ENABLED
,
730 { "SUN SE6330", SD_CONF_BSET_THROTTLE
|
731 SD_CONF_BSET_NRR_COUNT
|
732 SD_CONF_BSET_BSY_RETRY_COUNT
|
733 SD_CONF_BSET_RST_RETRIES
|
734 SD_CONF_BSET_MIN_THROTTLE
|
735 SD_CONF_BSET_DISKSORT_DISABLED
|
736 SD_CONF_BSET_LUN_RESET_ENABLED
,
738 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
739 { "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
740 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
741 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
742 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
743 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT
, &lsi_oem_properties
},
744 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT
, &symbios_properties
},
745 #endif /* fibre or NON-sparc platforms */
746 #if ((defined(__sparc) && !defined(__fibre)) ||\
747 (defined(__i386) || defined(__amd64)))
748 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE
, &elite_properties
},
749 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE
, &st31200n_properties
},
750 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK
, NULL
},
751 { "CONNER CP30540", SD_CONF_BSET_NOCACHE
, NULL
},
752 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID
, NULL
},
753 { "*SUN0207*", SD_CONF_BSET_FAB_DEVID
, NULL
},
754 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID
, NULL
},
755 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID
, NULL
},
756 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID
, NULL
},
757 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID
, NULL
},
758 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID
, NULL
},
759 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID
, NULL
},
760 { "SYMBIOS", SD_CONF_BSET_THROTTLE
|SD_CONF_BSET_NRR_COUNT
,
761 &symbios_properties
},
762 { "LSI", SD_CONF_BSET_THROTTLE
| SD_CONF_BSET_NRR_COUNT
,
763 &lsi_properties_scsi
},
764 #if defined(__i386) || defined(__amd64)
765 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
766 | SD_CONF_BSET_READSUB_BCD
767 | SD_CONF_BSET_READ_TOC_ADDR_BCD
768 | SD_CONF_BSET_NO_READ_HEADER
769 | SD_CONF_BSET_READ_CD_XD4
), NULL
},
771 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
772 | SD_CONF_BSET_READSUB_BCD
773 | SD_CONF_BSET_READ_TOC_ADDR_BCD
774 | SD_CONF_BSET_NO_READ_HEADER
775 | SD_CONF_BSET_READ_CD_XD4
), NULL
},
776 #endif /* __i386 || __amd64 */
777 #endif /* sparc NON-fibre or NON-sparc platforms */
779 #if (defined(SD_PROP_TST))
780 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
782 | SD_CONF_BSET_NRR_COUNT
783 | SD_CONF_BSET_FAB_DEVID
784 | SD_CONF_BSET_NOCACHE
785 | SD_CONF_BSET_BSY_RETRY_COUNT
786 | SD_CONF_BSET_PLAYMSF_BCD
787 | SD_CONF_BSET_READSUB_BCD
788 | SD_CONF_BSET_READ_TOC_TRK_BCD
789 | SD_CONF_BSET_READ_TOC_ADDR_BCD
790 | SD_CONF_BSET_NO_READ_HEADER
791 | SD_CONF_BSET_READ_CD_XD4
792 | SD_CONF_BSET_RST_RETRIES
793 | SD_CONF_BSET_RSV_REL_TIME
794 | SD_CONF_BSET_TUR_CHECK
), &tst_properties
},
798 static const int sd_disk_table_size
=
799 sizeof (sd_disk_table
)/ sizeof (sd_disk_config_t
);
/*
 * Emulation mode disk drive VID/PID table
 */
804 static char sd_flash_dev_table
[][25] = {
805 "ATA MARVELL SD88SA02",
810 static const int sd_flash_dev_table_size
=
811 sizeof (sd_flash_dev_table
) / sizeof (sd_flash_dev_table
[0]);
/*
 * Interconnect types, as recorded in un_interconnect_type.  These drive
 * the parallel-SCSI vs. serial/fibre behavioral differences in the driver.
 */
#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_INTERCONNECT_SAS		5

/* True when the unit is attached over parallel SCSI. */
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
/* True when the unit is attached over a serial transport (SATA or SAS). */
#define	SD_IS_SERIAL(un)		\
	(((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
	((un)->un_interconnect_type == SD_INTERCONNECT_SAS))
827 * Definitions used by device id registration routines
829 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */
830 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */
831 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */
833 static kmutex_t sd_sense_mutex
= {0};
/*
 * Macros for updates of the driver state.
 * New_state saves the current state in un_last_state before switching;
 * Restore_state swaps back to the previously saved state.
 * Restore_state is wrapped in do { } while (0) so it behaves as a single
 * statement (safe in unbraced if/else bodies).
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	do { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } while (0)
843 static struct sd_cdbinfo sd_cdbtab
[] = {
844 { CDB_GROUP0
, 0x00, 0x1FFFFF, 0xFF, },
845 { CDB_GROUP1
, SCMD_GROUP1
, 0xFFFFFFFF, 0xFFFF, },
846 { CDB_GROUP5
, SCMD_GROUP5
, 0xFFFFFFFF, 0xFFFFFFFF, },
847 { CDB_GROUP4
, SCMD_GROUP4
, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int	sd_pm_idletime = 1;
/*
 * Internal function prototypes
 */
860 #if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
869 #define sd_log_trace ssd_log_trace
870 #define sd_log_info ssd_log_info
871 #define sd_log_err ssd_log_err
872 #define sdprobe ssdprobe
873 #define sdinfo ssdinfo
874 #define sd_prop_op ssd_prop_op
875 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
876 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
877 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
878 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
879 #define sd_scsi_target_lun_init ssd_scsi_target_lun_init
880 #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
881 #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
882 #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
883 #define sd_spin_up_unit ssd_spin_up_unit
884 #define sd_enable_descr_sense ssd_enable_descr_sense
885 #define sd_reenable_dsense_task ssd_reenable_dsense_task
886 #define sd_set_mmc_caps ssd_set_mmc_caps
887 #define sd_read_unit_properties ssd_read_unit_properties
888 #define sd_process_sdconf_file ssd_process_sdconf_file
889 #define sd_process_sdconf_table ssd_process_sdconf_table
890 #define sd_sdconf_id_match ssd_sdconf_id_match
891 #define sd_blank_cmp ssd_blank_cmp
892 #define sd_chk_vers1_data ssd_chk_vers1_data
893 #define sd_set_vers1_properties ssd_set_vers1_properties
894 #define sd_check_solid_state ssd_check_solid_state
895 #define sd_check_emulation_mode ssd_check_emulation_mode
897 #define sd_get_physical_geometry ssd_get_physical_geometry
898 #define sd_get_virtual_geometry ssd_get_virtual_geometry
899 #define sd_update_block_info ssd_update_block_info
900 #define sd_register_devid ssd_register_devid
901 #define sd_get_devid ssd_get_devid
902 #define sd_create_devid ssd_create_devid
903 #define sd_write_deviceid ssd_write_deviceid
904 #define sd_check_vpd_page_support ssd_check_vpd_page_support
905 #define sd_setup_pm ssd_setup_pm
906 #define sd_create_pm_components ssd_create_pm_components
907 #define sd_ddi_suspend ssd_ddi_suspend
908 #define sd_ddi_resume ssd_ddi_resume
909 #define sd_pm_state_change ssd_pm_state_change
910 #define sdpower ssdpower
911 #define sdattach ssdattach
912 #define sddetach ssddetach
913 #define sd_unit_attach ssd_unit_attach
914 #define sd_unit_detach ssd_unit_detach
915 #define sd_set_unit_attributes ssd_set_unit_attributes
916 #define sd_create_errstats ssd_create_errstats
917 #define sd_set_errstats ssd_set_errstats
918 #define sd_set_pstats ssd_set_pstats
919 #define sddump ssddump
920 #define sd_scsi_poll ssd_scsi_poll
921 #define sd_send_polled_RQS ssd_send_polled_RQS
922 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll
923 #define sd_init_event_callbacks ssd_init_event_callbacks
924 #define sd_event_callback ssd_event_callback
925 #define sd_cache_control ssd_cache_control
926 #define sd_get_write_cache_enabled ssd_get_write_cache_enabled
927 #define sd_get_nv_sup ssd_get_nv_sup
928 #define sd_make_device ssd_make_device
929 #define sdopen ssdopen
930 #define sdclose ssdclose
931 #define sd_ready_and_valid ssd_ready_and_valid
933 #define sdread ssdread
934 #define sdwrite ssdwrite
935 #define sdaread ssdaread
936 #define sdawrite ssdawrite
937 #define sdstrategy ssdstrategy
938 #define sdioctl ssdioctl
939 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
940 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart
941 #define sd_checksum_iostart ssd_checksum_iostart
942 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
943 #define sd_pm_iostart ssd_pm_iostart
944 #define sd_core_iostart ssd_core_iostart
945 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
946 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone
947 #define sd_checksum_iodone ssd_checksum_iodone
948 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
949 #define sd_pm_iodone ssd_pm_iodone
950 #define sd_initpkt_for_buf ssd_initpkt_for_buf
951 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf
952 #define sd_setup_rw_pkt ssd_setup_rw_pkt
953 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
954 #define sd_buf_iodone ssd_buf_iodone
955 #define sd_uscsi_strategy ssd_uscsi_strategy
956 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
957 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
958 #define sd_uscsi_iodone ssd_uscsi_iodone
959 #define sd_xbuf_strategy ssd_xbuf_strategy
960 #define sd_xbuf_init ssd_xbuf_init
961 #define sd_pm_entry ssd_pm_entry
962 #define sd_pm_exit ssd_pm_exit
964 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
965 #define sd_pm_timeout_handler ssd_pm_timeout_handler
967 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq
968 #define sdintr ssdintr
969 #define sd_start_cmds ssd_start_cmds
970 #define sd_send_scsi_cmd ssd_send_scsi_cmd
971 #define sd_bioclone_alloc ssd_bioclone_alloc
972 #define sd_bioclone_free ssd_bioclone_free
973 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc
974 #define sd_shadow_buf_free ssd_shadow_buf_free
975 #define sd_print_transport_rejected_message \
976 ssd_print_transport_rejected_message
977 #define sd_retry_command ssd_retry_command
978 #define sd_set_retry_bp ssd_set_retry_bp
979 #define sd_send_request_sense_command ssd_send_request_sense_command
980 #define sd_start_retry_command ssd_start_retry_command
981 #define sd_start_direct_priority_command \
982 ssd_start_direct_priority_command
983 #define sd_return_failed_command ssd_return_failed_command
984 #define sd_return_failed_command_no_restart \
985 ssd_return_failed_command_no_restart
986 #define sd_return_command ssd_return_command
987 #define sd_sync_with_callback ssd_sync_with_callback
988 #define sdrunout ssdrunout
989 #define sd_mark_rqs_busy ssd_mark_rqs_busy
990 #define sd_mark_rqs_idle ssd_mark_rqs_idle
991 #define sd_reduce_throttle ssd_reduce_throttle
992 #define sd_restore_throttle ssd_restore_throttle
993 #define sd_print_incomplete_msg ssd_print_incomplete_msg
994 #define sd_init_cdb_limits ssd_init_cdb_limits
995 #define sd_pkt_status_good ssd_pkt_status_good
996 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition
997 #define sd_pkt_status_busy ssd_pkt_status_busy
998 #define sd_pkt_status_reservation_conflict \
999 ssd_pkt_status_reservation_conflict
1000 #define sd_pkt_status_qfull ssd_pkt_status_qfull
1001 #define sd_handle_request_sense ssd_handle_request_sense
1002 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense
1003 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg
1004 #define sd_validate_sense_data ssd_validate_sense_data
1005 #define sd_decode_sense ssd_decode_sense
1006 #define sd_print_sense_msg ssd_print_sense_msg
1007 #define sd_sense_key_no_sense ssd_sense_key_no_sense
1008 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
1009 #define sd_sense_key_not_ready ssd_sense_key_not_ready
1010 #define sd_sense_key_medium_or_hardware_error \
1011 ssd_sense_key_medium_or_hardware_error
1012 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request
1013 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention
1014 #define sd_sense_key_fail_command ssd_sense_key_fail_command
1015 #define sd_sense_key_blank_check ssd_sense_key_blank_check
1016 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command
1017 #define sd_sense_key_default ssd_sense_key_default
1018 #define sd_print_retry_msg ssd_print_retry_msg
1019 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
1020 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
1021 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
1022 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
1023 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
1024 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
1025 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
1026 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
1027 #define sd_pkt_reason_default ssd_pkt_reason_default
1028 #define sd_reset_target ssd_reset_target
1029 #define sd_start_stop_unit_callback ssd_start_stop_unit_callback
1030 #define sd_start_stop_unit_task ssd_start_stop_unit_task
1031 #define sd_taskq_create ssd_taskq_create
1032 #define sd_taskq_delete ssd_taskq_delete
1033 #define sd_target_change_task ssd_target_change_task
1034 #define sd_log_dev_status_event ssd_log_dev_status_event
1035 #define sd_log_lun_expansion_event ssd_log_lun_expansion_event
1036 #define sd_log_eject_request_event ssd_log_eject_request_event
1037 #define sd_media_change_task ssd_media_change_task
1038 #define sd_handle_mchange ssd_handle_mchange
1039 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
1040 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
1041 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
1042 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
/*
 * BUG FIX: this macro previously expanded to itself (a self-referential
 * macro, which cpp leaves unexpanded), so the fibre (ssd) build retained
 * the "sd_" name for this one routine. Every other entry in this rename
 * block maps sd_* -> ssd_*; make this one consistent so the ssd variant
 * carries the ssd_ prefix like its neighbors.
 */
#define sd_send_scsi_feature_GET_CONFIGURATION \
	ssd_send_scsi_feature_GET_CONFIGURATION
1045 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
1046 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
1047 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
1048 #define sd_send_scsi_PERSISTENT_RESERVE_IN \
1049 ssd_send_scsi_PERSISTENT_RESERVE_IN
1050 #define sd_send_scsi_PERSISTENT_RESERVE_OUT \
1051 ssd_send_scsi_PERSISTENT_RESERVE_OUT
1052 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
1053 #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
1054 ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
1055 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
1056 #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
1057 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR
1058 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
1059 #define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
1060 ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
1061 #define sd_gesn_media_data_valid ssd_gesn_media_data_valid
1062 #define sd_alloc_rqs ssd_alloc_rqs
1063 #define sd_free_rqs ssd_free_rqs
1064 #define sd_dump_memory ssd_dump_memory
1065 #define sd_get_media_info_com ssd_get_media_info_com
1066 #define sd_get_media_info ssd_get_media_info
1067 #define sd_get_media_info_ext ssd_get_media_info_ext
1068 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info
1069 #define sd_nvpair_str_decode ssd_nvpair_str_decode
1070 #define sd_strtok_r ssd_strtok_r
1071 #define sd_set_properties ssd_set_properties
1072 #define sd_get_tunables_from_conf ssd_get_tunables_from_conf
1073 #define sd_setup_next_xfer ssd_setup_next_xfer
1074 #define sd_dkio_get_temp ssd_dkio_get_temp
1075 #define sd_check_mhd ssd_check_mhd
1076 #define sd_mhd_watch_cb ssd_mhd_watch_cb
1077 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
1078 #define sd_sname ssd_sname
1079 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover
1080 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread
1081 #define sd_take_ownership ssd_take_ownership
1082 #define sd_reserve_release ssd_reserve_release
1083 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
1084 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
1085 #define sd_persistent_reservation_in_read_keys \
1086 ssd_persistent_reservation_in_read_keys
1087 #define sd_persistent_reservation_in_read_resv \
1088 ssd_persistent_reservation_in_read_resv
1089 #define sd_mhdioc_takeown ssd_mhdioc_takeown
1090 #define sd_mhdioc_failfast ssd_mhdioc_failfast
1091 #define sd_mhdioc_release ssd_mhdioc_release
1092 #define sd_mhdioc_register_devid ssd_mhdioc_register_devid
1093 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys
1094 #define sd_mhdioc_inresv ssd_mhdioc_inresv
1095 #define sr_change_blkmode ssr_change_blkmode
1096 #define sr_change_speed ssr_change_speed
1097 #define sr_atapi_change_speed ssr_atapi_change_speed
1098 #define sr_pause_resume ssr_pause_resume
1099 #define sr_play_msf ssr_play_msf
1100 #define sr_play_trkind ssr_play_trkind
1101 #define sr_read_all_subcodes ssr_read_all_subcodes
1102 #define sr_read_subchannel ssr_read_subchannel
1103 #define sr_read_tocentry ssr_read_tocentry
1104 #define sr_read_tochdr ssr_read_tochdr
1105 #define sr_read_cdda ssr_read_cdda
1106 #define sr_read_cdxa ssr_read_cdxa
1107 #define sr_read_mode1 ssr_read_mode1
1108 #define sr_read_mode2 ssr_read_mode2
1109 #define sr_read_cd_mode2 ssr_read_cd_mode2
1110 #define sr_sector_mode ssr_sector_mode
1111 #define sr_eject ssr_eject
1112 #define sr_ejected ssr_ejected
1113 #define sr_check_wp ssr_check_wp
1114 #define sd_watch_request_submit ssd_watch_request_submit
1115 #define sd_check_media ssd_check_media
1116 #define sd_media_watch_cb ssd_media_watch_cb
1117 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
1118 #define sr_volume_ctrl ssr_volume_ctrl
1119 #define sr_read_sony_session_offset ssr_read_sony_session_offset
1120 #define sd_log_page_supported ssd_log_page_supported
1121 #define sd_check_for_writable_cd ssd_check_for_writable_cd
1122 #define sd_wm_cache_constructor ssd_wm_cache_constructor
1123 #define sd_wm_cache_destructor ssd_wm_cache_destructor
1124 #define sd_range_lock ssd_range_lock
1125 #define sd_get_range ssd_get_range
1126 #define sd_free_inlist_wmap ssd_free_inlist_wmap
1127 #define sd_range_unlock ssd_range_unlock
1128 #define sd_read_modify_write_task ssd_read_modify_write_task
1129 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw
1131 #define sd_iostart_chain ssd_iostart_chain
1132 #define sd_iodone_chain ssd_iodone_chain
1133 #define sd_initpkt_map ssd_initpkt_map
1134 #define sd_destroypkt_map ssd_destroypkt_map
1135 #define sd_chain_type_map ssd_chain_type_map
1136 #define sd_chain_index_map ssd_chain_index_map
1138 #define sd_failfast_flushctl ssd_failfast_flushctl
1139 #define sd_failfast_flushq ssd_failfast_flushq
1140 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback
1142 #define sd_is_lsi ssd_is_lsi
1143 #define sd_tg_rdwr ssd_tg_rdwr
1144 #define sd_tg_getinfo ssd_tg_getinfo
1145 #define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler
1147 #endif /* #if (defined(__fibre)) */
1152 int _info(struct modinfo
*modinfop
);
1155 static void sd_log_trace(uint_t comp
, struct sd_lun
*un
, const char *fmt
, ...);
1157 static void sd_log_info(uint_t comp
, struct sd_lun
*un
, const char *fmt
, ...);
1159 static void sd_log_err(uint_t comp
, struct sd_lun
*un
, const char *fmt
, ...);
1161 static int sdprobe(dev_info_t
*devi
);
1162 static int sdinfo(dev_info_t
*dip
, ddi_info_cmd_t infocmd
, void *arg
,
1164 static int sd_prop_op(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
,
1165 int mod_flags
, char *name
, caddr_t valuep
, int *lengthp
);
1168 * Smart probe for parallel scsi
1170 static void sd_scsi_probe_cache_init(void);
1171 static void sd_scsi_probe_cache_fini(void);
1172 static void sd_scsi_clear_probe_cache(void);
1173 static int sd_scsi_probe_with_cache(struct scsi_device
*devp
, int (*fn
)());
1176 * Attached luns on target for parallel scsi
1178 static void sd_scsi_target_lun_init(void);
1179 static void sd_scsi_target_lun_fini(void);
1180 static int sd_scsi_get_target_lun_count(dev_info_t
*dip
, int target
);
1181 static void sd_scsi_update_lun_on_target(dev_info_t
*dip
, int target
, int flag
);
1183 static int sd_spin_up_unit(sd_ssc_t
*ssc
);
1186 * Using sd_ssc_init to establish sd_ssc_t struct
1187 * Using sd_ssc_send to send uscsi internal command
1188 * Using sd_ssc_fini to free sd_ssc_t struct
1190 static sd_ssc_t
*sd_ssc_init(struct sd_lun
*un
);
1191 static int sd_ssc_send(sd_ssc_t
*ssc
, struct uscsi_cmd
*incmd
,
1192 int flag
, enum uio_seg dataspace
, int path_flag
);
1193 static void sd_ssc_fini(sd_ssc_t
*ssc
);
1196 * Using sd_ssc_assessment to set correct type-of-assessment
1197 * Using sd_ssc_post to post ereport & system log
1198 * sd_ssc_post will call sd_ssc_print to print system log
1199 * sd_ssc_post will call sd_ssd_ereport_post to post ereport
1201 static void sd_ssc_assessment(sd_ssc_t
*ssc
,
1202 enum sd_type_assessment tp_assess
);
1204 static void sd_ssc_post(sd_ssc_t
*ssc
, enum sd_driver_assessment sd_assess
);
1205 static void sd_ssc_print(sd_ssc_t
*ssc
, int sd_severity
);
1206 static void sd_ssc_ereport_post(sd_ssc_t
*ssc
,
1207 enum sd_driver_assessment drv_assess
);
1210 * Using sd_ssc_set_info to mark an un-decodable-data error.
1211 * Using sd_ssc_extract_info to transfer information from internal
1212 * data structures to sd_ssc_t.
1214 static void sd_ssc_set_info(sd_ssc_t
*ssc
, int ssc_flags
, uint_t comp
,
1215 const char *fmt
, ...);
1216 static void sd_ssc_extract_info(sd_ssc_t
*ssc
, struct sd_lun
*un
,
1217 struct scsi_pkt
*pktp
, struct buf
*bp
, struct sd_xbuf
*xp
);
1219 static int sd_send_scsi_cmd(dev_t dev
, struct uscsi_cmd
*incmd
, int flag
,
1220 enum uio_seg dataspace
, int path_flag
);
1223 static void sd_enable_descr_sense(sd_ssc_t
*ssc
);
1224 static void sd_reenable_dsense_task(void *arg
);
1227 static void sd_set_mmc_caps(sd_ssc_t
*ssc
);
1229 static void sd_read_unit_properties(struct sd_lun
*un
);
1230 static int sd_process_sdconf_file(struct sd_lun
*un
);
1231 static void sd_nvpair_str_decode(struct sd_lun
*un
, char *nvpair_str
);
1232 static char *sd_strtok_r(char *string
, const char *sepset
, char **lasts
);
1233 static void sd_set_properties(struct sd_lun
*un
, char *name
, char *value
);
1234 static void sd_get_tunables_from_conf(struct sd_lun
*un
, int flags
,
1235 int *data_list
, sd_tunables
*values
);
1236 static void sd_process_sdconf_table(struct sd_lun
*un
);
1237 static int sd_sdconf_id_match(struct sd_lun
*un
, char *id
, int idlen
);
1238 static int sd_blank_cmp(struct sd_lun
*un
, char *id
, int idlen
);
1239 static int sd_chk_vers1_data(struct sd_lun
*un
, int flags
, int *prop_list
,
1240 int list_len
, char *dataname_ptr
);
1241 static void sd_set_vers1_properties(struct sd_lun
*un
, int flags
,
1242 sd_tunables
*prop_list
);
1244 static void sd_register_devid(sd_ssc_t
*ssc
, dev_info_t
*devi
,
1245 int reservation_flag
);
1246 static int sd_get_devid(sd_ssc_t
*ssc
);
1247 static ddi_devid_t
sd_create_devid(sd_ssc_t
*ssc
);
1248 static int sd_write_deviceid(sd_ssc_t
*ssc
);
1249 static int sd_get_devid_page(struct sd_lun
*un
, uchar_t
*wwn
, int *len
);
1250 static int sd_check_vpd_page_support(sd_ssc_t
*ssc
);
1252 static void sd_setup_pm(sd_ssc_t
*ssc
, dev_info_t
*devi
);
1253 static void sd_create_pm_components(dev_info_t
*devi
, struct sd_lun
*un
);
1255 static int sd_ddi_suspend(dev_info_t
*devi
);
1256 static int sd_ddi_resume(dev_info_t
*devi
);
1257 static int sd_pm_state_change(struct sd_lun
*un
, int level
, int flag
);
1258 static int sdpower(dev_info_t
*devi
, int component
, int level
);
1260 static int sdattach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
);
1261 static int sddetach(dev_info_t
*devi
, ddi_detach_cmd_t cmd
);
1262 static int sd_unit_attach(dev_info_t
*devi
);
1263 static int sd_unit_detach(dev_info_t
*devi
);
1265 static void sd_set_unit_attributes(struct sd_lun
*un
, dev_info_t
*devi
);
1266 static void sd_create_errstats(struct sd_lun
*un
, int instance
);
1267 static void sd_set_errstats(struct sd_lun
*un
);
1268 static void sd_set_pstats(struct sd_lun
*un
);
1270 static int sddump(dev_t dev
, caddr_t addr
, daddr_t blkno
, int nblk
);
1271 static int sd_scsi_poll(struct sd_lun
*un
, struct scsi_pkt
*pkt
);
1272 static int sd_send_polled_RQS(struct sd_lun
*un
);
1273 static int sd_ddi_scsi_poll(struct scsi_pkt
*pkt
);
1275 #if (defined(__fibre))
1277 * Event callbacks (photon)
1279 static void sd_init_event_callbacks(struct sd_lun
*un
);
1280 static void sd_event_callback(dev_info_t
*, ddi_eventcookie_t
, void *, void *);
1284 * Defines for sd_cache_control
/* Values passed as the rcd_flag/wce_flag arguments to sd_cache_control() */
#define SD_CACHE_ENABLE 1	/* turn the cache setting on */
#define SD_CACHE_DISABLE 0	/* turn the cache setting off */
#define SD_CACHE_NOCHANGE -1	/* leave the current setting untouched */
1291 static int sd_cache_control(sd_ssc_t
*ssc
, int rcd_flag
, int wce_flag
);
1292 static int sd_get_write_cache_enabled(sd_ssc_t
*ssc
, int *is_enabled
);
1293 static void sd_get_nv_sup(sd_ssc_t
*ssc
);
1294 static dev_t
sd_make_device(dev_info_t
*devi
);
1295 static void sd_check_solid_state(sd_ssc_t
*ssc
);
1296 static void sd_check_emulation_mode(sd_ssc_t
*ssc
);
1297 static void sd_update_block_info(struct sd_lun
*un
, uint32_t lbasize
,
1301 * Driver entry point functions.
1303 static int sdopen(dev_t
*dev_p
, int flag
, int otyp
, cred_t
*cred_p
);
1304 static int sdclose(dev_t dev
, int flag
, int otyp
, cred_t
*cred_p
);
1305 static int sd_ready_and_valid(sd_ssc_t
*ssc
, int part
);
1307 static void sdmin(struct buf
*bp
);
1308 static int sdread(dev_t dev
, struct uio
*uio
, cred_t
*cred_p
);
1309 static int sdwrite(dev_t dev
, struct uio
*uio
, cred_t
*cred_p
);
1310 static int sdaread(dev_t dev
, struct aio_req
*aio
, cred_t
*cred_p
);
1311 static int sdawrite(dev_t dev
, struct aio_req
*aio
, cred_t
*cred_p
);
1313 static int sdstrategy(struct buf
*bp
);
1314 static int sdioctl(dev_t
, int, intptr_t, int, cred_t
*, int *);
1317 * Function prototypes for layering functions in the iostart chain.
1319 static void sd_mapblockaddr_iostart(int index
, struct sd_lun
*un
,
1321 static void sd_mapblocksize_iostart(int index
, struct sd_lun
*un
,
1323 static void sd_checksum_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
);
1324 static void sd_checksum_uscsi_iostart(int index
, struct sd_lun
*un
,
1326 static void sd_pm_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
);
1327 static void sd_core_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
);
1330 * Function prototypes for layering functions in the iodone chain.
1332 static void sd_buf_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
);
1333 static void sd_uscsi_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
);
1334 static void sd_mapblockaddr_iodone(int index
, struct sd_lun
*un
,
1336 static void sd_mapblocksize_iodone(int index
, struct sd_lun
*un
,
1338 static void sd_checksum_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
);
1339 static void sd_checksum_uscsi_iodone(int index
, struct sd_lun
*un
,
1341 static void sd_pm_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
);
1344 * Prototypes for functions to support buf(9S) based IO.
1346 static void sd_xbuf_strategy(struct buf
*bp
, ddi_xbuf_t xp
, void *arg
);
1347 static int sd_initpkt_for_buf(struct buf
*, struct scsi_pkt
**);
1348 static void sd_destroypkt_for_buf(struct buf
*);
1349 static int sd_setup_rw_pkt(struct sd_lun
*un
, struct scsi_pkt
**pktpp
,
1350 struct buf
*bp
, int flags
,
1351 int (*callback
)(caddr_t
), caddr_t callback_arg
,
1352 diskaddr_t lba
, uint32_t blockcount
);
1353 static int sd_setup_next_rw_pkt(struct sd_lun
*un
, struct scsi_pkt
*pktp
,
1354 struct buf
*bp
, diskaddr_t lba
, uint32_t blockcount
);
1357 * Prototypes for functions to support USCSI IO.
1359 static int sd_uscsi_strategy(struct buf
*bp
);
1360 static int sd_initpkt_for_uscsi(struct buf
*, struct scsi_pkt
**);
1361 static void sd_destroypkt_for_uscsi(struct buf
*);
1363 static void sd_xbuf_init(struct sd_lun
*un
, struct buf
*bp
, struct sd_xbuf
*xp
,
1364 uchar_t chain_type
, void *pktinfop
);
1366 static int sd_pm_entry(struct sd_lun
*un
);
1367 static void sd_pm_exit(struct sd_lun
*un
);
1369 static void sd_pm_idletimeout_handler(void *arg
);
1372 * sd_core internal functions (used at the sd_core_io layer).
1374 static void sd_add_buf_to_waitq(struct sd_lun
*un
, struct buf
*bp
);
1375 static void sdintr(struct scsi_pkt
*pktp
);
1376 static void sd_start_cmds(struct sd_lun
*un
, struct buf
*immed_bp
);
1378 static int sd_send_scsi_cmd(dev_t dev
, struct uscsi_cmd
*incmd
, int flag
,
1379 enum uio_seg dataspace
, int path_flag
);
1381 static struct buf
*sd_bioclone_alloc(struct buf
*bp
, size_t datalen
,
1382 daddr_t blkno
, int (*func
)(struct buf
*));
1383 static struct buf
*sd_shadow_buf_alloc(struct buf
*bp
, size_t datalen
,
1384 uint_t bflags
, daddr_t blkno
, int (*func
)(struct buf
*));
1385 static void sd_bioclone_free(struct buf
*bp
);
1386 static void sd_shadow_buf_free(struct buf
*bp
);
1388 static void sd_print_transport_rejected_message(struct sd_lun
*un
,
1389 struct sd_xbuf
*xp
, int code
);
1390 static void sd_print_incomplete_msg(struct sd_lun
*un
, struct buf
*bp
,
1391 void *arg
, int code
);
1392 static void sd_print_sense_failed_msg(struct sd_lun
*un
, struct buf
*bp
,
1393 void *arg
, int code
);
1394 static void sd_print_cmd_incomplete_msg(struct sd_lun
*un
, struct buf
*bp
,
1395 void *arg
, int code
);
1397 static void sd_retry_command(struct sd_lun
*un
, struct buf
*bp
,
1398 int retry_check_flag
,
1399 void (*user_funcp
)(struct sd_lun
*un
, struct buf
*bp
, void *argp
,
1401 void *user_arg
, int failure_code
, clock_t retry_delay
,
1402 void (*statp
)(kstat_io_t
*));
1404 static void sd_set_retry_bp(struct sd_lun
*un
, struct buf
*bp
,
1405 clock_t retry_delay
, void (*statp
)(kstat_io_t
*));
1407 static void sd_send_request_sense_command(struct sd_lun
*un
, struct buf
*bp
,
1408 struct scsi_pkt
*pktp
);
1409 static void sd_start_retry_command(void *arg
);
1410 static void sd_start_direct_priority_command(void *arg
);
1411 static void sd_return_failed_command(struct sd_lun
*un
, struct buf
*bp
,
1413 static void sd_return_failed_command_no_restart(struct sd_lun
*un
,
1414 struct buf
*bp
, int errcode
);
1415 static void sd_return_command(struct sd_lun
*un
, struct buf
*bp
);
1416 static void sd_sync_with_callback(struct sd_lun
*un
);
1417 static int sdrunout(caddr_t arg
);
1419 static void sd_mark_rqs_busy(struct sd_lun
*un
, struct buf
*bp
);
1420 static struct buf
*sd_mark_rqs_idle(struct sd_lun
*un
, struct sd_xbuf
*xp
);
1422 static void sd_reduce_throttle(struct sd_lun
*un
, int throttle_type
);
1423 static void sd_restore_throttle(void *arg
);
1425 static void sd_init_cdb_limits(struct sd_lun
*un
);
1427 static void sd_pkt_status_good(struct sd_lun
*un
, struct buf
*bp
,
1428 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1431 * Error handling functions
1433 static void sd_pkt_status_check_condition(struct sd_lun
*un
, struct buf
*bp
,
1434 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1435 static void sd_pkt_status_busy(struct sd_lun
*un
, struct buf
*bp
,
1436 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1437 static void sd_pkt_status_reservation_conflict(struct sd_lun
*un
,
1438 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1439 static void sd_pkt_status_qfull(struct sd_lun
*un
, struct buf
*bp
,
1440 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1442 static void sd_handle_request_sense(struct sd_lun
*un
, struct buf
*bp
,
1443 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1444 static void sd_handle_auto_request_sense(struct sd_lun
*un
, struct buf
*bp
,
1445 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1446 static int sd_validate_sense_data(struct sd_lun
*un
, struct buf
*bp
,
1447 struct sd_xbuf
*xp
, size_t actual_len
);
1448 static void sd_decode_sense(struct sd_lun
*un
, struct buf
*bp
,
1449 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1451 static void sd_print_sense_msg(struct sd_lun
*un
, struct buf
*bp
,
1452 void *arg
, int code
);
1454 static void sd_sense_key_no_sense(struct sd_lun
*un
, struct buf
*bp
,
1455 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1456 static void sd_sense_key_recoverable_error(struct sd_lun
*un
,
1457 uint8_t *sense_datap
,
1458 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1459 static void sd_sense_key_not_ready(struct sd_lun
*un
,
1460 uint8_t *sense_datap
,
1461 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1462 static void sd_sense_key_medium_or_hardware_error(struct sd_lun
*un
,
1463 uint8_t *sense_datap
,
1464 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1465 static void sd_sense_key_illegal_request(struct sd_lun
*un
, struct buf
*bp
,
1466 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1467 static void sd_sense_key_unit_attention(struct sd_lun
*un
,
1468 uint8_t *sense_datap
,
1469 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1470 static void sd_sense_key_fail_command(struct sd_lun
*un
, struct buf
*bp
,
1471 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1472 static void sd_sense_key_blank_check(struct sd_lun
*un
, struct buf
*bp
,
1473 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1474 static void sd_sense_key_aborted_command(struct sd_lun
*un
, struct buf
*bp
,
1475 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1476 static void sd_sense_key_default(struct sd_lun
*un
,
1477 uint8_t *sense_datap
,
1478 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1480 static void sd_print_retry_msg(struct sd_lun
*un
, struct buf
*bp
,
1481 void *arg
, int flag
);
1483 static void sd_pkt_reason_cmd_incomplete(struct sd_lun
*un
, struct buf
*bp
,
1484 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1485 static void sd_pkt_reason_cmd_tran_err(struct sd_lun
*un
, struct buf
*bp
,
1486 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1487 static void sd_pkt_reason_cmd_reset(struct sd_lun
*un
, struct buf
*bp
,
1488 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1489 static void sd_pkt_reason_cmd_aborted(struct sd_lun
*un
, struct buf
*bp
,
1490 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1491 static void sd_pkt_reason_cmd_timeout(struct sd_lun
*un
, struct buf
*bp
,
1492 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1493 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun
*un
, struct buf
*bp
,
1494 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1495 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun
*un
, struct buf
*bp
,
1496 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1497 static void sd_pkt_reason_default(struct sd_lun
*un
, struct buf
*bp
,
1498 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
);
1500 static void sd_reset_target(struct sd_lun
*un
, struct scsi_pkt
*pktp
);
1502 static void sd_start_stop_unit_callback(void *arg
);
1503 static void sd_start_stop_unit_task(void *arg
);
1505 static void sd_taskq_create(void);
1506 static void sd_taskq_delete(void);
1507 static void sd_target_change_task(void *arg
);
1508 static void sd_log_dev_status_event(struct sd_lun
*un
, char *esc
, int km_flag
);
1509 static void sd_log_lun_expansion_event(struct sd_lun
*un
, int km_flag
);
1510 static void sd_log_eject_request_event(struct sd_lun
*un
, int km_flag
);
1511 static void sd_media_change_task(void *arg
);
1513 static int sd_handle_mchange(struct sd_lun
*un
);
1514 static int sd_send_scsi_DOORLOCK(sd_ssc_t
*ssc
, int flag
, int path_flag
);
1515 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t
*ssc
, uint64_t *capp
,
1516 uint32_t *lbap
, int path_flag
);
1517 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t
*ssc
, uint64_t *capp
,
1518 uint32_t *lbap
, uint32_t *psp
, int path_flag
);
1519 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t
*ssc
, int pc_flag
,
1520 int flag
, int path_flag
);
1521 static int sd_send_scsi_INQUIRY(sd_ssc_t
*ssc
, uchar_t
*bufaddr
,
1522 size_t buflen
, uchar_t evpd
, uchar_t page_code
, size_t *residp
);
1523 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t
*ssc
, int flag
);
1524 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t
*ssc
,
1525 uchar_t usr_cmd
, uint16_t data_len
, uchar_t
*data_bufp
);
1526 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t
*ssc
,
1527 uchar_t usr_cmd
, uchar_t
*usr_bufp
);
1528 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun
*un
,
1529 struct dk_callback
*dkc
);
1530 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf
*bp
);
1531 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t
*ssc
,
1532 struct uscsi_cmd
*ucmdbuf
, uchar_t
*rqbuf
, uint_t rqbuflen
,
1533 uchar_t
*bufaddr
, uint_t buflen
, int path_flag
);
1534 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t
*ssc
,
1535 struct uscsi_cmd
*ucmdbuf
, uchar_t
*rqbuf
, uint_t rqbuflen
,
1536 uchar_t
*bufaddr
, uint_t buflen
, char feature
, int path_flag
);
1537 static int sd_send_scsi_MODE_SENSE(sd_ssc_t
*ssc
, int cdbsize
,
1538 uchar_t
*bufaddr
, size_t buflen
, uchar_t page_code
, int path_flag
);
1539 static int sd_send_scsi_MODE_SELECT(sd_ssc_t
*ssc
, int cdbsize
,
1540 uchar_t
*bufaddr
, size_t buflen
, uchar_t save_page
, int path_flag
);
1541 static int sd_send_scsi_RDWR(sd_ssc_t
*ssc
, uchar_t cmd
, void *bufaddr
,
1542 size_t buflen
, daddr_t start_block
, int path_flag
);
1543 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \
1544 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
1546 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\
1547 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\
1550 static int sd_send_scsi_LOG_SENSE(sd_ssc_t
*ssc
, uchar_t
*bufaddr
,
1551 uint16_t buflen
, uchar_t page_code
, uchar_t page_control
,
1552 uint16_t param_ptr
, int path_flag
);
1553 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t
*ssc
,
1554 uchar_t
*bufaddr
, size_t buflen
, uchar_t class_req
);
1555 static boolean_t
sd_gesn_media_data_valid(uchar_t
*data
);
1557 static int sd_alloc_rqs(struct scsi_device
*devp
, struct sd_lun
*un
);
1558 static void sd_free_rqs(struct sd_lun
*un
);
1560 static void sd_dump_memory(struct sd_lun
*un
, uint_t comp
, char *title
,
1561 uchar_t
*data
, int len
, int fmt
);
1562 static void sd_panic_for_res_conflict(struct sd_lun
*un
);
1565 * Disk Ioctl Function Prototypes
1567 static int sd_get_media_info(dev_t dev
, caddr_t arg
, int flag
);
1568 static int sd_get_media_info_ext(dev_t dev
, caddr_t arg
, int flag
);
1569 static int sd_dkio_ctrl_info(dev_t dev
, caddr_t arg
, int flag
);
1570 static int sd_dkio_get_temp(dev_t dev
, caddr_t arg
, int flag
);
1573 * Multi-host Ioctl Prototypes
1575 static int sd_check_mhd(dev_t dev
, int interval
);
1576 static int sd_mhd_watch_cb(caddr_t arg
, struct scsi_watch_result
*resultp
);
1577 static void sd_mhd_watch_incomplete(struct sd_lun
*un
, struct scsi_pkt
*pkt
);
1578 static char *sd_sname(uchar_t status
);
1579 static void sd_mhd_resvd_recover(void *arg
);
1580 static void sd_resv_reclaim_thread();
1581 static int sd_take_ownership(dev_t dev
, struct mhioctkown
*p
);
1582 static int sd_reserve_release(dev_t dev
, int cmd
);
1583 static void sd_rmv_resv_reclaim_req(dev_t dev
);
1584 static void sd_mhd_reset_notify_cb(caddr_t arg
);
1585 static int sd_persistent_reservation_in_read_keys(struct sd_lun
*un
,
1586 mhioc_inkeys_t
*usrp
, int flag
);
1587 static int sd_persistent_reservation_in_read_resv(struct sd_lun
*un
,
1588 mhioc_inresvs_t
*usrp
, int flag
);
1589 static int sd_mhdioc_takeown(dev_t dev
, caddr_t arg
, int flag
);
1590 static int sd_mhdioc_failfast(dev_t dev
, caddr_t arg
, int flag
);
1591 static int sd_mhdioc_release(dev_t dev
);
1592 static int sd_mhdioc_register_devid(dev_t dev
);
1593 static int sd_mhdioc_inkeys(dev_t dev
, caddr_t arg
, int flag
);
1594 static int sd_mhdioc_inresv(dev_t dev
, caddr_t arg
, int flag
);
1597 * SCSI removable prototypes
1599 static int sr_change_blkmode(dev_t dev
, int cmd
, intptr_t data
, int flag
);
1600 static int sr_change_speed(dev_t dev
, int cmd
, intptr_t data
, int flag
);
1601 static int sr_atapi_change_speed(dev_t dev
, int cmd
, intptr_t data
, int flag
);
1602 static int sr_pause_resume(dev_t dev
, int mode
);
1603 static int sr_play_msf(dev_t dev
, caddr_t data
, int flag
);
1604 static int sr_play_trkind(dev_t dev
, caddr_t data
, int flag
);
1605 static int sr_read_all_subcodes(dev_t dev
, caddr_t data
, int flag
);
1606 static int sr_read_subchannel(dev_t dev
, caddr_t data
, int flag
);
1607 static int sr_read_tocentry(dev_t dev
, caddr_t data
, int flag
);
1608 static int sr_read_tochdr(dev_t dev
, caddr_t data
, int flag
);
1609 static int sr_read_cdda(dev_t dev
, caddr_t data
, int flag
);
1610 static int sr_read_cdxa(dev_t dev
, caddr_t data
, int flag
);
1611 static int sr_read_mode1(dev_t dev
, caddr_t data
, int flag
);
1612 static int sr_read_mode2(dev_t dev
, caddr_t data
, int flag
);
1613 static int sr_read_cd_mode2(dev_t dev
, caddr_t data
, int flag
);
1614 static int sr_sector_mode(dev_t dev
, uint32_t blksize
);
1615 static int sr_eject(dev_t dev
);
1616 static void sr_ejected(register struct sd_lun
*un
);
1617 static int sr_check_wp(dev_t dev
);
1618 static opaque_t
sd_watch_request_submit(struct sd_lun
*un
);
1619 static int sd_check_media(dev_t dev
, enum dkio_state state
);
1620 static int sd_media_watch_cb(caddr_t arg
, struct scsi_watch_result
*resultp
);
1621 static void sd_delayed_cv_broadcast(void *arg
);
1622 static int sr_volume_ctrl(dev_t dev
, caddr_t data
, int flag
);
1623 static int sr_read_sony_session_offset(dev_t dev
, caddr_t data
, int flag
);
1625 static int sd_log_page_supported(sd_ssc_t
*ssc
, int log_page
);
1628 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
1630 static void sd_check_for_writable_cd(sd_ssc_t
*ssc
, int path_flag
);
1631 static int sd_wm_cache_constructor(void *wm
, void *un
, int flags
);
1632 static void sd_wm_cache_destructor(void *wm
, void *un
);
1633 static struct sd_w_map
*sd_range_lock(struct sd_lun
*un
, daddr_t startb
,
1634 daddr_t endb
, ushort_t typ
);
1635 static struct sd_w_map
*sd_get_range(struct sd_lun
*un
, daddr_t startb
,
1637 static void sd_free_inlist_wmap(struct sd_lun
*un
, struct sd_w_map
*wmp
);
1638 static void sd_range_unlock(struct sd_lun
*un
, struct sd_w_map
*wm
);
1639 static void sd_read_modify_write_task(void * arg
);
1641 sddump_do_read_of_rmw(struct sd_lun
*un
, uint64_t blkno
, uint64_t nblk
,
1646 * Function prototypes for failfast support.
1648 static void sd_failfast_flushq(struct sd_lun
*un
);
1649 static int sd_failfast_flushq_callback(struct buf
*bp
);
1652 * Function prototypes to check for lsi devices
1654 static void sd_is_lsi(struct sd_lun
*un
);
1657 * Function prototypes for partial DMA support
1659 static int sd_setup_next_xfer(struct sd_lun
*un
, struct buf
*bp
,
1660 struct scsi_pkt
*pkt
, struct sd_xbuf
*xp
);
1663 /* Function prototypes for cmlb */
1664 static int sd_tg_rdwr(dev_info_t
*devi
, uchar_t cmd
, void *bufaddr
,
1665 diskaddr_t start_block
, size_t reqlength
, void *tg_cookie
);
1667 static int sd_tg_getinfo(dev_info_t
*devi
, int cmd
, void *arg
, void *tg_cookie
);
1670 * For printing RMW warning message timely
1672 static void sd_rmw_msg_print_handler(void *arg
);
1675 * Constants for failfast support:
1677 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
1678 * failfast processing being performed.
1680 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1681 * failfast processing on all bufs with B_FAILFAST set.
1684 #define SD_FAILFAST_INACTIVE 0
1685 #define SD_FAILFAST_ACTIVE 1
1688 * Bitmask to control behavior of buf(9S) flushes when a transition to
1689 * the failfast state occurs. Optional bits include:
1691 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1692 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1695 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1696 * driver, in addition to the regular wait queue. This includes the xbuf
1697 * queues. When clear, only the driver's wait queue will be flushed.
1699 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
1700 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
1703 * The default behavior is to only flush bufs that have B_FAILFAST set, but
1704 * to flush all queues within the driver.
1706 static int sd_failfast_flushctl
= SD_FAILFAST_FLUSH_ALL_QUEUES
;
1710 * SD Testing Fault Injection
1712 #ifdef SD_FAULT_INJECTION
1713 static void sd_faultinjection_ioctl(int cmd
, intptr_t arg
, struct sd_lun
*un
);
1714 static void sd_faultinjection(struct scsi_pkt
*pktp
);
1715 static void sd_injection_log(char *buf
, struct sd_lun
*un
);
1719 * Device driver ops vector
1721 static struct cb_ops sd_cb_ops
= {
1723 sdclose
, /* close */
1724 sdstrategy
, /* strategy */
1728 sdwrite
, /* write */
1729 sdioctl
, /* ioctl */
1733 nochpoll
, /* poll */
1734 sd_prop_op
, /* cb_prop_op */
1736 D_64BIT
| D_MP
| D_NEW
| D_HOTPLUG
, /* Driver compatibility flags */
1737 CB_REV
, /* cb_rev */
1738 sdaread
, /* async I/O read entry point */
1739 sdawrite
/* async I/O write entry point */
1742 struct dev_ops sd_ops
= {
1743 DEVO_REV
, /* devo_rev, */
1746 nulldev
, /* identify */
1747 sdprobe
, /* probe */
1748 sdattach
, /* attach */
1749 sddetach
, /* detach */
1751 &sd_cb_ops
, /* driver operations */
1752 NULL
, /* bus operations */
1753 sdpower
, /* power */
1754 ddi_quiesce_not_needed
, /* quiesce */
1758 * This is the loadable module wrapper.
1760 #include <sys/modctl.h>
1762 #ifndef XPV_HVM_DRIVER
1763 static struct modldrv modldrv
= {
1764 &mod_driverops
, /* Type of module. This one is a driver */
1765 SD_MODULE_NAME
, /* Module name. */
1766 &sd_ops
/* driver ops */
1769 static struct modlinkage modlinkage
= {
1770 MODREV_1
, &modldrv
, NULL
1773 #else /* XPV_HVM_DRIVER */
1774 static struct modlmisc modlmisc
= {
1775 &mod_miscops
, /* Type of module. This one is a misc */
1776 "HVM " SD_MODULE_NAME
, /* Module name. */
1779 static struct modlinkage modlinkage
= {
1780 MODREV_1
, &modlmisc
, NULL
1783 #endif /* XPV_HVM_DRIVER */
1785 static cmlb_tg_ops_t sd_tgops
= {
1786 TG_DK_OPS_VERSION_1
,
1791 static struct scsi_asq_key_strings sd_additional_codes
[] = {
1792 0x81, 0, "Logical Unit is Reserved",
1793 0x85, 0, "Audio Address Not Valid",
1794 0xb6, 0, "Media Load Mechanism Failed",
1795 0xB9, 0, "Audio Play Operation Aborted",
1796 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1797 0x53, 2, "Medium removal prevented",
1798 0x6f, 0, "Authentication failed during key exchange",
1799 0x6f, 1, "Key not present",
1800 0x6f, 2, "Key not established",
1801 0x6f, 3, "Read without proper authentication",
1802 0x6f, 4, "Mismatched region to this logical unit",
1803 0x6f, 5, "Region reset count error",
1809 * Struct for passing printing information for sense data messages
1811 struct sd_sense_info
{
1817 * Table of function pointers for iostart-side routines. Separate "chains"
1818 * of layered function calls are formed by placing the function pointers
1819 * sequentially in the desired order. Functions are called according to an
1820 * incrementing table index ordering. The last function in each chain must
1821 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1822 * in the sd_iodone_chain[] array.
1824 * Note: It may seem more natural to organize both the iostart and iodone
1825 * functions together, into an array of structures (or some similar
1826 * organization) with a common index, rather than two separate arrays which
1827 * must be maintained in synchronization. The purpose of this division is
1828 * to achieve improved performance: individual arrays allows for more
1829 * effective cache line utilization on certain platforms.
1832 typedef void (*sd_chain_t
)(int index
, struct sd_lun
*un
, struct buf
*bp
);
1835 static sd_chain_t sd_iostart_chain
[] = {
1837 /* Chain for buf IO for disk drive targets (PM enabled) */
1838 sd_mapblockaddr_iostart
, /* Index: 0 */
1839 sd_pm_iostart
, /* Index: 1 */
1840 sd_core_iostart
, /* Index: 2 */
1842 /* Chain for buf IO for disk drive targets (PM disabled) */
1843 sd_mapblockaddr_iostart
, /* Index: 3 */
1844 sd_core_iostart
, /* Index: 4 */
1847 * Chain for buf IO for removable-media or large sector size
1848 * disk drive targets with RMW needed (PM enabled)
1850 sd_mapblockaddr_iostart
, /* Index: 5 */
1851 sd_mapblocksize_iostart
, /* Index: 6 */
1852 sd_pm_iostart
, /* Index: 7 */
1853 sd_core_iostart
, /* Index: 8 */
1856 * Chain for buf IO for removable-media or large sector size
1857 * disk drive targets with RMW needed (PM disabled)
1859 sd_mapblockaddr_iostart
, /* Index: 9 */
1860 sd_mapblocksize_iostart
, /* Index: 10 */
1861 sd_core_iostart
, /* Index: 11 */
1863 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1864 sd_mapblockaddr_iostart
, /* Index: 12 */
1865 sd_checksum_iostart
, /* Index: 13 */
1866 sd_pm_iostart
, /* Index: 14 */
1867 sd_core_iostart
, /* Index: 15 */
1869 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1870 sd_mapblockaddr_iostart
, /* Index: 16 */
1871 sd_checksum_iostart
, /* Index: 17 */
1872 sd_core_iostart
, /* Index: 18 */
1874 /* Chain for USCSI commands (all targets) */
1875 sd_pm_iostart
, /* Index: 19 */
1876 sd_core_iostart
, /* Index: 20 */
1878 /* Chain for checksumming USCSI commands (all targets) */
1879 sd_checksum_uscsi_iostart
, /* Index: 21 */
1880 sd_pm_iostart
, /* Index: 22 */
1881 sd_core_iostart
, /* Index: 23 */
1883 /* Chain for "direct" USCSI commands (all targets) */
1884 sd_core_iostart
, /* Index: 24 */
1886 /* Chain for "direct priority" USCSI commands (all targets) */
1887 sd_core_iostart
, /* Index: 25 */
1890 * Chain for buf IO for large sector size disk drive targets
1891 * with RMW needed with checksumming (PM enabled)
1893 sd_mapblockaddr_iostart
, /* Index: 26 */
1894 sd_mapblocksize_iostart
, /* Index: 27 */
1895 sd_checksum_iostart
, /* Index: 28 */
1896 sd_pm_iostart
, /* Index: 29 */
1897 sd_core_iostart
, /* Index: 30 */
1900 * Chain for buf IO for large sector size disk drive targets
1901 * with RMW needed with checksumming (PM disabled)
1903 sd_mapblockaddr_iostart
, /* Index: 31 */
1904 sd_mapblocksize_iostart
, /* Index: 32 */
1905 sd_checksum_iostart
, /* Index: 33 */
1906 sd_core_iostart
, /* Index: 34 */
1911 * Macros to locate the first function of each iostart chain in the
1912 * sd_iostart_chain[] array. These are located by the index in the array.
1914 #define SD_CHAIN_DISK_IOSTART 0
1915 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1916 #define SD_CHAIN_MSS_DISK_IOSTART 5
1917 #define SD_CHAIN_RMMEDIA_IOSTART 5
1918 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1919 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1920 #define SD_CHAIN_CHKSUM_IOSTART 12
1921 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1922 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1923 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1924 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1925 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1926 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1927 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1931 * Table of function pointers for the iodone-side routines for the driver-
1932 * internal layering mechanism. The calling sequence for iodone routines
1933 * uses a decrementing table index, so the last routine called in a chain
1934 * must be at the lowest array index location for that chain. The last
1935 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1936 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1937 * of the functions in an iodone side chain must correspond to the ordering
1938 * of the iostart routines for that chain. Note that there is no iodone
1939 * side routine that corresponds to sd_core_iostart(), so there is no
1940 * entry in the table for this.
1943 static sd_chain_t sd_iodone_chain
[] = {
1945 /* Chain for buf IO for disk drive targets (PM enabled) */
1946 sd_buf_iodone
, /* Index: 0 */
1947 sd_mapblockaddr_iodone
, /* Index: 1 */
1948 sd_pm_iodone
, /* Index: 2 */
1950 /* Chain for buf IO for disk drive targets (PM disabled) */
1951 sd_buf_iodone
, /* Index: 3 */
1952 sd_mapblockaddr_iodone
, /* Index: 4 */
1955 * Chain for buf IO for removable-media or large sector size
1956 * disk drive targets with RMW needed (PM enabled)
1958 sd_buf_iodone
, /* Index: 5 */
1959 sd_mapblockaddr_iodone
, /* Index: 6 */
1960 sd_mapblocksize_iodone
, /* Index: 7 */
1961 sd_pm_iodone
, /* Index: 8 */
1964 * Chain for buf IO for removable-media or large sector size
1965 * disk drive targets with RMW needed (PM disabled)
1967 sd_buf_iodone
, /* Index: 9 */
1968 sd_mapblockaddr_iodone
, /* Index: 10 */
1969 sd_mapblocksize_iodone
, /* Index: 11 */
1971 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1972 sd_buf_iodone
, /* Index: 12 */
1973 sd_mapblockaddr_iodone
, /* Index: 13 */
1974 sd_checksum_iodone
, /* Index: 14 */
1975 sd_pm_iodone
, /* Index: 15 */
1977 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1978 sd_buf_iodone
, /* Index: 16 */
1979 sd_mapblockaddr_iodone
, /* Index: 17 */
1980 sd_checksum_iodone
, /* Index: 18 */
1982 /* Chain for USCSI commands (non-checksum targets) */
1983 sd_uscsi_iodone
, /* Index: 19 */
1984 sd_pm_iodone
, /* Index: 20 */
1986 /* Chain for USCSI commands (checksum targets) */
1987 sd_uscsi_iodone
, /* Index: 21 */
1988 sd_checksum_uscsi_iodone
, /* Index: 22 */
1989 sd_pm_iodone
, /* Index: 23 */
1991 /* Chain for "direct" USCSI commands (all targets) */
1992 sd_uscsi_iodone
, /* Index: 24 */
1994 /* Chain for "direct priority" USCSI commands (all targets) */
1995 sd_uscsi_iodone
, /* Index: 25 */
1998 * Chain for buf IO for large sector size disk drive targets
1999 * with checksumming (PM enabled)
2001 sd_buf_iodone
, /* Index: 26 */
2002 sd_mapblockaddr_iodone
, /* Index: 27 */
2003 sd_mapblocksize_iodone
, /* Index: 28 */
2004 sd_checksum_iodone
, /* Index: 29 */
2005 sd_pm_iodone
, /* Index: 30 */
2008 * Chain for buf IO for large sector size disk drive targets
2009 * with checksumming (PM disabled)
2011 sd_buf_iodone
, /* Index: 31 */
2012 sd_mapblockaddr_iodone
, /* Index: 32 */
2013 sd_mapblocksize_iodone
, /* Index: 33 */
2014 sd_checksum_iodone
, /* Index: 34 */
2019 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2020 * each iodone-side chain. These are located by the array index, but as the
2021 * iodone side functions are called in a decrementing-index order, the
2022 * highest index number in each chain must be specified (as these correspond
2023 * to the first function in the iodone chain that will be called by the core
2024 * at IO completion time).
2027 #define SD_CHAIN_DISK_IODONE 2
2028 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2029 #define SD_CHAIN_RMMEDIA_IODONE 8
2030 #define SD_CHAIN_MSS_DISK_IODONE 8
2031 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2032 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2033 #define SD_CHAIN_CHKSUM_IODONE 15
2034 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2035 #define SD_CHAIN_USCSI_CMD_IODONE 20
2036 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22
2037 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2038 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2039 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2040 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
2045 * Array to map a layering chain index to the appropriate initpkt routine.
2046 * The redundant entries are present so that the index used for accessing
2047 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2048 * with this table as well.
2050 typedef int (*sd_initpkt_t
)(struct buf
*, struct scsi_pkt
**);
2052 static sd_initpkt_t sd_initpkt_map
[] = {
2054 /* Chain for buf IO for disk drive targets (PM enabled) */
2055 sd_initpkt_for_buf
, /* Index: 0 */
2056 sd_initpkt_for_buf
, /* Index: 1 */
2057 sd_initpkt_for_buf
, /* Index: 2 */
2059 /* Chain for buf IO for disk drive targets (PM disabled) */
2060 sd_initpkt_for_buf
, /* Index: 3 */
2061 sd_initpkt_for_buf
, /* Index: 4 */
2064 * Chain for buf IO for removable-media or large sector size
2065 * disk drive targets (PM enabled)
2067 sd_initpkt_for_buf
, /* Index: 5 */
2068 sd_initpkt_for_buf
, /* Index: 6 */
2069 sd_initpkt_for_buf
, /* Index: 7 */
2070 sd_initpkt_for_buf
, /* Index: 8 */
2073 * Chain for buf IO for removable-media or large sector size
2074 * disk drive targets (PM disabled)
2076 sd_initpkt_for_buf
, /* Index: 9 */
2077 sd_initpkt_for_buf
, /* Index: 10 */
2078 sd_initpkt_for_buf
, /* Index: 11 */
2080 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2081 sd_initpkt_for_buf
, /* Index: 12 */
2082 sd_initpkt_for_buf
, /* Index: 13 */
2083 sd_initpkt_for_buf
, /* Index: 14 */
2084 sd_initpkt_for_buf
, /* Index: 15 */
2086 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2087 sd_initpkt_for_buf
, /* Index: 16 */
2088 sd_initpkt_for_buf
, /* Index: 17 */
2089 sd_initpkt_for_buf
, /* Index: 18 */
2091 /* Chain for USCSI commands (non-checksum targets) */
2092 sd_initpkt_for_uscsi
, /* Index: 19 */
2093 sd_initpkt_for_uscsi
, /* Index: 20 */
2095 /* Chain for USCSI commands (checksum targets) */
2096 sd_initpkt_for_uscsi
, /* Index: 21 */
2097 sd_initpkt_for_uscsi
, /* Index: 22 */
2098 sd_initpkt_for_uscsi
, /* Index: 23 */
2100 /* Chain for "direct" USCSI commands (all targets) */
2101 sd_initpkt_for_uscsi
, /* Index: 24 */
2103 /* Chain for "direct priority" USCSI commands (all targets) */
2104 sd_initpkt_for_uscsi
, /* Index: 25 */
2107 * Chain for buf IO for large sector size disk drive targets
2108 * with checksumming (PM enabled)
2110 sd_initpkt_for_buf
, /* Index: 26 */
2111 sd_initpkt_for_buf
, /* Index: 27 */
2112 sd_initpkt_for_buf
, /* Index: 28 */
2113 sd_initpkt_for_buf
, /* Index: 29 */
2114 sd_initpkt_for_buf
, /* Index: 30 */
2117 * Chain for buf IO for large sector size disk drive targets
2118 * with checksumming (PM disabled)
2120 sd_initpkt_for_buf
, /* Index: 31 */
2121 sd_initpkt_for_buf
, /* Index: 32 */
2122 sd_initpkt_for_buf
, /* Index: 33 */
2123 sd_initpkt_for_buf
, /* Index: 34 */
2128 * Array to map a layering chain index to the appropriate destroypkt routine.
2129 * The redundant entries are present so that the index used for accessing
2130 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2131 * with this table as well.
2133 typedef void (*sd_destroypkt_t
)(struct buf
*);
2135 static sd_destroypkt_t sd_destroypkt_map
[] = {
2137 /* Chain for buf IO for disk drive targets (PM enabled) */
2138 sd_destroypkt_for_buf
, /* Index: 0 */
2139 sd_destroypkt_for_buf
, /* Index: 1 */
2140 sd_destroypkt_for_buf
, /* Index: 2 */
2142 /* Chain for buf IO for disk drive targets (PM disabled) */
2143 sd_destroypkt_for_buf
, /* Index: 3 */
2144 sd_destroypkt_for_buf
, /* Index: 4 */
2147 * Chain for buf IO for removable-media or large sector size
2148 * disk drive targets (PM enabled)
2150 sd_destroypkt_for_buf
, /* Index: 5 */
2151 sd_destroypkt_for_buf
, /* Index: 6 */
2152 sd_destroypkt_for_buf
, /* Index: 7 */
2153 sd_destroypkt_for_buf
, /* Index: 8 */
2156 * Chain for buf IO for removable-media or large sector size
2157 * disk drive targets (PM disabled)
2159 sd_destroypkt_for_buf
, /* Index: 9 */
2160 sd_destroypkt_for_buf
, /* Index: 10 */
2161 sd_destroypkt_for_buf
, /* Index: 11 */
2163 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2164 sd_destroypkt_for_buf
, /* Index: 12 */
2165 sd_destroypkt_for_buf
, /* Index: 13 */
2166 sd_destroypkt_for_buf
, /* Index: 14 */
2167 sd_destroypkt_for_buf
, /* Index: 15 */
2169 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2170 sd_destroypkt_for_buf
, /* Index: 16 */
2171 sd_destroypkt_for_buf
, /* Index: 17 */
2172 sd_destroypkt_for_buf
, /* Index: 18 */
2174 /* Chain for USCSI commands (non-checksum targets) */
2175 sd_destroypkt_for_uscsi
, /* Index: 19 */
2176 sd_destroypkt_for_uscsi
, /* Index: 20 */
2178 /* Chain for USCSI commands (checksum targets) */
2179 sd_destroypkt_for_uscsi
, /* Index: 21 */
2180 sd_destroypkt_for_uscsi
, /* Index: 22 */
2181 sd_destroypkt_for_uscsi
, /* Index: 23 */
2183 /* Chain for "direct" USCSI commands (all targets) */
2184 sd_destroypkt_for_uscsi
, /* Index: 24 */
2186 /* Chain for "direct priority" USCSI commands (all targets) */
2187 sd_destroypkt_for_uscsi
, /* Index: 25 */
2190 * Chain for buf IO for large sector size disk drive targets
2191 * with checksumming (PM enabled)
2193 sd_destroypkt_for_buf
, /* Index: 26 */
2194 sd_destroypkt_for_buf
, /* Index: 27 */
2195 sd_destroypkt_for_buf
, /* Index: 28 */
2196 sd_destroypkt_for_buf
, /* Index: 29 */
2197 sd_destroypkt_for_buf
, /* Index: 30 */
2200 * Chain for buf IO for large sector size disk drive targets
2201 * with checksumming (PM disabled)
2203 sd_destroypkt_for_buf
, /* Index: 31 */
2204 sd_destroypkt_for_buf
, /* Index: 32 */
2205 sd_destroypkt_for_buf
, /* Index: 33 */
2206 sd_destroypkt_for_buf
, /* Index: 34 */
2212 * Array to map a layering chain index to the appropriate chain "type".
2213 * The chain type indicates a specific property/usage of the chain.
2214 * The redundant entries are present so that the index used for accessing
2215 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2216 * with this table as well.
2219 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2220 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2221 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2222 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2223 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2224 /* (for error recovery) */
2226 static int sd_chain_type_map
[] = {
2228 /* Chain for buf IO for disk drive targets (PM enabled) */
2229 SD_CHAIN_BUFIO
, /* Index: 0 */
2230 SD_CHAIN_BUFIO
, /* Index: 1 */
2231 SD_CHAIN_BUFIO
, /* Index: 2 */
2233 /* Chain for buf IO for disk drive targets (PM disabled) */
2234 SD_CHAIN_BUFIO
, /* Index: 3 */
2235 SD_CHAIN_BUFIO
, /* Index: 4 */
2238 * Chain for buf IO for removable-media or large sector size
2239 * disk drive targets (PM enabled)
2241 SD_CHAIN_BUFIO
, /* Index: 5 */
2242 SD_CHAIN_BUFIO
, /* Index: 6 */
2243 SD_CHAIN_BUFIO
, /* Index: 7 */
2244 SD_CHAIN_BUFIO
, /* Index: 8 */
2247 * Chain for buf IO for removable-media or large sector size
2248 * disk drive targets (PM disabled)
2250 SD_CHAIN_BUFIO
, /* Index: 9 */
2251 SD_CHAIN_BUFIO
, /* Index: 10 */
2252 SD_CHAIN_BUFIO
, /* Index: 11 */
2254 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2255 SD_CHAIN_BUFIO
, /* Index: 12 */
2256 SD_CHAIN_BUFIO
, /* Index: 13 */
2257 SD_CHAIN_BUFIO
, /* Index: 14 */
2258 SD_CHAIN_BUFIO
, /* Index: 15 */
2260 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2261 SD_CHAIN_BUFIO
, /* Index: 16 */
2262 SD_CHAIN_BUFIO
, /* Index: 17 */
2263 SD_CHAIN_BUFIO
, /* Index: 18 */
2265 /* Chain for USCSI commands (non-checksum targets) */
2266 SD_CHAIN_USCSI
, /* Index: 19 */
2267 SD_CHAIN_USCSI
, /* Index: 20 */
2269 /* Chain for USCSI commands (checksum targets) */
2270 SD_CHAIN_USCSI
, /* Index: 21 */
2271 SD_CHAIN_USCSI
, /* Index: 22 */
2272 SD_CHAIN_USCSI
, /* Index: 23 */
2274 /* Chain for "direct" USCSI commands (all targets) */
2275 SD_CHAIN_DIRECT
, /* Index: 24 */
2277 /* Chain for "direct priority" USCSI commands (all targets) */
2278 SD_CHAIN_DIRECT_PRIORITY
, /* Index: 25 */
2281 * Chain for buf IO for large sector size disk drive targets
2282 * with checksumming (PM enabled)
2284 SD_CHAIN_BUFIO
, /* Index: 26 */
2285 SD_CHAIN_BUFIO
, /* Index: 27 */
2286 SD_CHAIN_BUFIO
, /* Index: 28 */
2287 SD_CHAIN_BUFIO
, /* Index: 29 */
2288 SD_CHAIN_BUFIO
, /* Index: 30 */
2291 * Chain for buf IO for large sector size disk drive targets
2292 * with checksumming (PM disabled)
2294 SD_CHAIN_BUFIO
, /* Index: 31 */
2295 SD_CHAIN_BUFIO
, /* Index: 32 */
2296 SD_CHAIN_BUFIO
, /* Index: 33 */
2297 SD_CHAIN_BUFIO
, /* Index: 34 */
2301 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2302 #define SD_IS_BUFIO(xp) \
2303 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2305 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2306 #define SD_IS_DIRECT_PRIORITY(xp) \
2307 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2312 * Struct, array, and macros to map a specific chain to the appropriate
2313 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2315 * The sd_chain_index_map[] array is used at attach time to set the various
2316 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2317 * chain to be used with the instance. This allows different instances to use
2318 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart
2319 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2320 * values at sd_xbuf init time, this allows (1) layering chains may be changed
2321 * dynamically & without the use of locking; and (2) a layer may update the
2322 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2323 * to allow for deferred processing of an IO within the same chain from a
2324 * different execution context.
2327 struct sd_chain_index
{
2328 int sci_iostart_index
;
2329 int sci_iodone_index
;
2332 static struct sd_chain_index sd_chain_index_map
[] = {
2333 { SD_CHAIN_DISK_IOSTART
, SD_CHAIN_DISK_IODONE
},
2334 { SD_CHAIN_DISK_IOSTART_NO_PM
, SD_CHAIN_DISK_IODONE_NO_PM
},
2335 { SD_CHAIN_RMMEDIA_IOSTART
, SD_CHAIN_RMMEDIA_IODONE
},
2336 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM
, SD_CHAIN_RMMEDIA_IODONE_NO_PM
},
2337 { SD_CHAIN_CHKSUM_IOSTART
, SD_CHAIN_CHKSUM_IODONE
},
2338 { SD_CHAIN_CHKSUM_IOSTART_NO_PM
, SD_CHAIN_CHKSUM_IODONE_NO_PM
},
2339 { SD_CHAIN_USCSI_CMD_IOSTART
, SD_CHAIN_USCSI_CMD_IODONE
},
2340 { SD_CHAIN_USCSI_CHKSUM_IOSTART
, SD_CHAIN_USCSI_CHKSUM_IODONE
},
2341 { SD_CHAIN_DIRECT_CMD_IOSTART
, SD_CHAIN_DIRECT_CMD_IODONE
},
2342 { SD_CHAIN_PRIORITY_CMD_IOSTART
, SD_CHAIN_PRIORITY_CMD_IODONE
},
2343 { SD_CHAIN_MSS_CHKSUM_IOSTART
, SD_CHAIN_MSS_CHKSUM_IODONE
},
2344 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM
, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM
},
2350 * The following are indexes into the sd_chain_index_map[] array.
2353 /* un->un_buf_chain_type must be set to one of these */
2354 #define SD_CHAIN_INFO_DISK 0
2355 #define SD_CHAIN_INFO_DISK_NO_PM 1
2356 #define SD_CHAIN_INFO_RMMEDIA 2
2357 #define SD_CHAIN_INFO_MSS_DISK 2
2358 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2359 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
2360 #define SD_CHAIN_INFO_CHKSUM 4
2361 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2362 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
2363 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
2365 /* un->un_uscsi_chain_type must be set to one of these */
2366 #define SD_CHAIN_INFO_USCSI_CMD 6
2367 /* USCSI with PM disabled is the same as DIRECT */
2368 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2369 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2371 /* un->un_direct_chain_type must be set to one of these */
2372 #define SD_CHAIN_INFO_DIRECT_CMD 8
2374 /* un->un_priority_chain_type must be set to one of these */
2375 #define SD_CHAIN_INFO_PRIORITY_CMD 9
2377 /* size for devid inquiries */
2378 #define MAX_INQUIRY_SIZE 0xF0
2381 * Macros used by functions to pass a given buf(9S) struct along to the
2382 * next function in the layering chain for further processing.
2384 * In the following macros, passing more than three arguments to the called
2385 * routines causes the optimizer for the SPARC compiler to stop doing tail
2386 * call elimination which results in significant performance degradation.
2388 #define SD_BEGIN_IOSTART(index, un, bp) \
2389 ((*(sd_iostart_chain[index]))(index, un, bp))
2391 #define SD_BEGIN_IODONE(index, un, bp) \
2392 ((*(sd_iodone_chain[index]))(index, un, bp))
2394 #define SD_NEXT_IOSTART(index, un, bp) \
2395 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2397 #define SD_NEXT_IODONE(index, un, bp) \
2398 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2403 * Description: This is the driver _init(9E) entry point.
2405 * Return Code: Returns the value from mod_install(9F) or
2406 * ddi_soft_state_init(9F) as appropriate.
2408 * Context: Called when driver module loaded.
2416 /* establish driver name from module name */
2417 sd_label
= (char *)mod_modname(&modlinkage
);
2419 #ifndef XPV_HVM_DRIVER
2420 err
= ddi_soft_state_init(&sd_state
, sizeof (struct sd_lun
),
2426 #else /* XPV_HVM_DRIVER */
2427 /* Remove the leading "hvm_" from the module name */
2428 ASSERT(strncmp(sd_label
, "hvm_", strlen("hvm_")) == 0);
2429 sd_label
+= strlen("hvm_");
2431 #endif /* XPV_HVM_DRIVER */
2433 mutex_init(&sd_detach_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
2434 mutex_init(&sd_log_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
2435 mutex_init(&sd_label_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
2437 mutex_init(&sd_tr
.srq_resv_reclaim_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
2438 cv_init(&sd_tr
.srq_resv_reclaim_cv
, NULL
, CV_DRIVER
, NULL
);
2439 cv_init(&sd_tr
.srq_inprocess_cv
, NULL
, CV_DRIVER
, NULL
);
2442 * it's ok to init here even for fibre device
2444 sd_scsi_probe_cache_init();
2446 sd_scsi_target_lun_init();
2449 * Creating taskq before mod_install ensures that all callers (threads)
2450 * that enter the module after a successful mod_install encounter
2455 err
= mod_install(&modlinkage
);
2457 /* delete taskq if install fails */
2460 mutex_destroy(&sd_detach_mutex
);
2461 mutex_destroy(&sd_log_mutex
);
2462 mutex_destroy(&sd_label_mutex
);
2464 mutex_destroy(&sd_tr
.srq_resv_reclaim_mutex
);
2465 cv_destroy(&sd_tr
.srq_resv_reclaim_cv
);
2466 cv_destroy(&sd_tr
.srq_inprocess_cv
);
2468 sd_scsi_probe_cache_fini();
2470 sd_scsi_target_lun_fini();
2472 #ifndef XPV_HVM_DRIVER
2473 ddi_soft_state_fini(&sd_state
);
2474 #endif /* !XPV_HVM_DRIVER */
2485 * Description: This is the driver _fini(9E) entry point.
2487 * Return Code: Returns the value from mod_remove(9F)
2489 * Context: Called when driver module is unloaded.
2497 if ((err
= mod_remove(&modlinkage
)) != 0) {
2503 mutex_destroy(&sd_detach_mutex
);
2504 mutex_destroy(&sd_log_mutex
);
2505 mutex_destroy(&sd_label_mutex
);
2506 mutex_destroy(&sd_tr
.srq_resv_reclaim_mutex
);
2508 sd_scsi_probe_cache_fini();
2510 sd_scsi_target_lun_fini();
2512 cv_destroy(&sd_tr
.srq_resv_reclaim_cv
);
2513 cv_destroy(&sd_tr
.srq_inprocess_cv
);
2515 #ifndef XPV_HVM_DRIVER
2516 ddi_soft_state_fini(&sd_state
);
2517 #endif /* !XPV_HVM_DRIVER */
2526 * Description: This is the driver _info(9E) entry point.
2528 * Arguments: modinfop - pointer to the driver modinfo structure
2530 * Return Code: Returns the value from mod_info(9F).
2532 * Context: Kernel thread context
2536 _info(struct modinfo
*modinfop
)
2538 return (mod_info(&modlinkage
, modinfop
));
2543 * The following routines implement the driver message logging facility.
2544 * They provide component- and level- based debug output filtering.
2545 * Output may also be restricted to messages for a single instance by
2546 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2547 * to NULL, then messages for all instances are printed.
2549 * These routines have been cloned from each other due to the language
2550 * constraints of macros and variable argument list processing.
2555 * Function: sd_log_err
2557 * Description: This routine is called by the SD_ERROR macro for debug
2558 * logging of error conditions.
2560 * Arguments: comp - driver component being logged
2561 * dev - pointer to driver info structure
2562 * fmt - error string and format to be logged
2566 sd_log_err(uint_t comp
, struct sd_lun
*un
, const char *fmt
, ...)
2572 dev
= SD_DEVINFO(un
);
2573 ASSERT(dev
!= NULL
);
2576 * Filter messages based on the global component and level masks.
2577 * Also print if un matches the value of sd_debug_un, or if
2578 * sd_debug_un is set to NULL.
2580 if ((sd_component_mask
& comp
) && (sd_level_mask
& SD_LOGMASK_ERROR
) &&
2581 ((sd_debug_un
== NULL
) || (sd_debug_un
== un
))) {
2582 mutex_enter(&sd_log_mutex
);
2584 (void) vsprintf(sd_log_buf
, fmt
, ap
);
2586 scsi_log(dev
, sd_label
, CE_CONT
, "%s", sd_log_buf
);
2587 mutex_exit(&sd_log_mutex
);
2589 #ifdef SD_FAULT_INJECTION
2590 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask
));
2591 if (un
->sd_injection_mask
& comp
) {
2592 mutex_enter(&sd_log_mutex
);
2594 (void) vsprintf(sd_log_buf
, fmt
, ap
);
2596 sd_injection_log(sd_log_buf
, un
);
2597 mutex_exit(&sd_log_mutex
);
2604 * Function: sd_log_info
2606 * Description: This routine is called by the SD_INFO macro for debug
2607 * logging of general purpose informational conditions.
2609 * Arguments: comp - driver component being logged
2610 * dev - pointer to driver info structure
2611 * fmt - info string and format to be logged
2615 sd_log_info(uint_t component
, struct sd_lun
*un
, const char *fmt
, ...)
2621 dev
= SD_DEVINFO(un
);
2622 ASSERT(dev
!= NULL
);
2625 * Filter messages based on the global component and level masks.
2626 * Also print if un matches the value of sd_debug_un, or if
2627 * sd_debug_un is set to NULL.
2629 if ((sd_component_mask
& component
) &&
2630 (sd_level_mask
& SD_LOGMASK_INFO
) &&
2631 ((sd_debug_un
== NULL
) || (sd_debug_un
== un
))) {
2632 mutex_enter(&sd_log_mutex
);
2634 (void) vsprintf(sd_log_buf
, fmt
, ap
);
2636 scsi_log(dev
, sd_label
, CE_CONT
, "%s", sd_log_buf
);
2637 mutex_exit(&sd_log_mutex
);
2639 #ifdef SD_FAULT_INJECTION
2640 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask
));
2641 if (un
->sd_injection_mask
& component
) {
2642 mutex_enter(&sd_log_mutex
);
2644 (void) vsprintf(sd_log_buf
, fmt
, ap
);
2646 sd_injection_log(sd_log_buf
, un
);
2647 mutex_exit(&sd_log_mutex
);
2654 * Function: sd_log_trace
2656 * Description: This routine is called by the SD_TRACE macro for debug
2657 * logging of trace conditions (i.e. function entry/exit).
2659 * Arguments: comp - driver component being logged
2660 * dev - pointer to driver info structure
2661 * fmt - trace string and format to be logged
2665 sd_log_trace(uint_t component
, struct sd_lun
*un
, const char *fmt
, ...)
2671 dev
= SD_DEVINFO(un
);
2672 ASSERT(dev
!= NULL
);
2675 * Filter messages based on the global component and level masks.
2676 * Also print if un matches the value of sd_debug_un, or if
2677 * sd_debug_un is set to NULL.
2679 if ((sd_component_mask
& component
) &&
2680 (sd_level_mask
& SD_LOGMASK_TRACE
) &&
2681 ((sd_debug_un
== NULL
) || (sd_debug_un
== un
))) {
2682 mutex_enter(&sd_log_mutex
);
2684 (void) vsprintf(sd_log_buf
, fmt
, ap
);
2686 scsi_log(dev
, sd_label
, CE_CONT
, "%s", sd_log_buf
);
2687 mutex_exit(&sd_log_mutex
);
2689 #ifdef SD_FAULT_INJECTION
2690 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask
));
2691 if (un
->sd_injection_mask
& component
) {
2692 mutex_enter(&sd_log_mutex
);
2694 (void) vsprintf(sd_log_buf
, fmt
, ap
);
2696 sd_injection_log(sd_log_buf
, un
);
2697 mutex_exit(&sd_log_mutex
);
2706 * Description: This is the driver probe(9e) entry point function.
2708 * Arguments: devi - opaque device info handle
2710 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2711 * DDI_PROBE_FAILURE: If the probe failed.
2712 * DDI_PROBE_PARTIAL: If the instance is not present now,
2713 * but may be present in the future.
2717 sdprobe(dev_info_t
*devi
)
2719 struct scsi_device
*devp
;
2721 #ifndef XPV_HVM_DRIVER
2722 int instance
= ddi_get_instance(devi
);
2723 #endif /* !XPV_HVM_DRIVER */
2726 * if it wasn't for pln, sdprobe could actually be nulldev
2727 * in the "__fibre" case.
2729 if (ddi_dev_is_sid(devi
) == DDI_SUCCESS
) {
2730 return (DDI_PROBE_DONTCARE
);
2733 devp
= ddi_get_driver_private(devi
);
2736 /* Ooops... nexus driver is mis-configured... */
2737 return (DDI_PROBE_FAILURE
);
2740 #ifndef XPV_HVM_DRIVER
2741 if (ddi_get_soft_state(sd_state
, instance
) != NULL
) {
2742 return (DDI_PROBE_PARTIAL
);
2744 #endif /* !XPV_HVM_DRIVER */
2747 * Call the SCSA utility probe routine to see if we actually
2748 * have a target at this SCSI nexus.
2750 switch (sd_scsi_probe_with_cache(devp
, NULL_FUNC
)) {
2751 case SCSIPROBE_EXISTS
:
2752 switch (devp
->sd_inq
->inq_dtype
) {
2754 rval
= DDI_PROBE_SUCCESS
;
2756 case DTYPE_RODIRECT
:
2757 /* CDs etc. Can be removable media */
2758 rval
= DDI_PROBE_SUCCESS
;
2762 * Rewritable optical driver HP115AA
2763 * Can also be removable media
2767 * Do not attempt to bind to DTYPE_OPTICAL if
2768 * pre solaris 9 sparc sd behavior is required
2770 * If first time through and sd_dtype_optical_bind
2771 * has not been set in /etc/system check properties
2774 if (sd_dtype_optical_bind
< 0) {
2775 sd_dtype_optical_bind
= ddi_prop_get_int
2776 (DDI_DEV_T_ANY
, devi
, 0,
2777 "optical-device-bind", 1);
2780 if (sd_dtype_optical_bind
== 0) {
2781 rval
= DDI_PROBE_FAILURE
;
2783 rval
= DDI_PROBE_SUCCESS
;
2787 case DTYPE_NOTPRESENT
:
2789 rval
= DDI_PROBE_FAILURE
;
2794 rval
= DDI_PROBE_PARTIAL
;
2799 * This routine checks for resource allocation prior to freeing,
2800 * so it will take care of the "smart probing" case where a
2801 * scsi_probe() may or may not have been issued and will *not*
2802 * free previously-freed resources.
2812 * Description: This is the driver getinfo(9e) entry point function.
2813 * Given the device number, return the devinfo pointer from
2814 * the scsi_device structure or the instance number
2815 * associated with the dev_t.
2817 * Arguments: dip - pointer to device info structure
2818 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2819 * DDI_INFO_DEVT2INSTANCE)
2820 * arg - driver dev_t
2821 * resultp - user buffer for request response
2823 * Return Code: DDI_SUCCESS
2828 sdinfo(dev_info_t
*dip
, ddi_info_cmd_t infocmd
, void *arg
, void **result
)
2836 case DDI_INFO_DEVT2DEVINFO
:
2838 instance
= SDUNIT(dev
);
2839 if ((un
= ddi_get_soft_state(sd_state
, instance
)) == NULL
) {
2840 return (DDI_FAILURE
);
2842 *result
= (void *) SD_DEVINFO(un
);
2843 error
= DDI_SUCCESS
;
2845 case DDI_INFO_DEVT2INSTANCE
:
2847 instance
= SDUNIT(dev
);
2848 *result
= (void *)(uintptr_t)instance
;
2849 error
= DDI_SUCCESS
;
2852 error
= DDI_FAILURE
;
2858 * Function: sd_prop_op
2860 * Description: This is the driver prop_op(9e) entry point function.
2861 * Return the number of blocks for the partition in question
2862 * or forward the request to the property facilities.
2864 * Arguments: dev - device number
2865 * dip - pointer to device info structure
2866 * prop_op - property operator
2867 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2868 * name - pointer to property name
2869 * valuep - pointer or address of the user buffer
2870 * lengthp - property length
2872 * Return Code: DDI_PROP_SUCCESS
2873 * DDI_PROP_NOT_FOUND
2874 * DDI_PROP_UNDEFINED
2875 * DDI_PROP_NO_MEMORY
2876 * DDI_PROP_BUF_TOO_SMALL
2880 sd_prop_op(dev_t dev
, dev_info_t
*dip
, ddi_prop_op_t prop_op
, int mod_flags
,
2881 char *name
, caddr_t valuep
, int *lengthp
)
2885 if ((un
= ddi_get_soft_state(sd_state
, ddi_get_instance(dip
))) == NULL
)
2886 return (ddi_prop_op(dev
, dip
, prop_op
, mod_flags
,
2887 name
, valuep
, lengthp
));
2889 return (cmlb_prop_op(un
->un_cmlbhandle
,
2890 dev
, dip
, prop_op
, mod_flags
, name
, valuep
, lengthp
,
2891 SDPART(dev
), (void *)SD_PATH_DIRECT
));
2895 * The following functions are for smart probing:
2896 * sd_scsi_probe_cache_init()
2897 * sd_scsi_probe_cache_fini()
2898 * sd_scsi_clear_probe_cache()
2899 * sd_scsi_probe_with_cache()
2903 * Function: sd_scsi_probe_cache_init
2905 * Description: Initializes the probe response cache mutex and head pointer.
2907 * Context: Kernel thread context
2911 sd_scsi_probe_cache_init(void)
2913 mutex_init(&sd_scsi_probe_cache_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
2914 sd_scsi_probe_cache_head
= NULL
;
2919 * Function: sd_scsi_probe_cache_fini
2921 * Description: Frees all resources associated with the probe response cache.
2923 * Context: Kernel thread context
2927 sd_scsi_probe_cache_fini(void)
2929 struct sd_scsi_probe_cache
*cp
;
2930 struct sd_scsi_probe_cache
*ncp
;
2932 /* Clean up our smart probing linked list */
2933 for (cp
= sd_scsi_probe_cache_head
; cp
!= NULL
; cp
= ncp
) {
2935 kmem_free(cp
, sizeof (struct sd_scsi_probe_cache
));
2937 sd_scsi_probe_cache_head
= NULL
;
2938 mutex_destroy(&sd_scsi_probe_cache_mutex
);
2943 * Function: sd_scsi_clear_probe_cache
2945 * Description: This routine clears the probe response cache. This is
2946 * done when open() returns ENXIO so that when deferred
2947 * attach is attempted (possibly after a device has been
2948 * turned on) we will retry the probe. Since we don't know
2949 * which target we failed to open, we just clear the
2952 * Context: Kernel thread context
2956 sd_scsi_clear_probe_cache(void)
2958 struct sd_scsi_probe_cache
*cp
;
2961 mutex_enter(&sd_scsi_probe_cache_mutex
);
2962 for (cp
= sd_scsi_probe_cache_head
; cp
!= NULL
; cp
= cp
->next
) {
2964 * Reset all entries to SCSIPROBE_EXISTS. This will
2965 * force probing to be performed the next time
2966 * sd_scsi_probe_with_cache is called.
2968 for (i
= 0; i
< NTARGETS_WIDE
; i
++) {
2969 cp
->cache
[i
] = SCSIPROBE_EXISTS
;
2972 mutex_exit(&sd_scsi_probe_cache_mutex
);
2977 * Function: sd_scsi_probe_with_cache
2979 * Description: This routine implements support for a scsi device probe
2980 * with cache. The driver maintains a cache of the target
2981 * responses to scsi probes. If we get no response from a
2982 * target during a probe inquiry, we remember that, and we
2983 * avoid additional calls to scsi_probe on non-zero LUNs
2984 * on the same target until the cache is cleared. By doing
2985 * so we avoid the 1/4 sec selection timeout for nonzero
2986 * LUNs. lun0 of a target is always probed.
2988 * Arguments: devp - Pointer to a scsi_device(9S) structure
2989 * waitfunc - indicates what the allocator routines should
2990 * do when resources are not available. This value
2991 * is passed on to scsi_probe() when that routine
2994 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
2995 * otherwise the value returned by scsi_probe(9F).
2997 * Context: Kernel thread context
3001 sd_scsi_probe_with_cache(struct scsi_device
*devp
, int (*waitfn
)())
3003 struct sd_scsi_probe_cache
*cp
;
3004 dev_info_t
*pdip
= ddi_get_parent(devp
->sd_dev
);
3007 lun
= ddi_prop_get_int(DDI_DEV_T_ANY
, devp
->sd_dev
, DDI_PROP_DONTPASS
,
3008 SCSI_ADDR_PROP_LUN
, 0);
3009 tgt
= ddi_prop_get_int(DDI_DEV_T_ANY
, devp
->sd_dev
, DDI_PROP_DONTPASS
,
3010 SCSI_ADDR_PROP_TARGET
, -1);
3012 /* Make sure caching enabled and target in range */
3013 if ((tgt
< 0) || (tgt
>= NTARGETS_WIDE
)) {
3014 /* do it the old way (no cache) */
3015 return (scsi_probe(devp
, waitfn
));
3018 mutex_enter(&sd_scsi_probe_cache_mutex
);
3020 /* Find the cache for this scsi bus instance */
3021 for (cp
= sd_scsi_probe_cache_head
; cp
!= NULL
; cp
= cp
->next
) {
3022 if (cp
->pdip
== pdip
) {
3027 /* If we can't find a cache for this pdip, create one */
3031 cp
= kmem_zalloc(sizeof (struct sd_scsi_probe_cache
),
3034 cp
->next
= sd_scsi_probe_cache_head
;
3035 sd_scsi_probe_cache_head
= cp
;
3036 for (i
= 0; i
< NTARGETS_WIDE
; i
++) {
3037 cp
->cache
[i
] = SCSIPROBE_EXISTS
;
3041 mutex_exit(&sd_scsi_probe_cache_mutex
);
3043 /* Recompute the cache for this target if LUN zero */
3045 cp
->cache
[tgt
] = SCSIPROBE_EXISTS
;
3048 /* Don't probe if cache remembers a NORESP from a previous LUN. */
3049 if (cp
->cache
[tgt
] != SCSIPROBE_EXISTS
) {
3050 return (SCSIPROBE_NORESP
);
3053 /* Do the actual probe; save & return the result */
3054 return (cp
->cache
[tgt
] = scsi_probe(devp
, waitfn
));
3059 * Function: sd_scsi_target_lun_init
3061 * Description: Initializes the attached lun chain mutex and head pointer.
3063 * Context: Kernel thread context
3067 sd_scsi_target_lun_init(void)
3069 mutex_init(&sd_scsi_target_lun_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
3070 sd_scsi_target_lun_head
= NULL
;
3075 * Function: sd_scsi_target_lun_fini
3077 * Description: Frees all resources associated with the attached lun
3080 * Context: Kernel thread context
3084 sd_scsi_target_lun_fini(void)
3086 struct sd_scsi_hba_tgt_lun
*cp
;
3087 struct sd_scsi_hba_tgt_lun
*ncp
;
3089 for (cp
= sd_scsi_target_lun_head
; cp
!= NULL
; cp
= ncp
) {
3091 kmem_free(cp
, sizeof (struct sd_scsi_hba_tgt_lun
));
3093 sd_scsi_target_lun_head
= NULL
;
3094 mutex_destroy(&sd_scsi_target_lun_mutex
);
3099 * Function: sd_scsi_get_target_lun_count
3101 * Description: This routine will check in the attached lun chain to see
3102 * how many luns are attached on the required SCSI controller
3103 * and target. Currently, some capabilities like tagged queue
3104 * are supported per target based by HBA. So all luns in a
3105 * target have the same capabilities. Based on this assumption,
3106 * sd should only set these capabilities once per target. This
3107 * function is called when sd needs to decide how many luns
3108 * already attached on a target.
3110 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3111 * controller device.
3112 * target - The target ID on the controller's SCSI bus.
3114 * Return Code: The number of luns attached on the required target and
3116 * -1 if target ID is not in parallel SCSI scope or the given
3117 * dip is not in the chain.
3119 * Context: Kernel thread context
3123 sd_scsi_get_target_lun_count(dev_info_t
*dip
, int target
)
3125 struct sd_scsi_hba_tgt_lun
*cp
;
3127 if ((target
< 0) || (target
>= NTARGETS_WIDE
)) {
3131 mutex_enter(&sd_scsi_target_lun_mutex
);
3133 for (cp
= sd_scsi_target_lun_head
; cp
!= NULL
; cp
= cp
->next
) {
3134 if (cp
->pdip
== dip
) {
3139 mutex_exit(&sd_scsi_target_lun_mutex
);
3145 return (cp
->nlun
[target
]);
3150 * Function: sd_scsi_update_lun_on_target
3152 * Description: This routine is used to update the attached lun chain when a
3153 * lun is attached or detached on a target.
3155 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3156 * controller device.
3157 * target - The target ID on the controller's SCSI bus.
3158 * flag - Indicate the lun is attached or detached.
3160 * Context: Kernel thread context
3164 sd_scsi_update_lun_on_target(dev_info_t
*dip
, int target
, int flag
)
3166 struct sd_scsi_hba_tgt_lun
*cp
;
3168 mutex_enter(&sd_scsi_target_lun_mutex
);
3170 for (cp
= sd_scsi_target_lun_head
; cp
!= NULL
; cp
= cp
->next
) {
3171 if (cp
->pdip
== dip
) {
3176 if ((cp
== NULL
) && (flag
== SD_SCSI_LUN_ATTACH
)) {
3177 cp
= kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun
),
3180 cp
->next
= sd_scsi_target_lun_head
;
3181 sd_scsi_target_lun_head
= cp
;
3184 mutex_exit(&sd_scsi_target_lun_mutex
);
3187 if (flag
== SD_SCSI_LUN_ATTACH
) {
3188 cp
->nlun
[target
] ++;
3190 cp
->nlun
[target
] --;
3197 * Function: sd_spin_up_unit
3199 * Description: Issues the following commands to spin-up the device:
3200 * START STOP UNIT, and INQUIRY.
3202 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3203 * structure for this target.
3205 * Return Code: 0 - success
3207 * EACCES - reservation conflict
3209 * Context: Kernel thread context
3213 sd_spin_up_unit(sd_ssc_t
*ssc
)
3216 int has_conflict
= FALSE
;
3221 ASSERT(ssc
!= NULL
);
3226 * Send a throwaway START UNIT command.
3228 * If we fail on this, we don't care presently what precisely
3229 * is wrong. EMC's arrays will also fail this with a check
3230 * condition (0x2/0x4/0x3) if the device is "inactive," but
3231 * we don't want to fail the attach because it may become
3233 * We don't know if power condition is supported or not at
3234 * this stage, use START STOP bit.
3236 status
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_START_STOP
,
3237 SD_TARGET_START
, SD_PATH_DIRECT
);
3240 if (status
== EACCES
)
3241 has_conflict
= TRUE
;
3242 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3246 * Send another INQUIRY command to the target. This is necessary for
3247 * non-removable media direct access devices because their INQUIRY data
3248 * may not be fully qualified until they are spun up (perhaps via the
3249 * START command above). Note: This seems to be needed for some
3250 * legacy devices only.) The INQUIRY command should succeed even if a
3251 * Reservation Conflict is present.
3253 bufaddr
= kmem_zalloc(SUN_INQSIZE
, KM_SLEEP
);
3255 if (sd_send_scsi_INQUIRY(ssc
, bufaddr
, SUN_INQSIZE
, 0, 0, &resid
)
3257 kmem_free(bufaddr
, SUN_INQSIZE
);
3258 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
3263 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3264 * Note that this routine does not return a failure here even if the
3265 * INQUIRY command did not return any data. This is a legacy behavior.
3267 if ((SUN_INQSIZE
- resid
) >= SUN_MIN_INQLEN
) {
3268 bcopy(bufaddr
, SD_INQUIRY(un
), SUN_INQSIZE
);
3271 kmem_free(bufaddr
, SUN_INQSIZE
);
3273 /* If we hit a reservation conflict above, tell the caller. */
3274 if (has_conflict
== TRUE
) {
3283 * Function: sd_enable_descr_sense
3285 * Description: This routine attempts to select descriptor sense format
3286 * using the Control mode page. Devices that support 64 bit
3287 * LBAs (for >2TB luns) should also implement descriptor
3288 * sense data so we will call this function whenever we see
3289 * a lun larger than 2TB. If for some reason the device
3290 * supports 64 bit LBAs but doesn't support descriptor sense
3291 * presumably the mode select will fail. Everything will
3292 * continue to work normally except that we will not get
3293 * complete sense data for commands that fail with an LBA
3294 * larger than 32 bits.
3296 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3297 * structure for this target.
3299 * Context: Kernel thread context only
3303 sd_enable_descr_sense(sd_ssc_t
*ssc
)
3306 struct mode_control_scsi3
*ctrl_bufp
;
3312 ASSERT(ssc
!= NULL
);
3317 * Read MODE SENSE page 0xA, Control Mode Page
3319 buflen
= MODE_HEADER_LENGTH
+ MODE_BLK_DESC_LENGTH
+
3320 sizeof (struct mode_control_scsi3
);
3321 header
= kmem_zalloc(buflen
, KM_SLEEP
);
3323 status
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP0
, header
, buflen
,
3324 MODEPAGE_CTRL_MODE
, SD_PATH_DIRECT
);
3327 SD_ERROR(SD_LOG_COMMON
, un
,
3328 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3333 * Determine size of Block Descriptors in order to locate
3334 * the mode page data. ATAPI devices return 0, SCSI devices
3335 * should return MODE_BLK_DESC_LENGTH.
3337 bd_len
= ((struct mode_header
*)header
)->bdesc_length
;
3339 /* Clear the mode data length field for MODE SELECT */
3340 ((struct mode_header
*)header
)->length
= 0;
3342 ctrl_bufp
= (struct mode_control_scsi3
*)
3343 (header
+ MODE_HEADER_LENGTH
+ bd_len
);
3346 * If the page length is smaller than the expected value,
3347 * the target device doesn't support D_SENSE. Bail out here.
3349 if (ctrl_bufp
->mode_page
.length
<
3350 sizeof (struct mode_control_scsi3
) - 2) {
3351 SD_ERROR(SD_LOG_COMMON
, un
,
3352 "sd_enable_descr_sense: enable D_SENSE failed\n");
3357 * Clear PS bit for MODE SELECT
3359 ctrl_bufp
->mode_page
.ps
= 0;
3362 * Set D_SENSE to enable descriptor sense format.
3364 ctrl_bufp
->d_sense
= 1;
3366 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3369 * Use MODE SELECT to commit the change to the D_SENSE bit
3371 status
= sd_send_scsi_MODE_SELECT(ssc
, CDB_GROUP0
, header
,
3372 buflen
, SD_DONTSAVE_PAGE
, SD_PATH_DIRECT
);
3375 SD_INFO(SD_LOG_COMMON
, un
,
3376 "sd_enable_descr_sense: mode select ctrl page failed\n");
3378 kmem_free(header
, buflen
);
3383 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3384 kmem_free(header
, buflen
);
3388 * Function: sd_reenable_dsense_task
3390 * Description: Re-enable descriptor sense after device or bus reset
3392 * Context: Executes in a taskq() thread context
3395 sd_reenable_dsense_task(void *arg
)
3397 struct sd_lun
*un
= arg
;
3402 ssc
= sd_ssc_init(un
);
3403 sd_enable_descr_sense(ssc
);
3409 * Function: sd_set_mmc_caps
3411 * Description: This routine determines if the device is MMC compliant and if
3412 * the device supports CDDA via a mode sense of the CDVD
3413 * capabilities mode page. Also checks if the device is a
3414 * dvdram writable device.
3416 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3417 * structure for this target.
3419 * Context: Kernel thread context only
3423 sd_set_mmc_caps(sd_ssc_t
*ssc
)
3425 struct mode_header_grp2
*sense_mhp
;
3426 uchar_t
*sense_page
;
3430 struct uscsi_cmd com
;
3432 uchar_t
*out_data_rw
, *out_data_hd
;
3433 uchar_t
*rqbuf_rw
, *rqbuf_hd
;
3434 uchar_t
*out_data_gesn
;
3438 ASSERT(ssc
!= NULL
);
3443 * The flags which will be set in this function are - mmc compliant,
3444 * dvdram writable device, cdda support. Initialize them to FALSE
3445 * and if a capability is detected - it will be set to TRUE.
3447 un
->un_f_mmc_cap
= FALSE
;
3448 un
->un_f_dvdram_writable_device
= FALSE
;
3449 un
->un_f_cfg_cdda
= FALSE
;
3451 buf
= kmem_zalloc(BUFLEN_MODE_CDROM_CAP
, KM_SLEEP
);
3452 status
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP1
, (uchar_t
*)buf
,
3453 BUFLEN_MODE_CDROM_CAP
, MODEPAGE_CDROM_CAP
, SD_PATH_DIRECT
);
3455 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3458 /* command failed; just return */
3459 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3463 * If the mode sense request for the CDROM CAPABILITIES
3464 * page (0x2A) succeeds the device is assumed to be MMC.
3466 un
->un_f_mmc_cap
= TRUE
;
3468 /* See if GET STATUS EVENT NOTIFICATION is supported */
3469 if (un
->un_f_mmc_gesn_polling
) {
3470 gesn_len
= SD_GESN_HEADER_LEN
+ SD_GESN_MEDIA_DATA_LEN
;
3471 out_data_gesn
= kmem_zalloc(gesn_len
, KM_SLEEP
);
3473 rtn
= sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc
,
3474 out_data_gesn
, gesn_len
, 1 << SD_GESN_MEDIA_CLASS
);
3476 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3478 if ((rtn
!= 0) || !sd_gesn_media_data_valid(out_data_gesn
)) {
3479 un
->un_f_mmc_gesn_polling
= FALSE
;
3480 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
3481 "sd_set_mmc_caps: gesn not supported "
3482 "%d %x %x %x %x\n", rtn
,
3483 out_data_gesn
[0], out_data_gesn
[1],
3484 out_data_gesn
[2], out_data_gesn
[3]);
3487 kmem_free(out_data_gesn
, gesn_len
);
3490 /* Get to the page data */
3491 sense_mhp
= (struct mode_header_grp2
*)buf
;
3492 bd_len
= (sense_mhp
->bdesc_length_hi
<< 8) |
3493 sense_mhp
->bdesc_length_lo
;
3494 if (bd_len
> MODE_BLK_DESC_LENGTH
) {
3496 * We did not get back the expected block descriptor
3497 * length so we cannot determine if the device supports
3498 * CDDA. However, we still indicate the device is MMC
3499 * according to the successful response to the page
3500 * 0x2A mode sense request.
3502 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
3503 "sd_set_mmc_caps: Mode Sense returned "
3504 "invalid block descriptor length\n");
3505 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3509 /* See if read CDDA is supported */
3510 sense_page
= (uchar_t
*)(buf
+ MODE_HEADER_LENGTH_GRP2
+
3512 un
->un_f_cfg_cdda
= (sense_page
[5] & 0x01) ? TRUE
: FALSE
;
3514 /* See if writing DVD RAM is supported. */
3515 un
->un_f_dvdram_writable_device
= (sense_page
[3] & 0x20) ? TRUE
: FALSE
;
3516 if (un
->un_f_dvdram_writable_device
== TRUE
) {
3517 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3522 * If the device presents DVD or CD capabilities in the mode
3523 * page, we can return here since a RRD will not have
3524 * these capabilities.
3526 if ((sense_page
[2] & 0x3f) || (sense_page
[3] & 0x3f)) {
3527 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3530 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3533 * If un->un_f_dvdram_writable_device is still FALSE,
3534 * check for a Removable Rigid Disk (RRD). A RRD
3535 * device is identified by the features RANDOM_WRITABLE and
3536 * HARDWARE_DEFECT_MANAGEMENT.
3538 out_data_rw
= kmem_zalloc(SD_CURRENT_FEATURE_LEN
, KM_SLEEP
);
3539 rqbuf_rw
= kmem_zalloc(SENSE_LENGTH
, KM_SLEEP
);
3541 rtn
= sd_send_scsi_feature_GET_CONFIGURATION(ssc
, &com
, rqbuf_rw
,
3542 SENSE_LENGTH
, out_data_rw
, SD_CURRENT_FEATURE_LEN
,
3543 RANDOM_WRITABLE
, SD_PATH_STANDARD
);
3545 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3548 kmem_free(out_data_rw
, SD_CURRENT_FEATURE_LEN
);
3549 kmem_free(rqbuf_rw
, SENSE_LENGTH
);
3553 out_data_hd
= kmem_zalloc(SD_CURRENT_FEATURE_LEN
, KM_SLEEP
);
3554 rqbuf_hd
= kmem_zalloc(SENSE_LENGTH
, KM_SLEEP
);
3556 rtn
= sd_send_scsi_feature_GET_CONFIGURATION(ssc
, &com
, rqbuf_hd
,
3557 SENSE_LENGTH
, out_data_hd
, SD_CURRENT_FEATURE_LEN
,
3558 HARDWARE_DEFECT_MANAGEMENT
, SD_PATH_STANDARD
);
3560 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3564 * We have good information, check for random writable
3565 * and hardware defect features.
3567 if ((out_data_rw
[9] & RANDOM_WRITABLE
) &&
3568 (out_data_hd
[9] & HARDWARE_DEFECT_MANAGEMENT
)) {
3569 un
->un_f_dvdram_writable_device
= TRUE
;
3573 kmem_free(out_data_rw
, SD_CURRENT_FEATURE_LEN
);
3574 kmem_free(rqbuf_rw
, SENSE_LENGTH
);
3575 kmem_free(out_data_hd
, SD_CURRENT_FEATURE_LEN
);
3576 kmem_free(rqbuf_hd
, SENSE_LENGTH
);
3580 * Function: sd_check_for_writable_cd
3582 * Description: This routine determines if the media in the device is
3583 * writable or not. It uses the get configuration command (0x46)
3584 * to determine if the media is writable
3586 * Arguments: un - driver soft state (unit) structure
3587 * path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3588 * chain and the normal command waitq, or
3589 * SD_PATH_DIRECT_PRIORITY to use the USCSI
3590 * "direct" chain and bypass the normal command
3593 * Context: Never called at interrupt context.
3597 sd_check_for_writable_cd(sd_ssc_t
*ssc
, int path_flag
)
3599 struct uscsi_cmd com
;
3603 uchar_t
*out_data_rw
, *out_data_hd
;
3604 uchar_t
*rqbuf_rw
, *rqbuf_hd
;
3605 struct mode_header_grp2
*sense_mhp
;
3606 uchar_t
*sense_page
;
3612 ASSERT(ssc
!= NULL
);
3615 ASSERT(mutex_owned(SD_MUTEX(un
)));
3618 * Initialize the writable media to false, if configuration info.
3619 * tells us otherwise then only we will set it.
3621 un
->un_f_mmc_writable_media
= FALSE
;
3622 mutex_exit(SD_MUTEX(un
));
3624 out_data
= kmem_zalloc(SD_PROFILE_HEADER_LEN
, KM_SLEEP
);
3625 rqbuf
= kmem_zalloc(SENSE_LENGTH
, KM_SLEEP
);
3627 rtn
= sd_send_scsi_GET_CONFIGURATION(ssc
, &com
, rqbuf
, SENSE_LENGTH
,
3628 out_data
, SD_PROFILE_HEADER_LEN
, path_flag
);
3631 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3633 mutex_enter(SD_MUTEX(un
));
3636 * We have good information, check for writable DVD.
3638 if ((out_data
[6] == 0) && (out_data
[7] == 0x12)) {
3639 un
->un_f_mmc_writable_media
= TRUE
;
3640 kmem_free(out_data
, SD_PROFILE_HEADER_LEN
);
3641 kmem_free(rqbuf
, SENSE_LENGTH
);
3646 kmem_free(out_data
, SD_PROFILE_HEADER_LEN
);
3647 kmem_free(rqbuf
, SENSE_LENGTH
);
3650 * Determine if this is a RRD type device.
3652 mutex_exit(SD_MUTEX(un
));
3653 buf
= kmem_zalloc(BUFLEN_MODE_CDROM_CAP
, KM_SLEEP
);
3654 status
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP1
, (uchar_t
*)buf
,
3655 BUFLEN_MODE_CDROM_CAP
, MODEPAGE_CDROM_CAP
, path_flag
);
3657 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3659 mutex_enter(SD_MUTEX(un
));
3661 /* command failed; just return */
3662 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3666 /* Get to the page data */
3667 sense_mhp
= (struct mode_header_grp2
*)buf
;
3668 bd_len
= (sense_mhp
->bdesc_length_hi
<< 8) | sense_mhp
->bdesc_length_lo
;
3669 if (bd_len
> MODE_BLK_DESC_LENGTH
) {
3671 * We did not get back the expected block descriptor length so
3672 * we cannot check the mode page.
3674 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
3675 "sd_check_for_writable_cd: Mode Sense returned "
3676 "invalid block descriptor length\n");
3677 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3682 * If the device presents DVD or CD capabilities in the mode
3683 * page, we can return here since a RRD device will not have
3684 * these capabilities.
3686 sense_page
= (uchar_t
*)(buf
+ MODE_HEADER_LENGTH_GRP2
+ bd_len
);
3687 if ((sense_page
[2] & 0x3f) || (sense_page
[3] & 0x3f)) {
3688 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3691 kmem_free(buf
, BUFLEN_MODE_CDROM_CAP
);
3694 * If un->un_f_mmc_writable_media is still FALSE,
3695 * check for RRD type media. A RRD device is identified
3696 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3698 mutex_exit(SD_MUTEX(un
));
3699 out_data_rw
= kmem_zalloc(SD_CURRENT_FEATURE_LEN
, KM_SLEEP
);
3700 rqbuf_rw
= kmem_zalloc(SENSE_LENGTH
, KM_SLEEP
);
3702 rtn
= sd_send_scsi_feature_GET_CONFIGURATION(ssc
, &com
, rqbuf_rw
,
3703 SENSE_LENGTH
, out_data_rw
, SD_CURRENT_FEATURE_LEN
,
3704 RANDOM_WRITABLE
, path_flag
);
3706 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3708 kmem_free(out_data_rw
, SD_CURRENT_FEATURE_LEN
);
3709 kmem_free(rqbuf_rw
, SENSE_LENGTH
);
3710 mutex_enter(SD_MUTEX(un
));
3714 out_data_hd
= kmem_zalloc(SD_CURRENT_FEATURE_LEN
, KM_SLEEP
);
3715 rqbuf_hd
= kmem_zalloc(SENSE_LENGTH
, KM_SLEEP
);
3717 rtn
= sd_send_scsi_feature_GET_CONFIGURATION(ssc
, &com
, rqbuf_hd
,
3718 SENSE_LENGTH
, out_data_hd
, SD_CURRENT_FEATURE_LEN
,
3719 HARDWARE_DEFECT_MANAGEMENT
, path_flag
);
3721 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
3722 mutex_enter(SD_MUTEX(un
));
3725 * We have good information, check for random writable
3726 * and hardware defect features as current.
3728 if ((out_data_rw
[9] & RANDOM_WRITABLE
) &&
3729 (out_data_rw
[10] & 0x1) &&
3730 (out_data_hd
[9] & HARDWARE_DEFECT_MANAGEMENT
) &&
3731 (out_data_hd
[10] & 0x1)) {
3732 un
->un_f_mmc_writable_media
= TRUE
;
3736 kmem_free(out_data_rw
, SD_CURRENT_FEATURE_LEN
);
3737 kmem_free(rqbuf_rw
, SENSE_LENGTH
);
3738 kmem_free(out_data_hd
, SD_CURRENT_FEATURE_LEN
);
3739 kmem_free(rqbuf_hd
, SENSE_LENGTH
);
3743 * Function: sd_read_unit_properties
3745 * Description: The following implements a property lookup mechanism.
3746 * Properties for particular disks (keyed on vendor, model
3747 * and rev numbers) are sought in the sd.conf file via
3748 * sd_process_sdconf_file(), and if not found there, are
3749 * looked for in a list hardcoded in this driver via
3750 * sd_process_sdconf_table() Once located the properties
3751 * are used to update the driver unit structure.
3753 * Arguments: un - driver soft state (unit) structure
3757 sd_read_unit_properties(struct sd_lun
*un
)
3760 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3761 * the "sd-config-list" property (from the sd.conf file) or if
3762 * there was not a match for the inquiry vid/pid. If this event
3763 * occurs the static driver configuration table is searched for
3767 if (sd_process_sdconf_file(un
) == SD_FAILURE
) {
3768 sd_process_sdconf_table(un
);
3771 /* check for LSI device */
3779 * Function: sd_process_sdconf_file
3781 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3782 * driver's config file (ie, sd.conf) and update the driver
3783 * soft state structure accordingly.
3785 * Arguments: un - driver soft state (unit) structure
3787 * Return Code: SD_SUCCESS - The properties were successfully set according
3788 * to the driver configuration file.
3789 * SD_FAILURE - The driver config list was not obtained or
3790 * there was no vid/pid match. This indicates that
3791 * the static config table should be used.
3793 * The config file has a property, "sd-config-list". Currently we support
3794 * two kinds of formats. For both formats, the value of this property
3795 * is a list of duplets:
3801 * For the improved format, where
3803 * <duplet>:= "<vid+pid>","<tunable-list>"
3807 * <tunable-list>:= <tunable> [, <tunable> ]*;
3808 * <tunable> = <name> : <value>
3810 * The <vid+pid> is the string that is returned by the target device on a
3811 * SCSI inquiry command, the <tunable-list> contains one or more tunables
3812 * to apply to all target devices with the specified <vid+pid>.
3814 * Each <tunable> is a "<name> : <value>" pair.
3816 * For the old format, the structure of each duplet is as follows:
3818 * <duplet>:= "<vid+pid>","<data-property-name_list>"
3820 * The first entry of the duplet is the device ID string (the concatenated
3821 * vid & pid; not to be confused with a device_id). This is defined in
3822 * the same way as in the sd_disk_table.
3824 * The second part of the duplet is a string that identifies a
3825 * data-property-name-list. The data-property-name-list is defined as
3828 * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3830 * The syntax of <data-property-name> depends on the <version> field.
3832 * If version = SD_CONF_VERSION_1 we have the following syntax:
3834 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3836 * where the prop0 value will be used to set prop0 if bit0 set in the
3837 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1
3842 sd_process_sdconf_file(struct sd_lun
*un
)
3844 char **config_list
= NULL
;
3850 char *dataname_lasts
;
3851 int *data_list
= NULL
;
3852 uint_t data_list_len
;
3853 int rval
= SD_FAILURE
;
3858 /* Obtain the configuration list associated with the .conf file */
3859 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY
, SD_DEVINFO(un
),
3860 DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
, sd_config_list
,
3861 &config_list
, &nelements
) != DDI_PROP_SUCCESS
) {
3862 return (SD_FAILURE
);
3866 * Compare vids in each duplet to the inquiry vid - if a match is
3867 * made, get the data value and update the soft state structure
3870 * Each duplet should show as a pair of strings, return SD_FAILURE
3873 if (nelements
& 1) {
3874 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
3875 "sd-config-list should show as pairs of strings.\n");
3877 ddi_prop_free(config_list
);
3878 return (SD_FAILURE
);
3881 for (i
= 0; i
< nelements
; i
+= 2) {
3883 * Note: The assumption here is that each vid entry is on
3884 * a unique line from its associated duplet.
3886 vidptr
= config_list
[i
];
3887 vidlen
= (int)strlen(vidptr
);
3888 if ((vidlen
== 0) ||
3889 (sd_sdconf_id_match(un
, vidptr
, vidlen
) != SD_SUCCESS
)) {
3894 * dnlist contains 1 or more blank separated
3895 * data-property-name entries
3897 dnlist_ptr
= config_list
[i
+ 1];
3899 if (strchr(dnlist_ptr
, ':') != NULL
) {
3901 * Decode the improved format sd-config-list.
3903 sd_nvpair_str_decode(un
, dnlist_ptr
);
3906 * The old format sd-config-list, loop through all
3907 * data-property-name entries in the
3908 * data-property-name-list
3909 * setting the properties for each.
3911 for (dataname_ptr
= sd_strtok_r(dnlist_ptr
, " \t",
3912 &dataname_lasts
); dataname_ptr
!= NULL
;
3913 dataname_ptr
= sd_strtok_r(NULL
, " \t",
3917 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
3918 "sd_process_sdconf_file: disk:%s, "
3919 "data:%s\n", vidptr
, dataname_ptr
);
3921 /* Get the data list */
3922 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
,
3923 SD_DEVINFO(un
), 0, dataname_ptr
, &data_list
,
3924 &data_list_len
) != DDI_PROP_SUCCESS
) {
3925 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
3926 "sd_process_sdconf_file: data "
3927 "property (%s) has no value\n",
3932 version
= data_list
[0];
3934 if (version
== SD_CONF_VERSION_1
) {
3937 /* Set the properties */
3938 if (sd_chk_vers1_data(un
, data_list
[1],
3939 &data_list
[2], data_list_len
,
3940 dataname_ptr
) == SD_SUCCESS
) {
3941 sd_get_tunables_from_conf(un
,
3942 data_list
[1], &data_list
[2],
3944 sd_set_vers1_properties(un
,
3945 data_list
[1], &values
);
3951 scsi_log(SD_DEVINFO(un
), sd_label
,
3952 CE_WARN
, "data property %s version "
3954 dataname_ptr
, version
);
3958 ddi_prop_free(data_list
);
3963 /* free up the memory allocated by ddi_prop_lookup_string_array(). */
3965 ddi_prop_free(config_list
);
3972 * Function: sd_nvpair_str_decode()
3974 * Description: Parse the improved format sd-config-list to get
3975 * each entry of tunable, which includes a name-value pair.
3976 * Then call sd_set_properties() to set the property.
3978 * Arguments: un - driver soft state (unit) structure
3979 * nvpair_str - the tunable list
3982 sd_nvpair_str_decode(struct sd_lun
*un
, char *nvpair_str
)
3984 char *nv
, *name
, *value
, *token
;
3985 char *nv_lasts
, *v_lasts
, *x_lasts
;
3987 for (nv
= sd_strtok_r(nvpair_str
, ",", &nv_lasts
); nv
!= NULL
;
3988 nv
= sd_strtok_r(NULL
, ",", &nv_lasts
)) {
3989 token
= sd_strtok_r(nv
, ":", &v_lasts
);
3990 name
= sd_strtok_r(token
, " \t", &x_lasts
);
3991 token
= sd_strtok_r(NULL
, ":", &v_lasts
);
3992 value
= sd_strtok_r(token
, " \t", &x_lasts
);
3993 if (name
== NULL
|| value
== NULL
) {
3994 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
3995 "sd_nvpair_str_decode: "
3996 "name or value is not valid!\n");
3998 sd_set_properties(un
, name
, value
);
4004 * Function: sd_strtok_r()
4006 * Description: This function uses strpbrk and strspn to break
4007 * string into tokens on sequentially subsequent calls. Return
4008 * NULL when no non-separator characters remain. The first
4009 * argument is NULL for subsequent calls.
4012 sd_strtok_r(char *string
, const char *sepset
, char **lasts
)
4016 /* First or subsequent call */
4023 /* Skip leading separators */
4024 q
= string
+ strspn(string
, sepset
);
4029 if ((r
= strpbrk(q
, sepset
)) == NULL
)
4039 * Function: sd_set_properties()
4041 * Description: Set device properties based on the improved
4042 * format sd-config-list.
4044 * Arguments: un - driver soft state (unit) structure
4045 * name - supported tunable name
4046 * value - tunable value
4049 sd_set_properties(struct sd_lun
*un
, char *name
, char *value
)
4051 char *endptr
= NULL
;
4054 if (strcasecmp(name
, "cache-nonvolatile") == 0) {
4055 if (strcasecmp(value
, "true") == 0) {
4056 un
->un_f_suppress_cache_flush
= TRUE
;
4057 } else if (strcasecmp(value
, "false") == 0) {
4058 un
->un_f_suppress_cache_flush
= FALSE
;
4062 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4063 "suppress_cache_flush flag set to %d\n",
4064 un
->un_f_suppress_cache_flush
);
4068 if (strcasecmp(name
, "controller-type") == 0) {
4069 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4074 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4075 "ctype set to %d\n", un
->un_ctype
);
4079 if (strcasecmp(name
, "delay-busy") == 0) {
4080 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4081 un
->un_busy_timeout
= drv_usectohz(val
/ 1000);
4085 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4086 "busy_timeout set to %d\n", un
->un_busy_timeout
);
4090 if (strcasecmp(name
, "disksort") == 0) {
4091 if (strcasecmp(value
, "true") == 0) {
4092 un
->un_f_disksort_disabled
= FALSE
;
4093 } else if (strcasecmp(value
, "false") == 0) {
4094 un
->un_f_disksort_disabled
= TRUE
;
4098 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4099 "disksort disabled flag set to %d\n",
4100 un
->un_f_disksort_disabled
);
4104 if (strcasecmp(name
, "power-condition") == 0) {
4105 if (strcasecmp(value
, "true") == 0) {
4106 un
->un_f_power_condition_disabled
= FALSE
;
4107 } else if (strcasecmp(value
, "false") == 0) {
4108 un
->un_f_power_condition_disabled
= TRUE
;
4112 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4113 "power condition disabled flag set to %d\n",
4114 un
->un_f_power_condition_disabled
);
4118 if (strcasecmp(name
, "timeout-releasereservation") == 0) {
4119 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4120 un
->un_reserve_release_time
= val
;
4124 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4125 "reservation release timeout set to %d\n",
4126 un
->un_reserve_release_time
);
4130 if (strcasecmp(name
, "reset-lun") == 0) {
4131 if (strcasecmp(value
, "true") == 0) {
4132 un
->un_f_lun_reset_enabled
= TRUE
;
4133 } else if (strcasecmp(value
, "false") == 0) {
4134 un
->un_f_lun_reset_enabled
= FALSE
;
4138 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4139 "lun reset enabled flag set to %d\n",
4140 un
->un_f_lun_reset_enabled
);
4144 if (strcasecmp(name
, "retries-busy") == 0) {
4145 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4146 un
->un_busy_retry_count
= val
;
4150 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4151 "busy retry count set to %d\n", un
->un_busy_retry_count
);
4155 if (strcasecmp(name
, "retries-timeout") == 0) {
4156 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4157 un
->un_retry_count
= val
;
4161 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4162 "timeout retry count set to %d\n", un
->un_retry_count
);
4166 if (strcasecmp(name
, "retries-notready") == 0) {
4167 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4168 un
->un_notready_retry_count
= val
;
4172 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4173 "notready retry count set to %d\n",
4174 un
->un_notready_retry_count
);
4178 if (strcasecmp(name
, "retries-reset") == 0) {
4179 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4180 un
->un_reset_retry_count
= val
;
4184 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4185 "reset retry count set to %d\n",
4186 un
->un_reset_retry_count
);
4190 if (strcasecmp(name
, "throttle-max") == 0) {
4191 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4192 un
->un_saved_throttle
= un
->un_throttle
= val
;
4196 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4197 "throttle set to %d\n", un
->un_throttle
);
4200 if (strcasecmp(name
, "throttle-min") == 0) {
4201 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4202 un
->un_min_throttle
= val
;
4206 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4207 "min throttle set to %d\n", un
->un_min_throttle
);
4210 if (strcasecmp(name
, "rmw-type") == 0) {
4211 if (ddi_strtol(value
, &endptr
, 0, &val
) == 0) {
4212 un
->un_f_rmw_type
= val
;
4216 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4217 "RMW type set to %d\n", un
->un_f_rmw_type
);
4221 * Validate the throttle values.
4222 * If any of the numbers are invalid, set everything to defaults.
4224 if ((un
->un_throttle
< SD_LOWEST_VALID_THROTTLE
) ||
4225 (un
->un_min_throttle
< SD_LOWEST_VALID_THROTTLE
) ||
4226 (un
->un_min_throttle
> un
->un_throttle
)) {
4227 un
->un_saved_throttle
= un
->un_throttle
= sd_max_throttle
;
4228 un
->un_min_throttle
= sd_min_throttle
;
4231 if (strcasecmp(name
, "mmc-gesn-polling") == 0) {
4232 if (strcasecmp(value
, "true") == 0) {
4233 un
->un_f_mmc_gesn_polling
= TRUE
;
4234 } else if (strcasecmp(value
, "false") == 0) {
4235 un
->un_f_mmc_gesn_polling
= FALSE
;
4239 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4240 "mmc-gesn-polling set to %d\n",
4241 un
->un_f_mmc_gesn_polling
);
4247 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_set_properties: "
4248 "value of prop %s is invalid\n", name
);
4252 * Function: sd_get_tunables_from_conf()
4255 * This function reads the data list from the sd.conf file and pulls
4256 * the values that can have numeric values as arguments and places
4257 * the values in the appropriate sd_tunables member.
4258 * Since the order of the data list members varies across platforms
4259 * This function reads them from the data list in a platform specific
4260 * order and places them into the correct sd_tunable member that is
4261 * consistent across all platforms.
4264 sd_get_tunables_from_conf(struct sd_lun
*un
, int flags
, int *data_list
,
4265 sd_tunables
*values
)
4270 bzero(values
, sizeof (sd_tunables
));
4272 for (i
= 0; i
< SD_CONF_MAX_ITEMS
; i
++) {
4279 switch (mask
& flags
) {
4280 case 0: /* This mask bit not set in flags */
4282 case SD_CONF_BSET_THROTTLE
:
4283 values
->sdt_throttle
= data_list
[i
];
4284 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4285 "sd_get_tunables_from_conf: throttle = %d\n",
4286 values
->sdt_throttle
);
4288 case SD_CONF_BSET_CTYPE
:
4289 values
->sdt_ctype
= data_list
[i
];
4290 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4291 "sd_get_tunables_from_conf: ctype = %d\n",
4294 case SD_CONF_BSET_NRR_COUNT
:
4295 values
->sdt_not_rdy_retries
= data_list
[i
];
4296 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4297 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4298 values
->sdt_not_rdy_retries
);
4300 case SD_CONF_BSET_BSY_RETRY_COUNT
:
4301 values
->sdt_busy_retries
= data_list
[i
];
4302 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4303 "sd_get_tunables_from_conf: busy_retries = %d\n",
4304 values
->sdt_busy_retries
);
4306 case SD_CONF_BSET_RST_RETRIES
:
4307 values
->sdt_reset_retries
= data_list
[i
];
4308 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4309 "sd_get_tunables_from_conf: reset_retries = %d\n",
4310 values
->sdt_reset_retries
);
4312 case SD_CONF_BSET_RSV_REL_TIME
:
4313 values
->sdt_reserv_rel_time
= data_list
[i
];
4314 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4315 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4316 values
->sdt_reserv_rel_time
);
4318 case SD_CONF_BSET_MIN_THROTTLE
:
4319 values
->sdt_min_throttle
= data_list
[i
];
4320 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4321 "sd_get_tunables_from_conf: min_throttle = %d\n",
4322 values
->sdt_min_throttle
);
4324 case SD_CONF_BSET_DISKSORT_DISABLED
:
4325 values
->sdt_disk_sort_dis
= data_list
[i
];
4326 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4327 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4328 values
->sdt_disk_sort_dis
);
4330 case SD_CONF_BSET_LUN_RESET_ENABLED
:
4331 values
->sdt_lun_reset_enable
= data_list
[i
];
4332 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4333 "sd_get_tunables_from_conf: lun_reset_enable = %d"
4334 "\n", values
->sdt_lun_reset_enable
);
4336 case SD_CONF_BSET_CACHE_IS_NV
:
4337 values
->sdt_suppress_cache_flush
= data_list
[i
];
4338 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4339 "sd_get_tunables_from_conf: \
4340 suppress_cache_flush = %d"
4341 "\n", values
->sdt_suppress_cache_flush
);
4343 case SD_CONF_BSET_PC_DISABLED
:
4344 values
->sdt_disk_sort_dis
= data_list
[i
];
4345 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4346 "sd_get_tunables_from_conf: power_condition_dis = "
4347 "%d\n", values
->sdt_power_condition_dis
);
4354 * Function: sd_process_sdconf_table
4356 * Description: Search the static configuration table for a match on the
4357 * inquiry vid/pid and update the driver soft state structure
4358 * according to the table property values for the device.
4360 * The form of a configuration table entry is:
4361 * <vid+pid>,<flags>,<property-data>
4362 * "SEAGATE ST42400N",1,0x40000,
4363 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4365 * Arguments: un - driver soft state (unit) structure
4369 sd_process_sdconf_table(struct sd_lun
*un
)
4376 for (table_index
= 0; table_index
< sd_disk_table_size
;
4378 id
= sd_disk_table
[table_index
].device_id
;
4385 * The static configuration table currently does not
4386 * implement version 10 properties. Additionally,
4387 * multiple data-property-name entries are not
4388 * implemented in the static configuration table.
4390 if (sd_sdconf_id_match(un
, id
, idlen
) == SD_SUCCESS
) {
4391 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4392 "sd_process_sdconf_table: disk %s\n", id
);
4393 sd_set_vers1_properties(un
,
4394 sd_disk_table
[table_index
].flags
,
4395 sd_disk_table
[table_index
].properties
);
4403 * Function: sd_sdconf_id_match
4405 * Description: This local function implements a case sensitive vid/pid
4406 * comparison as well as the boundary cases of wild card and
4409 * Note: An implicit assumption made here is that the scsi
4410 * inquiry structure will always keep the vid, pid and
4411 * revision strings in consecutive sequence, so they can be
4412 * read as a single string. If this assumption is not the
4413 * case, a separate string, to be used for the check, needs
4414 * to be built with these strings concatenated.
4416 * Arguments: un - driver soft state (unit) structure
4417 * id - table or config file vid/pid
4418 * idlen - length of the vid/pid (bytes)
4420 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4421 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4425 sd_sdconf_id_match(struct sd_lun
*un
, char *id
, int idlen
)
4427 struct scsi_inquiry
*sd_inq
;
4428 int rval
= SD_SUCCESS
;
4431 sd_inq
= un
->un_sd
->sd_inq
;
4435 * We use the inq_vid as a pointer to a buffer containing the
4436 * vid and pid and use the entire vid/pid length of the table
4437 * entry for the comparison. This works because the inq_pid
4438 * data member follows inq_vid in the scsi_inquiry structure.
4440 if (strncasecmp(sd_inq
->inq_vid
, id
, idlen
) != 0) {
4442 * The user id string is compared to the inquiry vid/pid
4443 * using a case insensitive comparison and ignoring
4446 rval
= sd_blank_cmp(un
, id
, idlen
);
4447 if (rval
!= SD_SUCCESS
) {
4449 * User id strings that start and end with a "*"
4450 * are a special case. These do not have a
4451 * specific vendor, and the product string can
4452 * appear anywhere in the 16 byte PID portion of
4453 * the inquiry data. This is a simple strstr()
4454 * type search for the user id in the inquiry data.
4456 if ((id
[0] == '*') && (id
[idlen
- 1] == '*')) {
4457 char *pidptr
= &id
[1];
4460 int pidstrlen
= idlen
- 2;
4461 j
= sizeof (SD_INQUIRY(un
)->inq_pid
) -
4465 return (SD_FAILURE
);
4467 for (i
= 0; i
< j
; i
++) {
4468 if (bcmp(&SD_INQUIRY(un
)->inq_pid
[i
],
4469 pidptr
, pidstrlen
) == 0) {
4482 * Function: sd_blank_cmp
4484 * Description: If the id string starts and ends with a space, treat
4485 * multiple consecutive spaces as equivalent to a single
4486 * space. For example, this causes a sd_disk_table entry
4487 * of " NEC CDROM " to match a device's id string of
4490 * Note: The success exit condition for this routine is if
4491 * the pointer to the table entry is '\0' and the cnt of
4492 * the inquiry length is zero. This will happen if the inquiry
4493 * string returned by the device is padded with spaces to be
4494 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The
4495 * SCSI spec states that the inquiry string is to be padded with
4498 * Arguments: un - driver soft state (unit) structure
4499 * id - table or config file vid/pid
4500 * idlen - length of the vid/pid (bytes)
4502 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4503 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4507 sd_blank_cmp(struct sd_lun
*un
, char *id
, int idlen
)
4512 cnt
= sizeof (SD_INQUIRY(un
)->inq_vid
) +
4513 sizeof (SD_INQUIRY(un
)->inq_pid
);
4516 p2
= un
->un_sd
->sd_inq
->inq_vid
;
4520 if ((id
[0] == ' ') && (id
[idlen
- 1] == ' ')) {
4522 * Note: string p1 is terminated by a NUL but string p2
4523 * isn't. The end of p2 is determined by cnt.
4526 /* skip over any extra blanks in both strings */
4527 while ((*p1
!= '\0') && (*p1
== ' ')) {
4530 while ((cnt
!= 0) && (*p2
== ' ')) {
4535 /* compare the two strings */
4537 (SD_TOUPPER(*p1
) != SD_TOUPPER(*p2
))) {
4541 (SD_TOUPPER(*p1
) == SD_TOUPPER(*p2
))) {
4549 /* return SD_SUCCESS if both strings match */
4550 return (((*p1
== '\0') && (cnt
== 0)) ? SD_SUCCESS
: SD_FAILURE
);
4555 * Function: sd_chk_vers1_data
4557 * Description: Verify the version 1 device properties provided by the
4558 * user via the configuration file
4560 * Arguments: un - driver soft state (unit) structure
4561 * flags - integer mask indicating properties to be set
4562 * prop_list - integer list of property values
4563 * list_len - number of the elements
4565 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4566 * SD_FAILURE - Indicates the user provided data is invalid
4570 sd_chk_vers1_data(struct sd_lun
*un
, int flags
, int *prop_list
,
4571 int list_len
, char *dataname_ptr
)
4579 /* Check for a NULL property name and list */
4580 if (dataname_ptr
== NULL
) {
4581 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
4582 "sd_chk_vers1_data: NULL data property name.");
4583 return (SD_FAILURE
);
4585 if (prop_list
== NULL
) {
4586 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
4587 "sd_chk_vers1_data: %s NULL data property list.",
4589 return (SD_FAILURE
);
4592 /* Display a warning if undefined bits are set in the flags */
4593 if (flags
& ~SD_CONF_BIT_MASK
) {
4594 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
4595 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4596 "Properties not set.",
4597 (flags
& ~SD_CONF_BIT_MASK
), dataname_ptr
);
4598 return (SD_FAILURE
);
4602 * Verify the length of the list by identifying the highest bit set
4603 * in the flags and validating that the property list has a length
4604 * up to the index of this bit.
4606 for (i
= 0; i
< SD_CONF_MAX_ITEMS
; i
++) {
4612 if (list_len
< (index
+ 2)) {
4613 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
4614 "sd_chk_vers1_data: "
4615 "Data property list %s size is incorrect. "
4616 "Properties not set.", dataname_ptr
);
4617 scsi_log(SD_DEVINFO(un
), sd_label
, CE_CONT
, "Size expected: "
4618 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS
);
4619 return (SD_FAILURE
);
4621 return (SD_SUCCESS
);
4626 * Function: sd_set_vers1_properties
4628 * Description: Set version 1 device properties based on a property list
4629 * retrieved from the driver configuration file or static
4630 * configuration table. Version 1 properties have the format:
4632 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4634 * where the prop0 value will be used to set prop0 if bit0
4635 * is set in the flags
4637 * Arguments: un - driver soft state (unit) structure
4638 * flags - integer mask indicating properties to be set
4639 * prop_list - integer list of property values
4643 sd_set_vers1_properties(struct sd_lun
*un
, int flags
, sd_tunables
*prop_list
)
4648 * Set the flag to indicate cache is to be disabled. An attempt
4649 * to disable the cache via sd_cache_control() will be made
4650 * later during attach once the basic initialization is complete.
4652 if (flags
& SD_CONF_BSET_NOCACHE
) {
4653 un
->un_f_opt_disable_cache
= TRUE
;
4654 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4655 "sd_set_vers1_properties: caching disabled flag set\n");
4658 /* CD-specific configuration parameters */
4659 if (flags
& SD_CONF_BSET_PLAYMSF_BCD
) {
4660 un
->un_f_cfg_playmsf_bcd
= TRUE
;
4661 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4662 "sd_set_vers1_properties: playmsf_bcd set\n");
4664 if (flags
& SD_CONF_BSET_READSUB_BCD
) {
4665 un
->un_f_cfg_readsub_bcd
= TRUE
;
4666 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4667 "sd_set_vers1_properties: readsub_bcd set\n");
4669 if (flags
& SD_CONF_BSET_READ_TOC_TRK_BCD
) {
4670 un
->un_f_cfg_read_toc_trk_bcd
= TRUE
;
4671 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4672 "sd_set_vers1_properties: read_toc_trk_bcd set\n");
4674 if (flags
& SD_CONF_BSET_READ_TOC_ADDR_BCD
) {
4675 un
->un_f_cfg_read_toc_addr_bcd
= TRUE
;
4676 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4677 "sd_set_vers1_properties: read_toc_addr_bcd set\n");
4679 if (flags
& SD_CONF_BSET_NO_READ_HEADER
) {
4680 un
->un_f_cfg_no_read_header
= TRUE
;
4681 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4682 "sd_set_vers1_properties: no_read_header set\n");
4684 if (flags
& SD_CONF_BSET_READ_CD_XD4
) {
4685 un
->un_f_cfg_read_cd_xd4
= TRUE
;
4686 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4687 "sd_set_vers1_properties: read_cd_xd4 set\n");
4690 /* Support for devices which do not have valid/unique serial numbers */
4691 if (flags
& SD_CONF_BSET_FAB_DEVID
) {
4692 un
->un_f_opt_fab_devid
= TRUE
;
4693 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4694 "sd_set_vers1_properties: fab_devid bit set\n");
4697 /* Support for user throttle configuration */
4698 if (flags
& SD_CONF_BSET_THROTTLE
) {
4699 ASSERT(prop_list
!= NULL
);
4700 un
->un_saved_throttle
= un
->un_throttle
=
4701 prop_list
->sdt_throttle
;
4702 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4703 "sd_set_vers1_properties: throttle set to %d\n",
4704 prop_list
->sdt_throttle
);
4707 /* Set the per disk retry count according to the conf file or table. */
4708 if (flags
& SD_CONF_BSET_NRR_COUNT
) {
4709 ASSERT(prop_list
!= NULL
);
4710 if (prop_list
->sdt_not_rdy_retries
) {
4711 un
->un_notready_retry_count
=
4712 prop_list
->sdt_not_rdy_retries
;
4713 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4714 "sd_set_vers1_properties: not ready retry count"
4715 " set to %d\n", un
->un_notready_retry_count
);
4719 /* The controller type is reported for generic disk driver ioctls */
4720 if (flags
& SD_CONF_BSET_CTYPE
) {
4721 ASSERT(prop_list
!= NULL
);
4722 switch (prop_list
->sdt_ctype
) {
4724 un
->un_ctype
= prop_list
->sdt_ctype
;
4725 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4726 "sd_set_vers1_properties: ctype set to "
4730 un
->un_ctype
= prop_list
->sdt_ctype
;
4731 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4732 "sd_set_vers1_properties: ctype set to "
4735 case CTYPE_ROD
: /* RW optical */
4736 un
->un_ctype
= prop_list
->sdt_ctype
;
4737 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4738 "sd_set_vers1_properties: ctype set to "
4742 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
4743 "sd_set_vers1_properties: Could not set "
4744 "invalid ctype value (%d)",
4745 prop_list
->sdt_ctype
);
4749 /* Purple failover timeout */
4750 if (flags
& SD_CONF_BSET_BSY_RETRY_COUNT
) {
4751 ASSERT(prop_list
!= NULL
);
4752 un
->un_busy_retry_count
=
4753 prop_list
->sdt_busy_retries
;
4754 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4755 "sd_set_vers1_properties: "
4756 "busy retry count set to %d\n",
4757 un
->un_busy_retry_count
);
4760 /* Purple reset retry count */
4761 if (flags
& SD_CONF_BSET_RST_RETRIES
) {
4762 ASSERT(prop_list
!= NULL
);
4763 un
->un_reset_retry_count
=
4764 prop_list
->sdt_reset_retries
;
4765 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4766 "sd_set_vers1_properties: "
4767 "reset retry count set to %d\n",
4768 un
->un_reset_retry_count
);
4771 /* Purple reservation release timeout */
4772 if (flags
& SD_CONF_BSET_RSV_REL_TIME
) {
4773 ASSERT(prop_list
!= NULL
);
4774 un
->un_reserve_release_time
=
4775 prop_list
->sdt_reserv_rel_time
;
4776 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4777 "sd_set_vers1_properties: "
4778 "reservation release timeout set to %d\n",
4779 un
->un_reserve_release_time
);
4783 * Driver flag telling the driver to verify that no commands are pending
4784 * for a device before issuing a Test Unit Ready. This is a workaround
4785 * for a firmware bug in some Seagate eliteI drives.
4787 if (flags
& SD_CONF_BSET_TUR_CHECK
) {
4788 un
->un_f_cfg_tur_check
= TRUE
;
4789 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4790 "sd_set_vers1_properties: tur queue check set\n");
4793 if (flags
& SD_CONF_BSET_MIN_THROTTLE
) {
4794 un
->un_min_throttle
= prop_list
->sdt_min_throttle
;
4795 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4796 "sd_set_vers1_properties: min throttle set to %d\n",
4797 un
->un_min_throttle
);
4800 if (flags
& SD_CONF_BSET_DISKSORT_DISABLED
) {
4801 un
->un_f_disksort_disabled
=
4802 (prop_list
->sdt_disk_sort_dis
!= 0) ?
4804 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4805 "sd_set_vers1_properties: disksort disabled "
4807 prop_list
->sdt_disk_sort_dis
);
4810 if (flags
& SD_CONF_BSET_LUN_RESET_ENABLED
) {
4811 un
->un_f_lun_reset_enabled
=
4812 (prop_list
->sdt_lun_reset_enable
!= 0) ?
4814 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4815 "sd_set_vers1_properties: lun reset enabled "
4817 prop_list
->sdt_lun_reset_enable
);
4820 if (flags
& SD_CONF_BSET_CACHE_IS_NV
) {
4821 un
->un_f_suppress_cache_flush
=
4822 (prop_list
->sdt_suppress_cache_flush
!= 0) ?
4824 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4825 "sd_set_vers1_properties: suppress_cache_flush "
4827 prop_list
->sdt_suppress_cache_flush
);
4830 if (flags
& SD_CONF_BSET_PC_DISABLED
) {
4831 un
->un_f_power_condition_disabled
=
4832 (prop_list
->sdt_power_condition_dis
!= 0) ?
4834 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
4835 "sd_set_vers1_properties: power_condition_disabled "
4837 prop_list
->sdt_power_condition_dis
);
4841 * Validate the throttle values.
4842 * If any of the numbers are invalid, set everything to defaults.
4844 if ((un
->un_throttle
< SD_LOWEST_VALID_THROTTLE
) ||
4845 (un
->un_min_throttle
< SD_LOWEST_VALID_THROTTLE
) ||
4846 (un
->un_min_throttle
> un
->un_throttle
)) {
4847 un
->un_saved_throttle
= un
->un_throttle
= sd_max_throttle
;
4848 un
->un_min_throttle
= sd_min_throttle
;
4853 * Function: sd_is_lsi()
4855 * Description: Check for lsi devices, step through the static device
4856 * table to match vid/pid.
4858 * Args: un - ptr to sd_lun
4860 * Notes: When creating new LSI property, need to add the new LSI property
4864 sd_is_lsi(struct sd_lun
*un
)
4872 for (table_index
= 0; table_index
< sd_disk_table_size
;
4874 id
= sd_disk_table
[table_index
].device_id
;
4880 if (sd_sdconf_id_match(un
, id
, idlen
) == SD_SUCCESS
) {
4881 prop
= sd_disk_table
[table_index
].properties
;
4882 if (prop
== &lsi_properties
||
4883 prop
== &lsi_oem_properties
||
4884 prop
== &lsi_properties_scsi
||
4885 prop
== &symbios_properties
) {
4886 un
->un_f_cfg_is_lsi
= TRUE
;
4894 * Function: sd_get_physical_geometry
4896 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
4897 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
4898 * target, and use this information to initialize the physical
4899 * geometry cache specified by pgeom_p.
4901 * MODE SENSE is an optional command, so failure in this case
4902 * does not necessarily denote an error. We want to use the
4903 * MODE SENSE commands to derive the physical geometry of the
4904 * device, but if either command fails, the logical geometry is
4905 * used as the fallback for disk label geometry in cmlb.
4907 * This requires that un->un_blockcount and un->un_tgt_blocksize
4908 * have already been initialized for the current target and
4909 * that the current values be passed as args so that we don't
4910 * end up ever trying to use -1 as a valid value. This could
4911 * happen if either value is reset while we're not holding
4914 * Arguments: un - driver soft state (unit) structure
4915 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4916 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4917 * to use the USCSI "direct" chain and bypass the normal
4920 * Context: Kernel thread only (can sleep).
4924 sd_get_physical_geometry(struct sd_lun
*un
, cmlb_geom_t
*pgeom_p
,
4925 diskaddr_t capacity
, int lbasize
, int path_flag
)
4927 struct mode_format
*page3p
;
4928 struct mode_geometry
*page4p
;
4929 struct mode_header
*headerp
;
4936 diskaddr_t modesense_capacity
;
4939 int mode_header_length
;
4953 lbasize
= un
->un_sys_blocksize
;
4956 pgeom_p
->g_secsize
= (unsigned short)lbasize
;
4959 * If the unit is a cd/dvd drive MODE SENSE page three
4960 * and MODE SENSE page four are reserved (see SBC spec
4961 * and MMC spec). To prevent soft errors just return
4962 * using the default LBA size.
4967 cdbsize
= (un
->un_f_cfg_is_atapi
== TRUE
) ? CDB_GROUP2
: CDB_GROUP0
;
4970 * Retrieve MODE SENSE page 3 - Format Device Page
4972 p3bufp
= kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH
, KM_SLEEP
);
4973 ssc
= sd_ssc_init(un
);
4974 status
= sd_send_scsi_MODE_SENSE(ssc
, cdbsize
, p3bufp
,
4975 SD_MODE_SENSE_PAGE3_LENGTH
, SD_MODE_SENSE_PAGE3_CODE
, path_flag
);
4977 SD_ERROR(SD_LOG_COMMON
, un
,
4978 "sd_get_physical_geometry: mode sense page 3 failed\n");
4983 * Determine size of Block Descriptors in order to locate the mode
4984 * page data. ATAPI devices return 0, SCSI devices should return
4985 * MODE_BLK_DESC_LENGTH.
4987 headerp
= (struct mode_header
*)p3bufp
;
4988 if (un
->un_f_cfg_is_atapi
== TRUE
) {
4989 struct mode_header_grp2
*mhp
=
4990 (struct mode_header_grp2
*)headerp
;
4991 mode_header_length
= MODE_HEADER_LENGTH_GRP2
;
4992 bd_len
= (mhp
->bdesc_length_hi
<< 8) | mhp
->bdesc_length_lo
;
4994 mode_header_length
= MODE_HEADER_LENGTH
;
4995 bd_len
= ((struct mode_header
*)headerp
)->bdesc_length
;
4998 if (bd_len
> MODE_BLK_DESC_LENGTH
) {
4999 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, SD_LOG_COMMON
,
5000 "sd_get_physical_geometry: received unexpected bd_len "
5001 "of %d, page3\n", bd_len
);
5006 page3p
= (struct mode_format
*)
5007 ((caddr_t
)headerp
+ mode_header_length
+ bd_len
);
5009 if (page3p
->mode_page
.code
!= SD_MODE_SENSE_PAGE3_CODE
) {
5010 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, SD_LOG_COMMON
,
5011 "sd_get_physical_geometry: mode sense pg3 code mismatch "
5012 "%d\n", page3p
->mode_page
.code
);
5018 * Use this physical geometry data only if BOTH MODE SENSE commands
5019 * complete successfully; otherwise, revert to the logical geometry.
5020 * So, we need to save everything in temporary variables.
5022 sector_size
= BE_16(page3p
->data_bytes_sect
);
5025 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size
5027 if (sector_size
== 0) {
5028 sector_size
= un
->un_sys_blocksize
;
5030 sector_size
&= ~(un
->un_sys_blocksize
- 1);
5033 nsect
= BE_16(page3p
->sect_track
);
5034 intrlv
= BE_16(page3p
->interleave
);
5036 SD_INFO(SD_LOG_COMMON
, un
,
5037 "sd_get_physical_geometry: Format Parameters (page 3)\n");
5038 SD_INFO(SD_LOG_COMMON
, un
,
5039 " mode page: %d; nsect: %d; sector size: %d;\n",
5040 page3p
->mode_page
.code
, nsect
, sector_size
);
5041 SD_INFO(SD_LOG_COMMON
, un
,
5042 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv
,
5043 BE_16(page3p
->track_skew
),
5044 BE_16(page3p
->cylinder_skew
));
5046 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
5049 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page
5051 p4bufp
= kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH
, KM_SLEEP
);
5052 status
= sd_send_scsi_MODE_SENSE(ssc
, cdbsize
, p4bufp
,
5053 SD_MODE_SENSE_PAGE4_LENGTH
, SD_MODE_SENSE_PAGE4_CODE
, path_flag
);
5055 SD_ERROR(SD_LOG_COMMON
, un
,
5056 "sd_get_physical_geometry: mode sense page 4 failed\n");
5061 * Determine size of Block Descriptors in order to locate the mode
5062 * page data. ATAPI devices return 0, SCSI devices should return
5063 * MODE_BLK_DESC_LENGTH.
5065 headerp
= (struct mode_header
*)p4bufp
;
5066 if (un
->un_f_cfg_is_atapi
== TRUE
) {
5067 struct mode_header_grp2
*mhp
=
5068 (struct mode_header_grp2
*)headerp
;
5069 bd_len
= (mhp
->bdesc_length_hi
<< 8) | mhp
->bdesc_length_lo
;
5071 bd_len
= ((struct mode_header
*)headerp
)->bdesc_length
;
5074 if (bd_len
> MODE_BLK_DESC_LENGTH
) {
5075 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, SD_LOG_COMMON
,
5076 "sd_get_physical_geometry: received unexpected bd_len of "
5077 "%d, page4\n", bd_len
);
5082 page4p
= (struct mode_geometry
*)
5083 ((caddr_t
)headerp
+ mode_header_length
+ bd_len
);
5085 if (page4p
->mode_page
.code
!= SD_MODE_SENSE_PAGE4_CODE
) {
5086 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, SD_LOG_COMMON
,
5087 "sd_get_physical_geometry: mode sense pg4 code mismatch "
5088 "%d\n", page4p
->mode_page
.code
);
5094 * Stash the data now, after we know that both commands completed.
5098 nhead
= (int)page4p
->heads
; /* uchar, so no conversion needed */
5099 spc
= nhead
* nsect
;
5100 ncyl
= (page4p
->cyl_ub
<< 16) + (page4p
->cyl_mb
<< 8) + page4p
->cyl_lb
;
5101 rpm
= BE_16(page4p
->rpm
);
5103 modesense_capacity
= spc
* ncyl
;
5105 SD_INFO(SD_LOG_COMMON
, un
,
5106 "sd_get_physical_geometry: Geometry Parameters (page 4)\n");
5107 SD_INFO(SD_LOG_COMMON
, un
,
5108 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl
, nhead
, rpm
);
5109 SD_INFO(SD_LOG_COMMON
, un
,
5110 " computed capacity(h*s*c): %d;\n", modesense_capacity
);
5111 SD_INFO(SD_LOG_COMMON
, un
, " pgeom_p: %p; read cap: %d\n",
5112 (void *)pgeom_p
, capacity
);
5115 * Compensate if the drive's geometry is not rectangular, i.e.,
5116 * the product of C * H * S returned by MODE SENSE >= that returned
5117 * by read capacity. This is an idiosyncrasy of the original x86
5120 if (modesense_capacity
>= capacity
) {
5121 SD_INFO(SD_LOG_COMMON
, un
,
5122 "sd_get_physical_geometry: adjusting acyl; "
5123 "old: %d; new: %d\n", pgeom_p
->g_acyl
,
5124 (modesense_capacity
- capacity
+ spc
- 1) / spc
);
5125 if (sector_size
!= 0) {
5126 /* 1243403: NEC D38x7 drives don't support sec size */
5127 pgeom_p
->g_secsize
= (unsigned short)sector_size
;
5129 pgeom_p
->g_nsect
= (unsigned short)nsect
;
5130 pgeom_p
->g_nhead
= (unsigned short)nhead
;
5131 pgeom_p
->g_capacity
= capacity
;
5133 (modesense_capacity
- pgeom_p
->g_capacity
+ spc
- 1) / spc
;
5134 pgeom_p
->g_ncyl
= ncyl
- pgeom_p
->g_acyl
;
5137 pgeom_p
->g_rpm
= (unsigned short)rpm
;
5138 pgeom_p
->g_intrlv
= (unsigned short)intrlv
;
5141 SD_INFO(SD_LOG_COMMON
, un
,
5142 "sd_get_physical_geometry: mode sense geometry:\n");
5143 SD_INFO(SD_LOG_COMMON
, un
,
5144 " nsect: %d; sector size: %d; interlv: %d\n",
5145 nsect
, sector_size
, intrlv
);
5146 SD_INFO(SD_LOG_COMMON
, un
,
5147 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
5148 nhead
, ncyl
, rpm
, modesense_capacity
);
5149 SD_INFO(SD_LOG_COMMON
, un
,
5150 "sd_get_physical_geometry: (cached)\n");
5151 SD_INFO(SD_LOG_COMMON
, un
,
5152 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
5153 pgeom_p
->g_ncyl
, pgeom_p
->g_acyl
,
5154 pgeom_p
->g_nhead
, pgeom_p
->g_nsect
);
5155 SD_INFO(SD_LOG_COMMON
, un
,
5156 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
5157 pgeom_p
->g_secsize
, pgeom_p
->g_capacity
,
5158 pgeom_p
->g_intrlv
, pgeom_p
->g_rpm
);
5159 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
5162 kmem_free(p4bufp
, SD_MODE_SENSE_PAGE4_LENGTH
);
5165 kmem_free(p3bufp
, SD_MODE_SENSE_PAGE3_LENGTH
);
5168 if (status
== EIO
) {
5170 * Some disks do not support mode sense(6), we
5171 * should ignore this kind of error(sense key is
5172 * 0x5 - illegal request).
5177 sensep
= (uint8_t *)ssc
->ssc_uscsi_cmd
->uscsi_rqbuf
;
5178 senlen
= (int)(ssc
->ssc_uscsi_cmd
->uscsi_rqlen
-
5179 ssc
->ssc_uscsi_cmd
->uscsi_rqresid
);
5182 scsi_sense_key(sensep
) == KEY_ILLEGAL_REQUEST
) {
5183 sd_ssc_assessment(ssc
,
5184 SD_FMT_IGNORE_COMPROMISE
);
5186 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
5189 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
5197 * Function: sd_get_virtual_geometry
5199 * Description: Ask the controller to tell us about the target device.
5201 * Arguments: un - pointer to softstate
5202 * capacity - disk capacity in #blocks
5203 * lbasize - disk block size in bytes
5205 * Context: Kernel thread only
5209 sd_get_virtual_geometry(struct sd_lun
*un
, cmlb_geom_t
*lgeom_p
,
5210 diskaddr_t capacity
, int lbasize
)
5217 /* Set sector size, and total number of sectors */
5218 (void) scsi_ifsetcap(SD_ADDRESS(un
), "sector-size", lbasize
, 1);
5219 (void) scsi_ifsetcap(SD_ADDRESS(un
), "total-sectors", capacity
, 1);
5221 /* Let the HBA tell us its geometry */
5222 geombuf
= (uint_t
)scsi_ifgetcap(SD_ADDRESS(un
), "geometry", 1);
5224 /* A value of -1 indicates an undefined "geometry" property */
5225 if (geombuf
== (-1)) {
5229 /* Initialize the logical geometry cache. */
5230 lgeom_p
->g_nhead
= (geombuf
>> 16) & 0xffff;
5231 lgeom_p
->g_nsect
= geombuf
& 0xffff;
5232 lgeom_p
->g_secsize
= un
->un_sys_blocksize
;
5234 spc
= lgeom_p
->g_nhead
* lgeom_p
->g_nsect
;
5237 * Note: The driver originally converted the capacity value from
5238 * target blocks to system blocks. However, the capacity value passed
5239 * to this routine is already in terms of system blocks (this scaling
5240 * is done when the READ CAPACITY command is issued and processed).
5241 * This 'error' may have gone undetected because the usage of g_ncyl
5242 * (which is based upon g_capacity) is very limited within the driver
5244 lgeom_p
->g_capacity
= capacity
;
5247 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The
5248 * hba may return zero values if the device has been removed.
5251 lgeom_p
->g_ncyl
= 0;
5253 lgeom_p
->g_ncyl
= lgeom_p
->g_capacity
/ spc
;
5255 lgeom_p
->g_acyl
= 0;
5257 SD_INFO(SD_LOG_COMMON
, un
, "sd_get_virtual_geometry: (cached)\n");
5262 * Function: sd_update_block_info
5264 * Description: Calculate a byte count to sector count bitshift value
5267 * Arguments: un: unit struct.
5268 * lbasize: new target sector size
5269 * capacity: new target capacity, ie. block count
5271 * Context: Kernel thread context
5275 sd_update_block_info(struct sd_lun
*un
, uint32_t lbasize
, uint64_t capacity
)
5278 un
->un_tgt_blocksize
= lbasize
;
5279 un
->un_f_tgt_blocksize_is_valid
= TRUE
;
5280 if (!un
->un_f_has_removable_media
) {
5281 un
->un_sys_blocksize
= lbasize
;
5285 if (capacity
!= 0) {
5286 un
->un_blockcount
= capacity
;
5287 un
->un_f_blockcount_is_valid
= TRUE
;
5293 * Function: sd_register_devid
5295 * Description: This routine will obtain the device id information from the
5296 * target, obtain the serial number, and register the device
5297 * id with the ddi framework.
5299 * Arguments: devi - the system's dev_info_t for the device.
5300 * un - driver soft state (unit) structure
5301 * reservation_flag - indicates if a reservation conflict
5302 * occurred during attach
5304 * Context: Kernel Thread
5307 sd_register_devid(sd_ssc_t
*ssc
, dev_info_t
*devi
, int reservation_flag
)
5310 uchar_t
*inq80
= NULL
;
5311 size_t inq80_len
= MAX_INQUIRY_SIZE
;
5312 size_t inq80_resid
= 0;
5313 uchar_t
*inq83
= NULL
;
5314 size_t inq83_len
= MAX_INQUIRY_SIZE
;
5315 size_t inq83_resid
= 0;
5320 ASSERT(ssc
!= NULL
);
5323 ASSERT(mutex_owned(SD_MUTEX(un
)));
5324 ASSERT((SD_DEVINFO(un
)) == devi
);
5328 * We check the availability of the World Wide Name (0x83) and Unit
5329 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
5330 * un_vpd_page_mask from them, we decide which way to get the WWN. If
5331 * 0x83 is available, that is the best choice. Our next choice is
5332 * 0x80. If neither are available, we munge the devid from the device
5333 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5334 * to fabricate a devid for non-Sun qualified disks.
5336 if (sd_check_vpd_page_support(ssc
) == 0) {
5337 /* collect page 80 data if available */
5338 if (un
->un_vpd_page_mask
& SD_VPD_UNIT_SERIAL_PG
) {
5340 mutex_exit(SD_MUTEX(un
));
5341 inq80
= kmem_zalloc(inq80_len
, KM_SLEEP
);
5343 rval
= sd_send_scsi_INQUIRY(ssc
, inq80
, inq80_len
,
5344 0x01, 0x80, &inq80_resid
);
5347 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
5348 kmem_free(inq80
, inq80_len
);
5351 } else if (ddi_prop_exists(
5352 DDI_DEV_T_NONE
, SD_DEVINFO(un
),
5353 DDI_PROP_NOTPROM
| DDI_PROP_DONTPASS
,
5354 INQUIRY_SERIAL_NO
) == 0) {
5356 * If we don't already have a serial number
5357 * property, do quick verify of data returned
5358 * and define property.
5360 dlen
= inq80_len
- inq80_resid
;
5361 len
= (size_t)inq80
[3];
5362 if ((dlen
>= 4) && ((len
+ 4) <= dlen
)) {
5364 * Ensure sn termination, skip leading
5365 * blanks, and create property
5366 * 'inquiry-serial-no'.
5368 sn
= (char *)&inq80
[4];
5370 while (*sn
&& (*sn
== ' '))
5373 (void) ddi_prop_update_string(
5376 INQUIRY_SERIAL_NO
, sn
);
5380 mutex_enter(SD_MUTEX(un
));
5383 /* collect page 83 data if available */
5384 if (un
->un_vpd_page_mask
& SD_VPD_DEVID_WWN_PG
) {
5385 mutex_exit(SD_MUTEX(un
));
5386 inq83
= kmem_zalloc(inq83_len
, KM_SLEEP
);
5388 rval
= sd_send_scsi_INQUIRY(ssc
, inq83
, inq83_len
,
5389 0x01, 0x83, &inq83_resid
);
5392 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
5393 kmem_free(inq83
, inq83_len
);
5397 mutex_enter(SD_MUTEX(un
));
5402 * If transport has already registered a devid for this target
5403 * then that takes precedence over the driver's determination
5406 * NOTE: The reason this check is done here instead of at the beginning
5407 * of the function is to allow the code above to create the
5408 * 'inquiry-serial-no' property.
5410 if (ddi_devid_get(SD_DEVINFO(un
), &un
->un_devid
) == DDI_SUCCESS
) {
5411 ASSERT(un
->un_devid
);
5412 un
->un_f_devid_transport_defined
= TRUE
;
5413 goto cleanup
; /* use devid registered by the transport */
5417 * This is the case of antiquated Sun disk drives that have the
5418 * FAB_DEVID property set in the disk_table. These drives
5419 * manage the devid's by storing them in last 2 available sectors
5420 * on the drive and have them fabricated by the ddi layer by calling
5421 * ddi_devid_init and passing the DEVID_FAB flag.
5423 if (un
->un_f_opt_fab_devid
== TRUE
) {
5425 * Depending on EINVAL isn't reliable, since a reserved disk
5426 * may result in invalid geometry, so check to make sure a
5427 * reservation conflict did not occur during attach.
5429 if ((sd_get_devid(ssc
) == EINVAL
) &&
5430 (reservation_flag
!= SD_TARGET_IS_RESERVED
)) {
5432 * The devid is invalid AND there is no reservation
5433 * conflict. Fabricate a new devid.
5435 (void) sd_create_devid(ssc
);
5438 /* Register the devid if it exists */
5439 if (un
->un_devid
!= NULL
) {
5440 (void) ddi_devid_register(SD_DEVINFO(un
),
5442 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
5443 "sd_register_devid: Devid Fabricated\n");
5448 /* encode best devid possible based on data available */
5449 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST
,
5450 (char *)ddi_driver_name(SD_DEVINFO(un
)),
5451 (uchar_t
*)SD_INQUIRY(un
), sizeof (*SD_INQUIRY(un
)),
5452 inq80
, inq80_len
- inq80_resid
, inq83
, inq83_len
-
5453 inq83_resid
, &un
->un_devid
) == DDI_SUCCESS
) {
5455 /* devid successfully encoded, register devid */
5456 (void) ddi_devid_register(SD_DEVINFO(un
), un
->un_devid
);
5460 * Unable to encode a devid based on data available.
5461 * This is not a Sun qualified disk. Older Sun disk
5462 * drives that have the SD_FAB_DEVID property
5463 * set in the disk_table and non Sun qualified
5464 * disks are treated in the same manner. These
5465 * drives manage the devid's by storing them in
5466 * last 2 available sectors on the drive and
5467 * have them fabricated by the ddi layer by
5468 * calling ddi_devid_init and passing the
5470 * Create a fabricate devid only if there's no
5471 * fabricate devid existed.
5473 if (sd_get_devid(ssc
) == EINVAL
) {
5474 (void) sd_create_devid(ssc
);
5476 un
->un_f_opt_fab_devid
= TRUE
;
5478 /* Register the devid if it exists */
5479 if (un
->un_devid
!= NULL
) {
5480 (void) ddi_devid_register(SD_DEVINFO(un
),
5482 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
5483 "sd_register_devid: devid fabricated using "
5489 /* clean up resources */
5490 if (inq80
!= NULL
) {
5491 kmem_free(inq80
, inq80_len
);
5493 if (inq83
!= NULL
) {
5494 kmem_free(inq83
, inq83_len
);
5501 * Function: sd_get_devid
5503 * Description: This routine will return 0 if a valid device id has been
5504 * obtained from the target and stored in the soft state. If a
5505 * valid device id has not been previously read and stored, a
5506 * read attempt will be made.
5508 * Arguments: un - driver soft state (unit) structure
5510 * Return Code: 0 if we successfully get the device id
5512 * Context: Kernel Thread
5516 sd_get_devid(sd_ssc_t
*ssc
)
5518 struct dk_devid
*dkdevid
;
5529 ASSERT(ssc
!= NULL
);
5532 ASSERT(mutex_owned(SD_MUTEX(un
)));
5534 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
, "sd_get_devid: entry: un: 0x%p\n",
5537 if (un
->un_devid
!= NULL
) {
5541 mutex_exit(SD_MUTEX(un
));
5542 if (cmlb_get_devid_block(un
->un_cmlbhandle
, &blk
,
5543 (void *)SD_PATH_DIRECT
) != 0) {
5544 mutex_enter(SD_MUTEX(un
));
5549 * Read and verify device id, stored in the reserved cylinders at the
5550 * end of the disk. Backup label is on the odd sectors of the last
5551 * track of the last cylinder. Device id will be on track of the next
5554 mutex_enter(SD_MUTEX(un
));
5555 buffer_size
= SD_REQBYTES2TGTBYTES(un
, sizeof (struct dk_devid
));
5556 mutex_exit(SD_MUTEX(un
));
5557 dkdevid
= kmem_alloc(buffer_size
, KM_SLEEP
);
5558 status
= sd_send_scsi_READ(ssc
, dkdevid
, buffer_size
, blk
,
5562 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
5566 /* Validate the revision */
5567 if ((dkdevid
->dkd_rev_hi
!= DK_DEVID_REV_MSB
) ||
5568 (dkdevid
->dkd_rev_lo
!= DK_DEVID_REV_LSB
)) {
5573 /* Calculate the checksum */
5575 ip
= (uint_t
*)dkdevid
;
5576 for (i
= 0; i
< ((DEV_BSIZE
- sizeof (int)) / sizeof (int));
5581 /* Compare the checksums */
5582 if (DKD_GETCHKSUM(dkdevid
) != chksum
) {
5587 /* Validate the device id */
5588 if (ddi_devid_valid((ddi_devid_t
)&dkdevid
->dkd_devid
) != DDI_SUCCESS
) {
5594 * Store the device id in the driver soft state
5596 sz
= ddi_devid_sizeof((ddi_devid_t
)&dkdevid
->dkd_devid
);
5597 tmpid
= kmem_alloc(sz
, KM_SLEEP
);
5599 mutex_enter(SD_MUTEX(un
));
5601 un
->un_devid
= tmpid
;
5602 bcopy(&dkdevid
->dkd_devid
, un
->un_devid
, sz
);
5604 kmem_free(dkdevid
, buffer_size
);
5606 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
, "sd_get_devid: exit: un:0x%p\n", un
);
5610 mutex_enter(SD_MUTEX(un
));
5611 kmem_free(dkdevid
, buffer_size
);
5617 * Function: sd_create_devid
5619 * Description: This routine will fabricate the device id and write it
5622 * Arguments: un - driver soft state (unit) structure
5624 * Return Code: value of the fabricated device id
5626 * Context: Kernel Thread
5630 sd_create_devid(sd_ssc_t
*ssc
)
5634 ASSERT(ssc
!= NULL
);
5638 /* Fabricate the devid */
5639 if (ddi_devid_init(SD_DEVINFO(un
), DEVID_FAB
, 0, NULL
, &un
->un_devid
)
5644 /* Write the devid to disk */
5645 if (sd_write_deviceid(ssc
) != 0) {
5646 ddi_devid_free(un
->un_devid
);
5647 un
->un_devid
= NULL
;
5650 return (un
->un_devid
);
5655 * Function: sd_write_deviceid
5657 * Description: This routine will write the device id to the disk
5660 * Arguments: un - driver soft state (unit) structure
5662 * Return Code: EINVAL
5663 * value returned by sd_send_scsi_cmd
5665 * Context: Kernel Thread
5669 sd_write_deviceid(sd_ssc_t
*ssc
)
5671 struct dk_devid
*dkdevid
;
5679 ASSERT(ssc
!= NULL
);
5682 ASSERT(mutex_owned(SD_MUTEX(un
)));
5684 mutex_exit(SD_MUTEX(un
));
5685 if (cmlb_get_devid_block(un
->un_cmlbhandle
, &blk
,
5686 (void *)SD_PATH_DIRECT
) != 0) {
5687 mutex_enter(SD_MUTEX(un
));
5692 /* Allocate the buffer */
5693 buf
= kmem_zalloc(un
->un_sys_blocksize
, KM_SLEEP
);
5694 dkdevid
= (struct dk_devid
*)buf
;
5696 /* Fill in the revision */
5697 dkdevid
->dkd_rev_hi
= DK_DEVID_REV_MSB
;
5698 dkdevid
->dkd_rev_lo
= DK_DEVID_REV_LSB
;
5700 /* Copy in the device id */
5701 mutex_enter(SD_MUTEX(un
));
5702 bcopy(un
->un_devid
, &dkdevid
->dkd_devid
,
5703 ddi_devid_sizeof(un
->un_devid
));
5704 mutex_exit(SD_MUTEX(un
));
5706 /* Calculate the checksum */
5708 ip
= (uint_t
*)dkdevid
;
5709 for (i
= 0; i
< ((DEV_BSIZE
- sizeof (int)) / sizeof (int));
5714 /* Fill-in checksum */
5715 DKD_FORMCHKSUM(chksum
, dkdevid
);
5717 /* Write the reserved sector */
5718 status
= sd_send_scsi_WRITE(ssc
, buf
, un
->un_sys_blocksize
, blk
,
5721 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
5723 kmem_free(buf
, un
->un_sys_blocksize
);
5725 mutex_enter(SD_MUTEX(un
));
5731 * Function: sd_check_vpd_page_support
5733 * Description: This routine sends an inquiry command with the EVPD bit set and
5734 * a page code of 0x00 to the device. It is used to determine which
5735 * vital product pages are available to find the devid. We are
5736 * looking for pages 0x83 0x80 or 0xB1. If we return a negative 1,
5737 * the device does not support that command.
5739 * Arguments: un - driver soft state (unit) structure
5741 * Return Code: 0 - success
5742 * 1 - check condition
5744 * Context: This routine can sleep.
5748 sd_check_vpd_page_support(sd_ssc_t
*ssc
)
5750 uchar_t
*page_list
= NULL
;
5751 uchar_t page_length
= 0xff; /* Use max possible length */
5752 uchar_t evpd
= 0x01; /* Set the EVPD bit */
5753 uchar_t page_code
= 0x00; /* Supported VPD Pages */
5758 ASSERT(ssc
!= NULL
);
5761 ASSERT(mutex_owned(SD_MUTEX(un
)));
5763 mutex_exit(SD_MUTEX(un
));
5766 * We'll set the page length to the maximum to save figuring it out
5767 * with an additional call.
5769 page_list
= kmem_zalloc(page_length
, KM_SLEEP
);
5771 rval
= sd_send_scsi_INQUIRY(ssc
, page_list
, page_length
, evpd
,
5775 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
5777 mutex_enter(SD_MUTEX(un
));
5780 * Now we must validate that the device accepted the command, as some
5781 * drives do not support it. If the drive does support it, we will
5782 * return 0, and the supported pages will be in un_vpd_page_mask. If
5783 * not, we return -1.
5785 if ((rval
== 0) && (page_list
[VPD_MODE_PAGE
] == 0x00)) {
5786 /* Loop to find one of the 2 pages we need */
5787 counter
= 4; /* Supported pages start at byte 4, with 0x00 */
5790 * Pages are returned in ascending order, and 0x83 is what we
5793 while ((page_list
[counter
] <= 0xB1) &&
5794 (counter
<= (page_list
[VPD_PAGE_LENGTH
] +
5795 VPD_HEAD_OFFSET
))) {
5797 * Add 3 because page_list[3] is the number of
5801 switch (page_list
[counter
]) {
5803 un
->un_vpd_page_mask
|= SD_VPD_SUPPORTED_PG
;
5806 un
->un_vpd_page_mask
|= SD_VPD_UNIT_SERIAL_PG
;
5809 un
->un_vpd_page_mask
|= SD_VPD_OPERATING_PG
;
5812 un
->un_vpd_page_mask
|= SD_VPD_ASCII_OP_PG
;
5815 un
->un_vpd_page_mask
|= SD_VPD_DEVID_WWN_PG
;
5818 un
->un_vpd_page_mask
|= SD_VPD_EXTENDED_DATA_PG
;
5821 un
->un_vpd_page_mask
|= SD_VPD_DEV_CHARACTER_PG
;
5830 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
5831 "sd_check_vpd_page_support: This drive does not implement "
5835 kmem_free(page_list
, page_length
);
5842 * Function: sd_setup_pm
5844 * Description: Initialize Power Management on the device
5846 * Context: Kernel Thread
5850 sd_setup_pm(sd_ssc_t
*ssc
, dev_info_t
*devi
)
5852 uint_t log_page_size
;
5853 uchar_t
*log_page_data
;
5857 ASSERT(ssc
!= NULL
);
5862 * Since we are called from attach, holding a mutex for
5863 * un is unnecessary. Because some of the routines called
5864 * from here require SD_MUTEX to not be held, assert this
5867 ASSERT(!mutex_owned(SD_MUTEX(un
)));
5869 * Since the sd device does not have the 'reg' property,
5870 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
5871 * The following code is to tell cpr that this device
5872 * DOES need to be suspended and resumed.
5874 (void) ddi_prop_update_string(DDI_DEV_T_NONE
, devi
,
5875 "pm-hardware-state", "needs-suspend-resume");
5878 * This complies with the new power management framework
5879 * for certain desktop machines. Create the pm_components
5880 * property as a string array property.
5881 * If un_f_pm_supported is TRUE, that means the disk
5882 * attached HBA has set the "pm-capable" property and
5883 * the value of this property is bigger than 0.
5885 if (un
->un_f_pm_supported
) {
5887 * not all devices have a motor, try it first.
5888 * some devices may return ILLEGAL REQUEST, some
5890 * The following START_STOP_UNIT is used to check if target
5891 * device has a motor.
5893 un
->un_f_start_stop_supported
= TRUE
;
5895 if (un
->un_f_power_condition_supported
) {
5896 rval
= sd_send_scsi_START_STOP_UNIT(ssc
,
5897 SD_POWER_CONDITION
, SD_TARGET_ACTIVE
,
5900 un
->un_f_power_condition_supported
= FALSE
;
5903 if (!un
->un_f_power_condition_supported
) {
5904 rval
= sd_send_scsi_START_STOP_UNIT(ssc
,
5905 SD_START_STOP
, SD_TARGET_START
, SD_PATH_DIRECT
);
5908 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
5909 un
->un_f_start_stop_supported
= FALSE
;
5913 * create pm properties anyways otherwise the parent can't
5916 un
->un_f_pm_is_enabled
= TRUE
;
5917 (void) sd_create_pm_components(devi
, un
);
5920 * If it claims that log sense is supported, check it out.
5922 if (un
->un_f_log_sense_supported
) {
5923 rval
= sd_log_page_supported(ssc
,
5924 START_STOP_CYCLE_PAGE
);
5926 /* Page found, use it. */
5927 un
->un_start_stop_cycle_page
=
5928 START_STOP_CYCLE_PAGE
;
5931 * Page not found or log sense is not
5933 * Notice we do not check the old style
5934 * START_STOP_CYCLE_VU_PAGE because this
5935 * code path does not apply to old disks.
5937 un
->un_f_log_sense_supported
= FALSE
;
5938 un
->un_f_pm_log_sense_smart
= FALSE
;
5946 * For the disk whose attached HBA has not set the "pm-capable"
5947 * property, check if it supports the power management.
5949 if (!un
->un_f_log_sense_supported
) {
5950 un
->un_power_level
= SD_SPINDLE_ON
;
5951 un
->un_f_pm_is_enabled
= FALSE
;
5955 rval
= sd_log_page_supported(ssc
, START_STOP_CYCLE_PAGE
);
5958 if (sd_force_pm_supported
) {
5959 /* Force a successful result */
5965 * If the start-stop cycle counter log page is not supported
5966 * or if the pm-capable property is set to be false (0),
5967 * then we should not create the pm_components property.
5972 * Reading log sense failed, most likely this is
5973 * an older drive that does not support log sense.
5974 * If this fails auto-pm is not supported.
5976 un
->un_power_level
= SD_SPINDLE_ON
;
5977 un
->un_f_pm_is_enabled
= FALSE
;
5979 } else if (rval
== 0) {
5982 * The start stop cycle counter is implemented as page
5983 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
5984 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
5986 if (sd_log_page_supported(ssc
, START_STOP_CYCLE_VU_PAGE
) == 1) {
5988 * Page found, use this one.
5990 un
->un_start_stop_cycle_page
= START_STOP_CYCLE_VU_PAGE
;
5991 un
->un_f_pm_is_enabled
= TRUE
;
5994 * Error or page not found.
5995 * auto-pm is not supported for this device.
5997 un
->un_power_level
= SD_SPINDLE_ON
;
5998 un
->un_f_pm_is_enabled
= FALSE
;
6002 * Page found, use it.
6004 un
->un_start_stop_cycle_page
= START_STOP_CYCLE_PAGE
;
6005 un
->un_f_pm_is_enabled
= TRUE
;
6009 if (un
->un_f_pm_is_enabled
== TRUE
) {
6010 log_page_size
= START_STOP_CYCLE_COUNTER_PAGE_SIZE
;
6011 log_page_data
= kmem_zalloc(log_page_size
, KM_SLEEP
);
6013 rval
= sd_send_scsi_LOG_SENSE(ssc
, log_page_data
,
6014 log_page_size
, un
->un_start_stop_cycle_page
,
6015 0x01, 0, SD_PATH_DIRECT
);
6018 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
6022 if (sd_force_pm_supported
) {
6023 /* Force a successful result */
6029 * If the Log sense for Page( Start/stop cycle counter page)
6030 * succeeds, then power management is supported and we can
6034 (void) sd_create_pm_components(devi
, un
);
6036 un
->un_power_level
= SD_SPINDLE_ON
;
6037 un
->un_f_pm_is_enabled
= FALSE
;
6040 kmem_free(log_page_data
, log_page_size
);
6046 * Function: sd_create_pm_components
6048 * Description: Initialize PM property.
6050 * Context: Kernel thread context
6054 sd_create_pm_components(dev_info_t
*devi
, struct sd_lun
*un
)
6056 ASSERT(!mutex_owned(SD_MUTEX(un
)));
6058 if (un
->un_f_power_condition_supported
) {
6059 if (ddi_prop_update_string_array(DDI_DEV_T_NONE
, devi
,
6060 "pm-components", sd_pwr_pc
.pm_comp
, 5)
6061 != DDI_PROP_SUCCESS
) {
6062 un
->un_power_level
= SD_SPINDLE_ACTIVE
;
6063 un
->un_f_pm_is_enabled
= FALSE
;
6067 if (ddi_prop_update_string_array(DDI_DEV_T_NONE
, devi
,
6068 "pm-components", sd_pwr_ss
.pm_comp
, 3)
6069 != DDI_PROP_SUCCESS
) {
6070 un
->un_power_level
= SD_SPINDLE_ON
;
6071 un
->un_f_pm_is_enabled
= FALSE
;
6076 * When components are initially created they are idle,
6077 * power up any non-removables.
6078 * Note: the return value of pm_raise_power can't be used
6079 * for determining if PM should be enabled for this device.
6080 * Even if you check the return values and remove this
6081 * property created above, the PM framework will not honor the
6082 * change after the first call to pm_raise_power. Hence,
6083 * removal of that property does not help if pm_raise_power
6084 * fails. In the case of removable media, the start/stop
6085 * will fail if the media is not present.
6087 if (un
->un_f_attach_spinup
&& (pm_raise_power(SD_DEVINFO(un
), 0,
6088 SD_PM_STATE_ACTIVE(un
)) == DDI_SUCCESS
)) {
6089 mutex_enter(SD_MUTEX(un
));
6090 un
->un_power_level
= SD_PM_STATE_ACTIVE(un
);
6091 mutex_enter(&un
->un_pm_mutex
);
6092 /* Set to on and not busy. */
6093 un
->un_pm_count
= 0;
6095 mutex_enter(SD_MUTEX(un
));
6096 un
->un_power_level
= SD_PM_STATE_STOPPED(un
);
6097 mutex_enter(&un
->un_pm_mutex
);
6099 un
->un_pm_count
= -1;
6101 mutex_exit(&un
->un_pm_mutex
);
6102 mutex_exit(SD_MUTEX(un
));
6107 * Function: sd_ddi_suspend
6109 * Description: Performs system power-down operations. This includes
6110 * setting the drive state to indicate its suspended so
6111 * that no new commands will be accepted. Also, wait for
6112 * all commands that are in transport or queued to a timer
6113 * for retry to complete. All timeout threads are cancelled.
6115 * Return Code: DDI_FAILURE or DDI_SUCCESS
6117 * Context: Kernel thread context
6121 sd_ddi_suspend(dev_info_t
*devi
)
6124 clock_t wait_cmds_complete
;
6126 un
= ddi_get_soft_state(sd_state
, ddi_get_instance(devi
));
6128 return (DDI_FAILURE
);
6131 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_ddi_suspend: entry\n");
6133 mutex_enter(SD_MUTEX(un
));
6135 /* Return success if the device is already suspended. */
6136 if (un
->un_state
== SD_STATE_SUSPENDED
) {
6137 mutex_exit(SD_MUTEX(un
));
6138 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_ddi_suspend: "
6139 "device already suspended, exiting\n");
6140 return (DDI_SUCCESS
);
6143 /* Return failure if the device is being used by HA */
6144 if (un
->un_resvd_status
&
6145 (SD_RESERVE
| SD_WANT_RESERVE
| SD_LOST_RESERVE
)) {
6146 mutex_exit(SD_MUTEX(un
));
6147 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_ddi_suspend: "
6148 "device in use by HA, exiting\n");
6149 return (DDI_FAILURE
);
6153 * Return failure if the device is in a resource wait
6154 * or power changing state.
6156 if ((un
->un_state
== SD_STATE_RWAIT
) ||
6157 (un
->un_state
== SD_STATE_PM_CHANGING
)) {
6158 mutex_exit(SD_MUTEX(un
));
6159 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_ddi_suspend: "
6160 "device in resource wait state, exiting\n");
6161 return (DDI_FAILURE
);
6165 un
->un_save_state
= un
->un_last_state
;
6166 New_state(un
, SD_STATE_SUSPENDED
);
6169 * Wait for all commands that are in transport or queued to a timer
6170 * for retry to complete.
6172 * While waiting, no new commands will be accepted or sent because of
6173 * the new state we set above.
6175 * Wait till current operation has completed. If we are in the resource
6176 * wait state (with an intr outstanding) then we need to wait till the
6177 * intr completes and starts the next cmd. We want to wait for
6178 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6180 wait_cmds_complete
= ddi_get_lbolt() +
6181 (sd_wait_cmds_complete
* drv_usectohz(1000000));
6183 while (un
->un_ncmds_in_transport
!= 0) {
6185 * Fail if commands do not finish in the specified time.
6187 if (cv_timedwait(&un
->un_disk_busy_cv
, SD_MUTEX(un
),
6188 wait_cmds_complete
) == -1) {
6190 * Undo the state changes made above. Everything
6191 * must go back to it's original value.
6194 un
->un_last_state
= un
->un_save_state
;
6195 /* Wake up any threads that might be waiting. */
6196 cv_broadcast(&un
->un_suspend_cv
);
6197 mutex_exit(SD_MUTEX(un
));
6198 SD_ERROR(SD_LOG_IO_PM
, un
,
6199 "sd_ddi_suspend: failed due to outstanding cmds\n");
6200 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_ddi_suspend: exiting\n");
6201 return (DDI_FAILURE
);
6206 * Cancel SCSI watch thread and timeouts, if any are active
6209 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un
)) {
6210 opaque_t temp_token
= un
->un_swr_token
;
6211 mutex_exit(SD_MUTEX(un
));
6212 scsi_watch_suspend(temp_token
);
6213 mutex_enter(SD_MUTEX(un
));
6216 if (un
->un_reset_throttle_timeid
!= NULL
) {
6217 timeout_id_t temp_id
= un
->un_reset_throttle_timeid
;
6218 un
->un_reset_throttle_timeid
= NULL
;
6219 mutex_exit(SD_MUTEX(un
));
6220 (void) untimeout(temp_id
);
6221 mutex_enter(SD_MUTEX(un
));
6224 if (un
->un_dcvb_timeid
!= NULL
) {
6225 timeout_id_t temp_id
= un
->un_dcvb_timeid
;
6226 un
->un_dcvb_timeid
= NULL
;
6227 mutex_exit(SD_MUTEX(un
));
6228 (void) untimeout(temp_id
);
6229 mutex_enter(SD_MUTEX(un
));
6232 mutex_enter(&un
->un_pm_mutex
);
6233 if (un
->un_pm_timeid
!= NULL
) {
6234 timeout_id_t temp_id
= un
->un_pm_timeid
;
6235 un
->un_pm_timeid
= NULL
;
6236 mutex_exit(&un
->un_pm_mutex
);
6237 mutex_exit(SD_MUTEX(un
));
6238 (void) untimeout(temp_id
);
6239 mutex_enter(SD_MUTEX(un
));
6241 mutex_exit(&un
->un_pm_mutex
);
6244 if (un
->un_rmw_msg_timeid
!= NULL
) {
6245 timeout_id_t temp_id
= un
->un_rmw_msg_timeid
;
6246 un
->un_rmw_msg_timeid
= NULL
;
6247 mutex_exit(SD_MUTEX(un
));
6248 (void) untimeout(temp_id
);
6249 mutex_enter(SD_MUTEX(un
));
6252 if (un
->un_retry_timeid
!= NULL
) {
6253 timeout_id_t temp_id
= un
->un_retry_timeid
;
6254 un
->un_retry_timeid
= NULL
;
6255 mutex_exit(SD_MUTEX(un
));
6256 (void) untimeout(temp_id
);
6257 mutex_enter(SD_MUTEX(un
));
6259 if (un
->un_retry_bp
!= NULL
) {
6260 un
->un_retry_bp
->av_forw
= un
->un_waitq_headp
;
6261 un
->un_waitq_headp
= un
->un_retry_bp
;
6262 if (un
->un_waitq_tailp
== NULL
) {
6263 un
->un_waitq_tailp
= un
->un_retry_bp
;
6265 un
->un_retry_bp
= NULL
;
6266 un
->un_retry_statp
= NULL
;
6270 if (un
->un_direct_priority_timeid
!= NULL
) {
6271 timeout_id_t temp_id
= un
->un_direct_priority_timeid
;
6272 un
->un_direct_priority_timeid
= NULL
;
6273 mutex_exit(SD_MUTEX(un
));
6274 (void) untimeout(temp_id
);
6275 mutex_enter(SD_MUTEX(un
));
6278 if (un
->un_f_is_fibre
== TRUE
) {
6280 * Remove callbacks for insert and remove events
6282 if (un
->un_insert_event
!= NULL
) {
6283 mutex_exit(SD_MUTEX(un
));
6284 (void) ddi_remove_event_handler(un
->un_insert_cb_id
);
6285 mutex_enter(SD_MUTEX(un
));
6286 un
->un_insert_event
= NULL
;
6289 if (un
->un_remove_event
!= NULL
) {
6290 mutex_exit(SD_MUTEX(un
));
6291 (void) ddi_remove_event_handler(un
->un_remove_cb_id
);
6292 mutex_enter(SD_MUTEX(un
));
6293 un
->un_remove_event
= NULL
;
6297 mutex_exit(SD_MUTEX(un
));
6299 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_ddi_suspend: exit\n");
6301 return (DDI_SUCCESS
);
6306 * Function: sd_ddi_resume
6308 * Description: Performs system power-up operations..
6310 * Return Code: DDI_SUCCESS
6313 * Context: Kernel thread context
6317 sd_ddi_resume(dev_info_t
*devi
)
6321 un
= ddi_get_soft_state(sd_state
, ddi_get_instance(devi
));
6323 return (DDI_FAILURE
);
6326 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_ddi_resume: entry\n");
6328 mutex_enter(SD_MUTEX(un
));
6332 * Restore the state which was saved to give the
6333 * the right state in un_last_state
6335 un
->un_last_state
= un
->un_save_state
;
6337 * Note: throttle comes back at full.
6338 * Also note: this MUST be done before calling pm_raise_power
6339 * otherwise the system can get hung in biowait. The scenario where
6340 * this'll happen is under cpr suspend. Writing of the system
6341 * state goes through sddump, which writes 0 to un_throttle. If
6342 * writing the system state then fails, example if the partition is
6343 * too small, then cpr attempts a resume. If throttle isn't restored
6344 * from the saved value until after calling pm_raise_power then
6345 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
6348 un
->un_throttle
= un
->un_saved_throttle
;
6351 * The chance of failure is very rare as the only command done in power
6352 * entry point is START command when you transition from 0->1 or
6353 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
6354 * which suspend was done. Ignore the return value as the resume should
6355 * not be failed. In the case of removable media the media need not be
6356 * inserted and hence there is a chance that raise power will fail with
6357 * media not present.
6359 if (un
->un_f_attach_spinup
) {
6360 mutex_exit(SD_MUTEX(un
));
6361 (void) pm_raise_power(SD_DEVINFO(un
), 0,
6362 SD_PM_STATE_ACTIVE(un
));
6363 mutex_enter(SD_MUTEX(un
));
6367 * Don't broadcast to the suspend cv and therefore possibly
6368 * start I/O until after power has been restored.
6370 cv_broadcast(&un
->un_suspend_cv
);
6371 cv_broadcast(&un
->un_state_cv
);
6373 /* restart thread */
6374 if (SD_OK_TO_RESUME_SCSI_WATCHER(un
)) {
6375 scsi_watch_resume(un
->un_swr_token
);
6378 #if (defined(__fibre))
6379 if (un
->un_f_is_fibre
== TRUE
) {
6381 * Add callbacks for insert and remove events
6383 if (strcmp(un
->un_node_type
, DDI_NT_BLOCK_CHAN
)) {
6384 sd_init_event_callbacks(un
);
6390 * Transport any pending commands to the target.
6392 * If this is a low-activity device commands in queue will have to wait
6393 * until new commands come in, which may take awhile. Also, we
6394 * specifically don't check un_ncmds_in_transport because we know that
6395 * there really are no commands in progress after the unit was
6396 * suspended and we could have reached the throttle level, been
6397 * suspended, and have no new commands coming in for awhile. Highly
6398 * unlikely, but so is the low-activity disk scenario.
6400 ddi_xbuf_dispatch(un
->un_xbuf_attr
);
6402 sd_start_cmds(un
, NULL
);
6403 mutex_exit(SD_MUTEX(un
));
6405 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_ddi_resume: exit\n");
6407 return (DDI_SUCCESS
);
6412 * Function: sd_pm_state_change
6414 * Description: Change the driver power state.
6415 * Someone else is required to actually change the driver
6418 * Arguments: un - driver soft state (unit) structure
6419 * level - the power level that is changed to
6420 * flag - to decide how to change the power state
6422 * Return Code: DDI_SUCCESS
6424 * Context: Kernel thread context
6427 sd_pm_state_change(struct sd_lun
*un
, int level
, int flag
)
6430 SD_TRACE(SD_LOG_POWER
, un
, "sd_pm_state_change: entry\n");
6432 ASSERT(!mutex_owned(SD_MUTEX(un
)));
6433 mutex_enter(SD_MUTEX(un
));
6435 if (flag
== SD_PM_STATE_ROLLBACK
|| SD_PM_IS_IO_CAPABLE(un
, level
)) {
6436 un
->un_power_level
= level
;
6437 ASSERT(!mutex_owned(&un
->un_pm_mutex
));
6438 mutex_enter(&un
->un_pm_mutex
);
6439 if (SD_DEVICE_IS_IN_LOW_POWER(un
)) {
6441 ASSERT(un
->un_pm_count
== 0);
6443 mutex_exit(&un
->un_pm_mutex
);
6446 * Exit if power management is not enabled for this device,
6447 * or if the device is being used by HA.
6449 if ((un
->un_f_pm_is_enabled
== FALSE
) || (un
->un_resvd_status
&
6450 (SD_RESERVE
| SD_WANT_RESERVE
| SD_LOST_RESERVE
))) {
6451 mutex_exit(SD_MUTEX(un
));
6452 SD_TRACE(SD_LOG_POWER
, un
,
6453 "sd_pm_state_change: exiting\n");
6454 return (DDI_FAILURE
);
6457 SD_INFO(SD_LOG_POWER
, un
, "sd_pm_state_change: "
6458 "un_ncmds_in_driver=%ld\n", un
->un_ncmds_in_driver
);
6461 * See if the device is not busy, ie.:
6462 * - we have no commands in the driver for this device
6463 * - not waiting for resources
6465 if ((un
->un_ncmds_in_driver
== 0) &&
6466 (un
->un_state
!= SD_STATE_RWAIT
)) {
6468 * The device is not busy, so it is OK to go to low
6469 * power state. Indicate low power, but rely on someone
6470 * else to actually change it.
6472 mutex_enter(&un
->un_pm_mutex
);
6473 un
->un_pm_count
= -1;
6474 mutex_exit(&un
->un_pm_mutex
);
6475 un
->un_power_level
= level
;
6479 mutex_exit(SD_MUTEX(un
));
6481 SD_TRACE(SD_LOG_POWER
, un
, "sd_pm_state_change: exit\n");
6483 return (DDI_SUCCESS
);
6488 * Function: sd_pm_idletimeout_handler
6490 * Description: A timer routine that's active only while a device is busy.
6491 * The purpose is to extend slightly the pm framework's busy
6492 * view of the device to prevent busy/idle thrashing for
6493 * back-to-back commands. Do this by comparing the current time
6494 * to the time at which the last command completed and when the
6495 * difference is greater than sd_pm_idletime, call
6496 * pm_idle_component. In addition to indicating idle to the pm
6497 * framework, update the chain type to again use the internal pm
6498 * layers of the driver.
6500 * Arguments: arg - driver soft state (unit) structure
6502 * Context: Executes in a timeout(9F) thread context
6506 sd_pm_idletimeout_handler(void *arg
)
6508 struct sd_lun
*un
= arg
;
6512 mutex_enter(&sd_detach_mutex
);
6513 if (un
->un_detach_count
!= 0) {
6514 /* Abort if the instance is detaching */
6515 mutex_exit(&sd_detach_mutex
);
6518 mutex_exit(&sd_detach_mutex
);
6520 now
= ddi_get_time();
6522 * Grab both mutexes, in the proper order, since we're accessing
6523 * both PM and softstate variables.
6525 mutex_enter(SD_MUTEX(un
));
6526 mutex_enter(&un
->un_pm_mutex
);
6527 if (((now
- un
->un_pm_idle_time
) > sd_pm_idletime
) &&
6528 (un
->un_ncmds_in_driver
== 0) && (un
->un_pm_count
== 0)) {
6530 * Update the chain types.
6531 * This takes affect on the next new command received.
6533 if (un
->un_f_non_devbsize_supported
) {
6534 un
->un_buf_chain_type
= SD_CHAIN_INFO_RMMEDIA
;
6536 un
->un_buf_chain_type
= SD_CHAIN_INFO_DISK
;
6538 un
->un_uscsi_chain_type
= SD_CHAIN_INFO_USCSI_CMD
;
6540 SD_TRACE(SD_LOG_IO_PM
, un
,
6541 "sd_pm_idletimeout_handler: idling device\n");
6542 (void) pm_idle_component(SD_DEVINFO(un
), 0);
6543 un
->un_pm_idle_timeid
= NULL
;
6545 un
->un_pm_idle_timeid
=
6546 timeout(sd_pm_idletimeout_handler
, un
,
6547 (drv_usectohz((clock_t)300000))); /* 300 ms. */
6549 mutex_exit(&un
->un_pm_mutex
);
6550 mutex_exit(SD_MUTEX(un
));
6555 * Function: sd_pm_timeout_handler
6557 * Description: Callback to tell framework we are idle.
6559 * Context: timeout(9f) thread context.
6563 sd_pm_timeout_handler(void *arg
)
6565 struct sd_lun
*un
= arg
;
6567 (void) pm_idle_component(SD_DEVINFO(un
), 0);
6568 mutex_enter(&un
->un_pm_mutex
);
6569 un
->un_pm_timeid
= NULL
;
6570 mutex_exit(&un
->un_pm_mutex
);
6577 * Description: PM entry point.
6579 * Return Code: DDI_SUCCESS
6582 * Context: Kernel thread context
6586 sdpower(dev_info_t
*devi
, int component
, int level
)
6590 int rval
= DDI_SUCCESS
;
6591 uint_t i
, log_page_size
, maxcycles
, ncycles
;
6592 uchar_t
*log_page_data
;
6596 struct pm_trans_data sd_pm_tran_data
;
6599 uchar_t state_before_pm
;
6600 int got_semaphore_here
;
6602 int last_power_level
;
6604 instance
= ddi_get_instance(devi
);
6606 if (((un
= ddi_get_soft_state(sd_state
, instance
)) == NULL
) ||
6607 !SD_PM_IS_LEVEL_VALID(un
, level
) || component
!= 0) {
6608 return (DDI_FAILURE
);
6611 ssc
= sd_ssc_init(un
);
6613 SD_TRACE(SD_LOG_IO_PM
, un
, "sdpower: entry, level = %d\n", level
);
6616 * Must synchronize power down with close.
6617 * Attempt to decrement/acquire the open/close semaphore,
6618 * but do NOT wait on it. If it's not greater than zero,
6619 * ie. it can't be decremented without waiting, then
6620 * someone else, either open or close, already has it
6621 * and the try returns 0. Use that knowledge here to determine
6622 * if it's OK to change the device power level.
6623 * Also, only increment it on exit if it was decremented, ie. gotten,
6626 got_semaphore_here
= sema_tryp(&un
->un_semoclose
);
6628 mutex_enter(SD_MUTEX(un
));
6630 SD_INFO(SD_LOG_POWER
, un
, "sdpower: un_ncmds_in_driver = %ld\n",
6631 un
->un_ncmds_in_driver
);
6634 * If un_ncmds_in_driver is non-zero it indicates commands are
6635 * already being processed in the driver, or if the semaphore was
6636 * not gotten here it indicates an open or close is being processed.
6637 * At the same time somebody is requesting to go to a lower power
6638 * that can't perform I/O, which can't happen, therefore we need to
6641 if ((!SD_PM_IS_IO_CAPABLE(un
, level
)) &&
6642 ((un
->un_ncmds_in_driver
!= 0) || (got_semaphore_here
== 0))) {
6643 mutex_exit(SD_MUTEX(un
));
6645 if (got_semaphore_here
!= 0) {
6646 sema_v(&un
->un_semoclose
);
6648 SD_TRACE(SD_LOG_IO_PM
, un
,
6649 "sdpower: exit, device has queued cmds.\n");
6651 goto sdpower_failed
;
6655 * if it is OFFLINE that means the disk is completely dead
6656 * in our case we have to put the disk in on or off by sending commands
6657 * Of course that will fail anyway so return back here.
6659 * Power changes to a device that's OFFLINE or SUSPENDED
6662 if ((un
->un_state
== SD_STATE_OFFLINE
) ||
6663 (un
->un_state
== SD_STATE_SUSPENDED
)) {
6664 mutex_exit(SD_MUTEX(un
));
6666 if (got_semaphore_here
!= 0) {
6667 sema_v(&un
->un_semoclose
);
6669 SD_TRACE(SD_LOG_IO_PM
, un
,
6670 "sdpower: exit, device is off-line.\n");
6672 goto sdpower_failed
;
6676 * Change the device's state to indicate it's power level
6677 * is being changed. Do this to prevent a power off in the
6678 * middle of commands, which is especially bad on devices
6679 * that are really powered off instead of just spun down.
6681 state_before_pm
= un
->un_state
;
6682 un
->un_state
= SD_STATE_PM_CHANGING
;
6684 mutex_exit(SD_MUTEX(un
));
6687 * If log sense command is not supported, bypass the
6688 * following checking, otherwise, check the log sense
6689 * information for this device.
6691 if (SD_PM_STOP_MOTOR_NEEDED(un
, level
) &&
6692 un
->un_f_log_sense_supported
) {
6694 * Get the log sense information to understand whether the
6695 * the powercycle counts have gone beyond the threshhold.
6697 log_page_size
= START_STOP_CYCLE_COUNTER_PAGE_SIZE
;
6698 log_page_data
= kmem_zalloc(log_page_size
, KM_SLEEP
);
6700 mutex_enter(SD_MUTEX(un
));
6701 log_sense_page
= un
->un_start_stop_cycle_page
;
6702 mutex_exit(SD_MUTEX(un
));
6704 rval
= sd_send_scsi_LOG_SENSE(ssc
, log_page_data
,
6705 log_page_size
, log_sense_page
, 0x01, 0, SD_PATH_DIRECT
);
6709 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
6711 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
6715 if (sd_force_pm_supported
) {
6716 /* Force a successful result */
6721 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
6722 "Log Sense Failed\n");
6724 kmem_free(log_page_data
, log_page_size
);
6725 /* Cannot support power management on those drives */
6727 if (got_semaphore_here
!= 0) {
6728 sema_v(&un
->un_semoclose
);
6731 * On exit put the state back to it's original value
6732 * and broadcast to anyone waiting for the power
6733 * change completion.
6735 mutex_enter(SD_MUTEX(un
));
6736 un
->un_state
= state_before_pm
;
6737 cv_broadcast(&un
->un_suspend_cv
);
6738 mutex_exit(SD_MUTEX(un
));
6739 SD_TRACE(SD_LOG_IO_PM
, un
,
6740 "sdpower: exit, Log Sense Failed.\n");
6742 goto sdpower_failed
;
6746 * From the page data - Convert the essential information to
6750 (log_page_data
[0x1c] << 24) | (log_page_data
[0x1d] << 16) |
6751 (log_page_data
[0x1E] << 8) | log_page_data
[0x1F];
6754 (log_page_data
[0x24] << 24) | (log_page_data
[0x25] << 16) |
6755 (log_page_data
[0x26] << 8) | log_page_data
[0x27];
6757 if (un
->un_f_pm_log_sense_smart
) {
6758 sd_pm_tran_data
.un
.smart_count
.allowed
= maxcycles
;
6759 sd_pm_tran_data
.un
.smart_count
.consumed
= ncycles
;
6760 sd_pm_tran_data
.un
.smart_count
.flag
= 0;
6761 sd_pm_tran_data
.format
= DC_SMART_FORMAT
;
6763 sd_pm_tran_data
.un
.scsi_cycles
.lifemax
= maxcycles
;
6764 sd_pm_tran_data
.un
.scsi_cycles
.ncycles
= ncycles
;
6765 for (i
= 0; i
< DC_SCSI_MFR_LEN
; i
++) {
6766 sd_pm_tran_data
.un
.scsi_cycles
.svc_date
[i
] =
6769 sd_pm_tran_data
.un
.scsi_cycles
.flag
= 0;
6770 sd_pm_tran_data
.format
= DC_SCSI_FORMAT
;
6773 kmem_free(log_page_data
, log_page_size
);
6776 * Call pm_trans_check routine to get the Ok from
6779 rval
= pm_trans_check(&sd_pm_tran_data
, &intvlp
);
6781 if (sd_force_pm_supported
) {
6782 /* Force a successful result */
6789 * Not Ok to Power cycle or error in parameters passed
6790 * Would have given the advised time to consider power
6791 * cycle. Based on the new intvlp parameter we are
6792 * supposed to pretend we are busy so that pm framework
6793 * will never call our power entry point. Because of
6794 * that install a timeout handler and wait for the
6795 * recommended time to elapse so that power management
6796 * can be effective again.
6798 * To effect this behavior, call pm_busy_component to
6799 * indicate to the framework this device is busy.
6800 * By not adjusting un_pm_count the rest of PM in
6801 * the driver will function normally, and independent
6802 * of this but because the framework is told the device
6803 * is busy it won't attempt powering down until it gets
6804 * a matching idle. The timeout handler sends this.
6805 * Note: sd_pm_entry can't be called here to do this
6806 * because sdpower may have been called as a result
6807 * of a call to pm_raise_power from within sd_pm_entry.
6809 * If a timeout handler is already active then
6810 * don't install another.
6812 mutex_enter(&un
->un_pm_mutex
);
6813 if (un
->un_pm_timeid
== NULL
) {
6815 timeout(sd_pm_timeout_handler
,
6816 un
, intvlp
* drv_usectohz(1000000));
6817 mutex_exit(&un
->un_pm_mutex
);
6818 (void) pm_busy_component(SD_DEVINFO(un
), 0);
6820 mutex_exit(&un
->un_pm_mutex
);
6822 if (got_semaphore_here
!= 0) {
6823 sema_v(&un
->un_semoclose
);
6826 * On exit put the state back to it's original value
6827 * and broadcast to anyone waiting for the power
6828 * change completion.
6830 mutex_enter(SD_MUTEX(un
));
6831 un
->un_state
= state_before_pm
;
6832 cv_broadcast(&un
->un_suspend_cv
);
6833 mutex_exit(SD_MUTEX(un
));
6835 SD_TRACE(SD_LOG_IO_PM
, un
, "sdpower: exit, "
6836 "trans check Failed, not ok to power cycle.\n");
6838 goto sdpower_failed
;
6840 if (got_semaphore_here
!= 0) {
6841 sema_v(&un
->un_semoclose
);
6844 * On exit put the state back to it's original value
6845 * and broadcast to anyone waiting for the power
6846 * change completion.
6848 mutex_enter(SD_MUTEX(un
));
6849 un
->un_state
= state_before_pm
;
6850 cv_broadcast(&un
->un_suspend_cv
);
6851 mutex_exit(SD_MUTEX(un
));
6852 SD_TRACE(SD_LOG_IO_PM
, un
,
6853 "sdpower: exit, trans check command Failed.\n");
6855 goto sdpower_failed
;
6859 if (!SD_PM_IS_IO_CAPABLE(un
, level
)) {
6861 * Save the last state... if the STOP FAILS we need it
6864 mutex_enter(SD_MUTEX(un
));
6865 save_state
= un
->un_last_state
;
6866 last_power_level
= un
->un_power_level
;
6868 * There must not be any cmds. getting processed
6869 * in the driver when we get here. Power to the
6870 * device is potentially going off.
6872 ASSERT(un
->un_ncmds_in_driver
== 0);
6873 mutex_exit(SD_MUTEX(un
));
6876 * For now PM suspend the device completely before spindle is
6879 if ((rval
= sd_pm_state_change(un
, level
, SD_PM_STATE_CHANGE
))
6881 if (got_semaphore_here
!= 0) {
6882 sema_v(&un
->un_semoclose
);
6885 * On exit put the state back to it's original value
6886 * and broadcast to anyone waiting for the power
6887 * change completion.
6889 mutex_enter(SD_MUTEX(un
));
6890 un
->un_state
= state_before_pm
;
6891 un
->un_power_level
= last_power_level
;
6892 cv_broadcast(&un
->un_suspend_cv
);
6893 mutex_exit(SD_MUTEX(un
));
6894 SD_TRACE(SD_LOG_IO_PM
, un
,
6895 "sdpower: exit, PM suspend Failed.\n");
6897 goto sdpower_failed
;
6902 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6903 * close, or strategy. Dump no long uses this routine, it uses it's
6904 * own code so it can be done in polled mode.
6907 medium_present
= TRUE
;
6910 * When powering up, issue a TUR in case the device is at unit
6911 * attention. Don't do retries. Bypass the PM layer, otherwise
6912 * a deadlock on un_pm_busy_cv will occur.
6914 if (SD_PM_IS_IO_CAPABLE(un
, level
)) {
6915 sval
= sd_send_scsi_TEST_UNIT_READY(ssc
,
6916 SD_DONT_RETRY_TUR
| SD_BYPASS_PM
);
6918 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
6921 if (un
->un_f_power_condition_supported
) {
6922 char *pm_condition_name
[] = {"STOPPED", "STANDBY",
6924 SD_TRACE(SD_LOG_IO_PM
, un
,
6925 "sdpower: sending \'%s\' power condition",
6926 pm_condition_name
[level
]);
6927 sval
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_POWER_CONDITION
,
6928 sd_pl2pc
[level
], SD_PATH_DIRECT
);
6930 SD_TRACE(SD_LOG_IO_PM
, un
, "sdpower: sending \'%s\' unit\n",
6931 ((level
== SD_SPINDLE_ON
) ? "START" : "STOP"));
6932 sval
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_START_STOP
,
6933 ((level
== SD_SPINDLE_ON
) ? SD_TARGET_START
:
6934 SD_TARGET_STOP
), SD_PATH_DIRECT
);
6938 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
6940 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
6943 /* Command failed, check for media present. */
6944 if ((sval
== ENXIO
) && un
->un_f_has_removable_media
) {
6945 medium_present
= FALSE
;
6949 * The conditions of interest here are:
6950 * if a spindle off with media present fails,
6951 * then restore the state and return an error.
6952 * else if a spindle on fails,
6953 * then return an error (there's no state to restore).
6954 * In all other cases we setup for the new state
6955 * and return success.
6957 if (!SD_PM_IS_IO_CAPABLE(un
, level
)) {
6958 if ((medium_present
== TRUE
) && (sval
!= 0)) {
6959 /* The stop command from above failed */
6962 * The stop command failed, and we have media
6963 * present. Put the level back by calling the
6964 * sd_pm_resume() and set the state back to
6965 * it's previous value.
6967 (void) sd_pm_state_change(un
, last_power_level
,
6968 SD_PM_STATE_ROLLBACK
);
6969 mutex_enter(SD_MUTEX(un
));
6970 un
->un_last_state
= save_state
;
6971 mutex_exit(SD_MUTEX(un
));
6972 } else if (un
->un_f_monitor_media_state
) {
6974 * The stop command from above succeeded.
6975 * Terminate watch thread in case of removable media
6976 * devices going into low power state. This is as per
6977 * the requirements of pm framework, otherwise commands
6978 * will be generated for the device (through watch
6979 * thread), even when the device is in low power state.
6981 mutex_enter(SD_MUTEX(un
));
6982 un
->un_f_watcht_stopped
= FALSE
;
6983 if (un
->un_swr_token
!= NULL
) {
6984 opaque_t temp_token
= un
->un_swr_token
;
6985 un
->un_f_watcht_stopped
= TRUE
;
6986 un
->un_swr_token
= NULL
;
6987 mutex_exit(SD_MUTEX(un
));
6988 (void) scsi_watch_request_terminate(temp_token
,
6989 SCSI_WATCH_TERMINATE_ALL_WAIT
);
6991 mutex_exit(SD_MUTEX(un
));
6996 * The level requested is I/O capable.
6997 * Legacy behavior: return success on a failed spinup
6998 * if there is no media in the drive.
6999 * Do this by looking at medium_present here.
7001 if ((sval
!= 0) && medium_present
) {
7002 /* The start command from above failed */
7006 * The start command from above succeeded
7007 * PM resume the devices now that we have
7010 (void) sd_pm_state_change(un
, level
,
7011 SD_PM_STATE_CHANGE
);
7014 * Resume the watch thread since it was suspended
7015 * when the device went into low power mode.
7017 if (un
->un_f_monitor_media_state
) {
7018 mutex_enter(SD_MUTEX(un
));
7019 if (un
->un_f_watcht_stopped
== TRUE
) {
7020 opaque_t temp_token
;
7022 un
->un_f_watcht_stopped
= FALSE
;
7023 mutex_exit(SD_MUTEX(un
));
7025 sd_watch_request_submit(un
);
7026 mutex_enter(SD_MUTEX(un
));
7027 un
->un_swr_token
= temp_token
;
7029 mutex_exit(SD_MUTEX(un
));
7034 if (got_semaphore_here
!= 0) {
7035 sema_v(&un
->un_semoclose
);
7038 * On exit put the state back to it's original value
7039 * and broadcast to anyone waiting for the power
7040 * change completion.
7042 mutex_enter(SD_MUTEX(un
));
7043 un
->un_state
= state_before_pm
;
7044 cv_broadcast(&un
->un_suspend_cv
);
7045 mutex_exit(SD_MUTEX(un
));
7047 SD_TRACE(SD_LOG_IO_PM
, un
, "sdpower: exit, status = 0x%x\n", rval
);
7055 return (DDI_FAILURE
);
7061 * Function: sdattach
7063 * Description: Driver's attach(9e) entry point function.
7065 * Arguments: devi - opaque device info handle
7068 * Return Code: DDI_SUCCESS
7071 * Context: Kernel thread context
7075 sdattach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
)
7079 return (sd_unit_attach(devi
));
7081 return (sd_ddi_resume(devi
));
7085 return (DDI_FAILURE
);
7090 * Function: sddetach
7092 * Description: Driver's detach(9E) entry point function.
7094 * Arguments: devi - opaque device info handle
7097 * Return Code: DDI_SUCCESS
7100 * Context: Kernel thread context
7104 sddetach(dev_info_t
*devi
, ddi_detach_cmd_t cmd
)
7108 return (sd_unit_detach(devi
));
7110 return (sd_ddi_suspend(devi
));
7114 return (DDI_FAILURE
);
7119 * Function: sd_sync_with_callback
7121 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
7122 * state while the callback routine is active.
7124 * Arguments: un: softstate structure for the instance
7126 * Context: Kernel thread context
7130 sd_sync_with_callback(struct sd_lun
*un
)
7134 mutex_enter(SD_MUTEX(un
));
7136 ASSERT(un
->un_in_callback
>= 0);
7138 while (un
->un_in_callback
> 0) {
7139 mutex_exit(SD_MUTEX(un
));
7141 mutex_enter(SD_MUTEX(un
));
7144 mutex_exit(SD_MUTEX(un
));
7148 * Function: sd_unit_attach
7150 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
7151 * the soft state structure for the device and performs
7152 * all necessary structure and device initializations.
7154 * Arguments: devi: the system's dev_info_t for the device.
7156 * Return Code: DDI_SUCCESS if attach is successful.
7157 * DDI_FAILURE if any part of the attach fails.
7159 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
7160 * Kernel thread context only. Can sleep.
7164 sd_unit_attach(dev_info_t
*devi
)
7166 struct scsi_device
*devp
;
7170 int reservation_flag
= SD_TARGET_IS_UNRESERVED
;
7177 dev_info_t
*pdip
= ddi_get_parent(devi
);
7179 int geom_label_valid
= 0;
7182 struct sd_fm_internal
*sfip
= NULL
;
7186 * Retrieve the target driver's private data area. This was set
7189 devp
= ddi_get_driver_private(devi
);
7192 * Retrieve the target ID of the device.
7194 tgt
= ddi_prop_get_int(DDI_DEV_T_ANY
, devi
, DDI_PROP_DONTPASS
,
7195 SCSI_ADDR_PROP_TARGET
, -1);
7198 * Since we have no idea what state things were left in by the last
7199 * user of the device, set up some 'default' settings, ie. turn 'em
7200 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
7201 * Do this before the scsi_probe, which sends an inquiry.
7202 * This is a fix for bug (4430280).
7203 * Of special importance is wide-xfer. The drive could have been left
7204 * in wide transfer mode by the last driver to communicate with it,
7205 * this includes us. If that's the case, and if the following is not
7206 * setup properly or we don't re-negotiate with the drive prior to
7207 * transferring data to/from the drive, it causes bus parity errors,
7208 * data overruns, and unexpected interrupts. This first occurred when
7209 * the fix for bug (4378686) was made.
7211 (void) scsi_ifsetcap(&devp
->sd_address
, "lun-reset", 0, 1);
7212 (void) scsi_ifsetcap(&devp
->sd_address
, "wide-xfer", 0, 1);
7213 (void) scsi_ifsetcap(&devp
->sd_address
, "auto-rqsense", 0, 1);
7216 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
7217 * on a target. Setting it per lun instance actually sets the
7218 * capability of this target, which affects those luns already
7219 * attached on the same target. So during attach, we can only disable
7220 * this capability only when no other lun has been attached on this
7221 * target. By doing this, we assume a target has the same tagged-qing
7222 * capability for every lun. The condition can be removed when HBA
7223 * is changed to support per lun based tagged-qing capability.
7225 if (sd_scsi_get_target_lun_count(pdip
, tgt
) < 1) {
7226 (void) scsi_ifsetcap(&devp
->sd_address
, "tagged-qing", 0, 1);
7230 * Use scsi_probe() to issue an INQUIRY command to the device.
7231 * This call will allocate and fill in the scsi_inquiry structure
7232 * and point the sd_inq member of the scsi_device structure to it.
7233 * If the attach succeeds, then this memory will not be de-allocated
7234 * (via scsi_unprobe()) until the instance is detached.
7236 if (scsi_probe(devp
, SLEEP_FUNC
) != SCSIPROBE_EXISTS
) {
7241 * Check the device type as specified in the inquiry data and
7242 * claim it if it is of a type that we support.
7244 switch (devp
->sd_inq
->inq_dtype
) {
7247 case DTYPE_RODIRECT
:
7251 case DTYPE_NOTPRESENT
:
7253 /* Unsupported device type; fail the attach. */
7258 * Allocate the soft state structure for this unit.
7260 * We rely upon this memory being set to all zeroes by
7261 * ddi_soft_state_zalloc(). We assume that any member of the
7262 * soft state structure that is not explicitly initialized by
7263 * this routine will have a value of zero.
7265 instance
= ddi_get_instance(devp
->sd_dev
);
7266 #ifndef XPV_HVM_DRIVER
7267 if (ddi_soft_state_zalloc(sd_state
, instance
) != DDI_SUCCESS
) {
7270 #endif /* !XPV_HVM_DRIVER */
7273 * Retrieve a pointer to the newly-allocated soft state.
7275 * This should NEVER fail if the ddi_soft_state_zalloc() call above
7276 * was successful, unless something has gone horribly wrong and the
7277 * ddi's soft state internals are corrupt (in which case it is
7278 * probably better to halt here than just fail the attach....)
7280 if ((un
= ddi_get_soft_state(sd_state
, instance
)) == NULL
) {
7281 panic("sd_unit_attach: NULL soft state on instance:0x%x",
7287 * Link the back ptr of the driver soft state to the scsi_device
7288 * struct for this lun.
7289 * Save a pointer to the softstate in the driver-private area of
7290 * the scsi_device struct.
7291 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
7292 * we first set un->un_sd below.
7295 devp
->sd_private
= (opaque_t
)un
;
7298 * The following must be after devp is stored in the soft state struct.
7301 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
7302 "%s_unit_attach: un:0x%p instance:%d\n",
7303 ddi_driver_name(devi
), un
, instance
);
7307 * Set up the device type and node type (for the minor nodes).
7308 * By default we assume that the device can at least support the
7309 * Common Command Set. Call it a CD-ROM if it reports itself
7310 * as a RODIRECT device.
7312 switch (devp
->sd_inq
->inq_dtype
) {
7313 case DTYPE_RODIRECT
:
7314 un
->un_node_type
= DDI_NT_CD_CHAN
;
7315 un
->un_ctype
= CTYPE_CDROM
;
7318 un
->un_node_type
= DDI_NT_BLOCK_CHAN
;
7319 un
->un_ctype
= CTYPE_ROD
;
7322 un
->un_node_type
= DDI_NT_BLOCK_CHAN
;
7323 un
->un_ctype
= CTYPE_CCS
;
7328 * Try to read the interconnect type from the HBA.
7330 * Note: This driver is currently compiled as two binaries, a parallel
7331 * scsi version (sd) and a fibre channel version (ssd). All functional
7332 * differences are determined at compile time. In the future a single
7333 * binary will be provided and the interconnect type will be used to
7334 * differentiate between fibre and parallel scsi behaviors. At that time
7335 * it will be necessary for all fibre channel HBAs to support this
7338 * set un_f_is_fiber to TRUE ( default fiber )
7340 un
->un_f_is_fibre
= TRUE
;
7341 switch (scsi_ifgetcap(SD_ADDRESS(un
), "interconnect-type", -1)) {
7342 case INTERCONNECT_SSA
:
7343 un
->un_interconnect_type
= SD_INTERCONNECT_SSA
;
7344 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7345 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un
);
7347 case INTERCONNECT_PARALLEL
:
7348 un
->un_f_is_fibre
= FALSE
;
7349 un
->un_interconnect_type
= SD_INTERCONNECT_PARALLEL
;
7350 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7351 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un
);
7353 case INTERCONNECT_SAS
:
7354 un
->un_f_is_fibre
= FALSE
;
7355 un
->un_interconnect_type
= SD_INTERCONNECT_SAS
;
7356 un
->un_node_type
= DDI_NT_BLOCK_SAS
;
7357 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7358 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un
);
7360 case INTERCONNECT_SATA
:
7361 un
->un_f_is_fibre
= FALSE
;
7362 un
->un_interconnect_type
= SD_INTERCONNECT_SATA
;
7363 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7364 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un
);
7366 case INTERCONNECT_FIBRE
:
7367 un
->un_interconnect_type
= SD_INTERCONNECT_FIBRE
;
7368 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7369 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un
);
7371 case INTERCONNECT_FABRIC
:
7372 un
->un_interconnect_type
= SD_INTERCONNECT_FABRIC
;
7373 un
->un_node_type
= DDI_NT_BLOCK_FABRIC
;
7374 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7375 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un
);
7378 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
7380 * The HBA does not support the "interconnect-type" property
7381 * (or did not provide a recognized type).
7383 * Note: This will be obsoleted when a single fibre channel
7384 * and parallel scsi driver is delivered. In the meantime the
7385 * interconnect type will be set to the platform default.If that
7386 * type is not parallel SCSI, it means that we should be
7387 * assuming "ssd" semantics. However, here this also means that
7388 * the FC HBA is not supporting the "interconnect-type" property
7389 * like we expect it to, so log this occurrence.
7391 un
->un_interconnect_type
= SD_DEFAULT_INTERCONNECT_TYPE
;
7392 if (!SD_IS_PARALLEL_SCSI(un
)) {
7393 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7394 "sd_unit_attach: un:0x%p Assuming "
7395 "INTERCONNECT_FIBRE\n", un
);
7397 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7398 "sd_unit_attach: un:0x%p Assuming "
7399 "INTERCONNECT_PARALLEL\n", un
);
7400 un
->un_f_is_fibre
= FALSE
;
7404 * Note: This source will be implemented when a single fibre
7405 * channel and parallel scsi driver is delivered. The default
7406 * will be to assume that if a device does not support the
7407 * "interconnect-type" property it is a parallel SCSI HBA and
7408 * we will set the interconnect type for parallel scsi.
7410 un
->un_interconnect_type
= SD_INTERCONNECT_PARALLEL
;
7411 un
->un_f_is_fibre
= FALSE
;
7416 if (un
->un_f_is_fibre
== TRUE
) {
7417 if (scsi_ifgetcap(SD_ADDRESS(un
), "scsi-version", 1) ==
7419 switch (un
->un_interconnect_type
) {
7420 case SD_INTERCONNECT_FIBRE
:
7421 case SD_INTERCONNECT_SSA
:
7422 un
->un_node_type
= DDI_NT_BLOCK_WWN
;
7431 * Initialize the Request Sense command for the target
7433 if (sd_alloc_rqs(devp
, un
) != DDI_SUCCESS
) {
7434 goto alloc_rqs_failed
;
7438 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc
7439 * with separate binary for sd and ssd.
7441 * x86 has 1 binary, un_retry_count is set base on connection type.
7442 * The hardcoded values will go away when Sparc uses 1 binary
7443 * for sd and ssd. This hardcoded values need to match
7444 * SD_RETRY_COUNT in sddef.h
7445 * The value used is base on interconnect type.
7446 * fibre = 3, parallel = 5
7448 #if defined(__i386) || defined(__amd64)
7449 un
->un_retry_count
= un
->un_f_is_fibre
? 3 : 5;
7451 un
->un_retry_count
= SD_RETRY_COUNT
;
7455 * Set the per disk retry count to the default number of retries
7456 * for disks and CDROMs. This value can be overridden by the
7457 * disk property list or an entry in sd.conf.
7459 un
->un_notready_retry_count
=
7460 ISCD(un
) ? CD_NOT_READY_RETRY_COUNT(un
)
7461 : DISK_NOT_READY_RETRY_COUNT(un
);
7464 * Set the busy retry count to the default value of un_retry_count.
7465 * This can be overridden by entries in sd.conf or the device
7468 un
->un_busy_retry_count
= un
->un_retry_count
;
7471 * Init the reset threshold for retries. This number determines
7472 * how many retries must be performed before a reset can be issued
7473 * (for certain error conditions). This can be overridden by entries
7474 * in sd.conf or the device config table.
7476 un
->un_reset_retry_count
= (un
->un_retry_count
/ 2);
7479 * Set the victim_retry_count to the default un_retry_count
7481 un
->un_victim_retry_count
= (2 * un
->un_retry_count
);
7484 * Set the reservation release timeout to the default value of
7485 * 5 seconds. This can be overridden by entries in ssd.conf or the
7486 * device config table.
7488 un
->un_reserve_release_time
= 5;
7491 * Set up the default maximum transfer size. Note that this may
7492 * get updated later in the attach, when setting up default wide
7493 * operations for disks.
7495 #if defined(__i386) || defined(__amd64)
7496 un
->un_max_xfer_size
= (uint_t
)SD_DEFAULT_MAX_XFER_SIZE
;
7497 un
->un_partial_dma_supported
= 1;
7499 un
->un_max_xfer_size
= (uint_t
)maxphys
;
7503 * Get "allow bus device reset" property (defaults to "enabled" if
7504 * the property was not defined). This is to disable bus resets for
7505 * certain kinds of error recovery. Note: In the future when a run-time
7506 * fibre check is available the soft state flag should default to
7509 if (un
->un_f_is_fibre
== TRUE
) {
7510 un
->un_f_allow_bus_device_reset
= TRUE
;
7512 if (ddi_getprop(DDI_DEV_T_ANY
, devi
, DDI_PROP_DONTPASS
,
7513 "allow-bus-device-reset", 1) != 0) {
7514 un
->un_f_allow_bus_device_reset
= TRUE
;
7515 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7516 "sd_unit_attach: un:0x%p Bus device reset "
7519 un
->un_f_allow_bus_device_reset
= FALSE
;
7520 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7521 "sd_unit_attach: un:0x%p Bus device reset "
7527 * Check if this is an ATAPI device. ATAPI devices use Group 1
7528 * Read/Write commands and Group 2 Mode Sense/Select commands.
7530 * Note: The "obsolete" way of doing this is to check for the "atapi"
7531 * property. The new "variant" property with a value of "atapi" has been
7532 * introduced so that future 'variants' of standard SCSI behavior (like
7533 * atapi) could be specified by the underlying HBA drivers by supplying
7534 * a new value for the "variant" property, instead of having to define a
7537 if (ddi_prop_get_int(DDI_DEV_T_ANY
, devi
, 0, "atapi", -1) != -1) {
7538 un
->un_f_cfg_is_atapi
= TRUE
;
7539 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7540 "sd_unit_attach: un:0x%p Atapi device\n", un
);
7542 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, devi
, 0, "variant",
7543 &variantp
) == DDI_PROP_SUCCESS
) {
7544 if (strcmp(variantp
, "atapi") == 0) {
7545 un
->un_f_cfg_is_atapi
= TRUE
;
7546 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7547 "sd_unit_attach: un:0x%p Atapi device\n", un
);
7549 ddi_prop_free(variantp
);
7552 un
->un_cmd_timeout
= SD_IO_TIME
;
7554 un
->un_busy_timeout
= SD_BSY_TIMEOUT
;
7556 /* Info on current states, statuses, etc. (Updated frequently) */
7557 un
->un_state
= SD_STATE_NORMAL
;
7558 un
->un_last_state
= SD_STATE_NORMAL
;
7560 /* Control & status info for command throttling */
7561 un
->un_throttle
= sd_max_throttle
;
7562 un
->un_saved_throttle
= sd_max_throttle
;
7563 un
->un_min_throttle
= sd_min_throttle
;
7565 if (un
->un_f_is_fibre
== TRUE
) {
7566 un
->un_f_use_adaptive_throttle
= TRUE
;
7568 un
->un_f_use_adaptive_throttle
= FALSE
;
7571 /* Removable media support. */
7572 cv_init(&un
->un_state_cv
, NULL
, CV_DRIVER
, NULL
);
7573 un
->un_mediastate
= DKIO_NONE
;
7574 un
->un_specified_mediastate
= DKIO_NONE
;
7576 /* CVs for suspend/resume (PM or DR) */
7577 cv_init(&un
->un_suspend_cv
, NULL
, CV_DRIVER
, NULL
);
7578 cv_init(&un
->un_disk_busy_cv
, NULL
, CV_DRIVER
, NULL
);
7580 /* Power management support. */
7581 un
->un_power_level
= SD_SPINDLE_UNINIT
;
7583 cv_init(&un
->un_wcc_cv
, NULL
, CV_DRIVER
, NULL
);
7584 un
->un_f_wcc_inprog
= 0;
7587 * The open/close semaphore is used to serialize threads executing
7588 * in the driver's open & close entry point routines for a given
7591 (void) sema_init(&un
->un_semoclose
, 1, NULL
, SEMA_DRIVER
, NULL
);
7594 * The conf file entry and softstate variable is a forceful override,
7595 * meaning a non-zero value must be entered to change the default.
7597 un
->un_f_disksort_disabled
= FALSE
;
7598 un
->un_f_rmw_type
= SD_RMW_TYPE_DEFAULT
;
7599 un
->un_f_enable_rmw
= FALSE
;
7602 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but
7603 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property.
7605 un
->un_f_mmc_gesn_polling
= TRUE
;
7608 * Retrieve the properties from the static driver table or the driver
7609 * configuration file (.conf) for this unit and update the soft state
7610 * for the device as needed for the indicated properties.
7611 * Note: the property configuration needs to occur here as some of the
7612 * following routines may have dependencies on soft state flags set
7613 * as part of the driver property configuration.
7615 sd_read_unit_properties(un
);
7616 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
7617 "sd_unit_attach: un:0x%p property configuration complete.\n", un
);
7620 * Only if a device has "hotpluggable" property, it is
7621 * treated as hotpluggable device. Otherwise, it is
7622 * regarded as non-hotpluggable one.
7624 if (ddi_prop_get_int(DDI_DEV_T_ANY
, devi
, 0, "hotpluggable",
7626 un
->un_f_is_hotpluggable
= TRUE
;
7630 * set unit's attributes(flags) according to "hotpluggable" and
7631 * RMB bit in INQUIRY data.
7633 sd_set_unit_attributes(un
, devi
);
7636 * By default, we mark the capacity, lbasize, and geometry
7637 * as invalid. Only if we successfully read a valid capacity
7638 * will we update the un_blockcount and un_tgt_blocksize with the
7639 * valid values (the geometry will be validated later).
7641 un
->un_f_blockcount_is_valid
= FALSE
;
7642 un
->un_f_tgt_blocksize_is_valid
= FALSE
;
7645 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
7648 un
->un_tgt_blocksize
= un
->un_sys_blocksize
= DEV_BSIZE
;
7649 un
->un_blockcount
= 0;
7652 * physical sector size default to DEV_BSIZE currently.
7654 un
->un_phy_blocksize
= DEV_BSIZE
;
7657 * Set up the per-instance info needed to determine the correct
7658 * CDBs and other info for issuing commands to the target.
7660 sd_init_cdb_limits(un
);
7663 * Set up the IO chains to use, based upon the target type.
7665 if (un
->un_f_non_devbsize_supported
) {
7666 un
->un_buf_chain_type
= SD_CHAIN_INFO_RMMEDIA
;
7668 un
->un_buf_chain_type
= SD_CHAIN_INFO_DISK
;
7670 un
->un_uscsi_chain_type
= SD_CHAIN_INFO_USCSI_CMD
;
7671 un
->un_direct_chain_type
= SD_CHAIN_INFO_DIRECT_CMD
;
7672 un
->un_priority_chain_type
= SD_CHAIN_INFO_PRIORITY_CMD
;
7674 un
->un_xbuf_attr
= ddi_xbuf_attr_create(sizeof (struct sd_xbuf
),
7675 sd_xbuf_strategy
, un
, sd_xbuf_active_limit
, sd_xbuf_reserve_limit
,
7676 ddi_driver_major(devi
), DDI_XBUF_QTHREAD_DRIVER
);
7677 ddi_xbuf_attr_register_devinfo(un
->un_xbuf_attr
, devi
);
7681 un
->un_additional_codes
= sd_additional_codes
;
7683 un
->un_additional_codes
= NULL
;
7687 * Create the kstats here so they can be available for attach-time
7688 * routines that send commands to the unit (either polled or via
7689 * sd_send_scsi_cmd).
7691 * Note: This is a critical sequence that needs to be maintained:
7692 * 1) Instantiate the kstats here, before any routines using the
7693 * iopath (i.e. sd_send_scsi_cmd).
7694 * 2) Instantiate and initialize the partition stats
7696 * 3) Initialize the error stats (sd_set_errstats), following
7697 * sd_validate_geometry(),sd_register_devid(),
7698 * and sd_cache_control().
7701 un
->un_stats
= kstat_create(sd_label
, instance
,
7702 NULL
, "disk", KSTAT_TYPE_IO
, 1, KSTAT_FLAG_PERSISTENT
);
7703 if (un
->un_stats
!= NULL
) {
7704 un
->un_stats
->ks_lock
= SD_MUTEX(un
);
7705 kstat_install(un
->un_stats
);
7707 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
7708 "sd_unit_attach: un:0x%p un_stats created\n", un
);
7710 sd_create_errstats(un
, instance
);
7711 if (un
->un_errstats
== NULL
) {
7712 goto create_errstats_failed
;
7714 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
7715 "sd_unit_attach: un:0x%p errstats created\n", un
);
7718 * The following if/else code was relocated here from below as part
7719 * of the fix for bug (4430280). However with the default setup added
7720 * on entry to this routine, it's no longer absolutely necessary for
7721 * this to be before the call to sd_spin_up_unit.
7723 if (SD_IS_PARALLEL_SCSI(un
) || SD_IS_SERIAL(un
)) {
7724 int tq_trigger_flag
= (((devp
->sd_inq
->inq_ansi
== 4) ||
7725 (devp
->sd_inq
->inq_ansi
== 5)) &&
7726 devp
->sd_inq
->inq_bque
) || devp
->sd_inq
->inq_cmdque
;
7729 * If tagged queueing is supported by the target
7730 * and by the host adapter then we will enable it
7732 un
->un_tagflags
= 0;
7733 if ((devp
->sd_inq
->inq_rdf
== RDF_SCSI2
) && tq_trigger_flag
&&
7734 (un
->un_f_arq_enabled
== TRUE
)) {
7735 if (scsi_ifsetcap(SD_ADDRESS(un
), "tagged-qing",
7737 un
->un_tagflags
= FLAG_STAG
;
7738 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7739 "sd_unit_attach: un:0x%p tag queueing "
7741 } else if (scsi_ifgetcap(SD_ADDRESS(un
),
7742 "untagged-qing", 0) == 1) {
7743 un
->un_f_opt_queueing
= TRUE
;
7744 un
->un_saved_throttle
= un
->un_throttle
=
7745 min(un
->un_throttle
, 3);
7747 un
->un_f_opt_queueing
= FALSE
;
7748 un
->un_saved_throttle
= un
->un_throttle
= 1;
7750 } else if ((scsi_ifgetcap(SD_ADDRESS(un
), "untagged-qing", 0)
7751 == 1) && (un
->un_f_arq_enabled
== TRUE
)) {
7752 /* The Host Adapter supports internal queueing. */
7753 un
->un_f_opt_queueing
= TRUE
;
7754 un
->un_saved_throttle
= un
->un_throttle
=
7755 min(un
->un_throttle
, 3);
7757 un
->un_f_opt_queueing
= FALSE
;
7758 un
->un_saved_throttle
= un
->un_throttle
= 1;
7759 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7760 "sd_unit_attach: un:0x%p no tag queueing\n", un
);
7764 * Enable large transfers for SATA/SAS drives
7766 if (SD_IS_SERIAL(un
)) {
7767 un
->un_max_xfer_size
=
7768 ddi_getprop(DDI_DEV_T_ANY
, devi
, 0,
7769 sd_max_xfer_size
, SD_MAX_XFER_SIZE
);
7770 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7771 "sd_unit_attach: un:0x%p max transfer "
7772 "size=0x%x\n", un
, un
->un_max_xfer_size
);
7776 /* Setup or tear down default wide operations for disks */
7779 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
7780 * and "ssd_max_xfer_size" to exist simultaneously on the same
7781 * system and be set to different values. In the future this
7782 * code may need to be updated when the ssd module is
7783 * obsoleted and removed from the system. (4299588)
7785 if (SD_IS_PARALLEL_SCSI(un
) &&
7786 (devp
->sd_inq
->inq_rdf
== RDF_SCSI2
) &&
7787 (devp
->sd_inq
->inq_wbus16
|| devp
->sd_inq
->inq_wbus32
)) {
7788 if (scsi_ifsetcap(SD_ADDRESS(un
), "wide-xfer",
7790 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7791 "sd_unit_attach: un:0x%p Wide Transfer "
7796 * If tagged queuing has also been enabled, then
7797 * enable large xfers
7799 if (un
->un_saved_throttle
== sd_max_throttle
) {
7800 un
->un_max_xfer_size
=
7801 ddi_getprop(DDI_DEV_T_ANY
, devi
, 0,
7802 sd_max_xfer_size
, SD_MAX_XFER_SIZE
);
7803 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7804 "sd_unit_attach: un:0x%p max transfer "
7805 "size=0x%x\n", un
, un
->un_max_xfer_size
);
7808 if (scsi_ifsetcap(SD_ADDRESS(un
), "wide-xfer",
7810 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
7811 "sd_unit_attach: un:0x%p "
7812 "Wide Transfer disabled\n", un
);
7816 un
->un_tagflags
= FLAG_STAG
;
7817 un
->un_max_xfer_size
= ddi_getprop(DDI_DEV_T_ANY
,
7818 devi
, 0, sd_max_xfer_size
, SD_MAX_XFER_SIZE
);
7822 * If this target supports LUN reset, try to enable it.
7824 if (un
->un_f_lun_reset_enabled
) {
7825 if (scsi_ifsetcap(SD_ADDRESS(un
), "lun-reset", 1, 1) == 1) {
7826 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_unit_attach: "
7827 "un:0x%p lun_reset capability set\n", un
);
7829 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_unit_attach: "
7830 "un:0x%p lun-reset capability not set\n", un
);
7835 * Adjust the maximum transfer size. This is to fix
7836 * the problem of partial DMA support on SPARC. Some
7837 * HBA driver, like aac, has very small dma_attr_maxxfer
7838 * size, which requires partial DMA support on SPARC.
7839 * In the future the SPARC pci nexus driver may solve
7840 * the problem instead of this fix.
7842 max_xfer_size
= scsi_ifgetcap(SD_ADDRESS(un
), "dma-max", 1);
7843 if ((max_xfer_size
> 0) && (max_xfer_size
< un
->un_max_xfer_size
)) {
7844 /* We need DMA partial even on sparc to ensure sddump() works */
7845 un
->un_max_xfer_size
= max_xfer_size
;
7846 if (un
->un_partial_dma_supported
== 0)
7847 un
->un_partial_dma_supported
= 1;
7849 if (ddi_prop_get_int(DDI_DEV_T_ANY
, SD_DEVINFO(un
),
7850 DDI_PROP_DONTPASS
, "buf_break", 0) == 1) {
7851 if (ddi_xbuf_attr_setup_brk(un
->un_xbuf_attr
,
7852 un
->un_max_xfer_size
) == 1) {
7853 un
->un_buf_breakup_supported
= 1;
7854 SD_INFO(SD_LOG_ATTACH_DETACH
, un
, "sd_unit_attach: "
7855 "un:0x%p Buf breakup enabled\n", un
);
7860 * Set PKT_DMA_PARTIAL flag.
7862 if (un
->un_partial_dma_supported
== 1) {
7863 un
->un_pkt_flags
= PKT_DMA_PARTIAL
;
7865 un
->un_pkt_flags
= 0;
7868 /* Initialize sd_ssc_t for internal uscsi commands */
7869 ssc
= sd_ssc_init(un
);
7873 * Allocate memory for SCSI FMA stuffs.
7876 kmem_zalloc(sizeof (struct sd_fm_internal
), KM_SLEEP
);
7877 sfip
= (struct sd_fm_internal
*)un
->un_fm_private
;
7878 sfip
->fm_ssc
.ssc_uscsi_cmd
= &sfip
->fm_ucmd
;
7879 sfip
->fm_ssc
.ssc_uscsi_info
= &sfip
->fm_uinfo
;
7880 sfip
->fm_ssc
.ssc_un
= un
;
7883 un
->un_f_has_removable_media
||
7884 devp
->sd_fm_capable
== DDI_FM_NOT_CAPABLE
) {
7886 * We don't touch CDROM or the DDI_FM_NOT_CAPABLE device.
7887 * Their log are unchanged.
7889 sfip
->fm_log_level
= SD_FM_LOG_NSUP
;
7892 * If enter here, it should be non-CDROM and FM-capable
7893 * device, and it will not keep the old scsi_log as before
7894 * in /var/adm/messages. However, the property
7895 * "fm-scsi-log" will control whether the FM telemetry will
7896 * be logged in /var/adm/messages.
7899 fm_scsi_log
= ddi_prop_get_int(DDI_DEV_T_ANY
, SD_DEVINFO(un
),
7900 DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
, "fm-scsi-log", 0);
7903 sfip
->fm_log_level
= SD_FM_LOG_EREPORT
;
7905 sfip
->fm_log_level
= SD_FM_LOG_SILENT
;
7909 * At this point in the attach, we have enough info in the
7910 * soft state to be able to issue commands to the target.
7912 * All command paths used below MUST issue their commands as
7913 * SD_PATH_DIRECT. This is important as intermediate layers
7914 * are not all initialized yet (such as PM).
7918 * Send a TEST UNIT READY command to the device. This should clear
7919 * any outstanding UNIT ATTENTION that may be present.
7921 * Note: Don't check for success, just track if there is a reservation,
7922 * this is a throw away command to clear any unit attentions.
7924 * Note: This MUST be the first command issued to the target during
7925 * attach to ensure power on UNIT ATTENTIONS are cleared.
7926 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7927 * with attempts at spinning up a device with no media.
7929 status
= sd_send_scsi_TEST_UNIT_READY(ssc
, SD_DONT_RETRY_TUR
);
7931 if (status
== EACCES
)
7932 reservation_flag
= SD_TARGET_IS_RESERVED
;
7933 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
7937 * If the device is NOT a removable media device, attempt to spin
7938 * it up (using the START_STOP_UNIT command) and read its capacity
7939 * (using the READ CAPACITY command). Note, however, that either
7940 * of these could fail and in some cases we would continue with
7941 * the attach despite the failure (see below).
7943 if (un
->un_f_descr_format_supported
) {
7945 switch (sd_spin_up_unit(ssc
)) {
7948 * Spin-up was successful; now try to read the
7949 * capacity. If successful then save the results
7950 * and mark the capacity & lbasize as valid.
7952 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
7953 "sd_unit_attach: un:0x%p spin-up successful\n", un
);
7955 status
= sd_send_scsi_READ_CAPACITY(ssc
, &capacity
,
7956 &lbasize
, SD_PATH_DIRECT
);
7960 if (capacity
> DK_MAX_BLOCKS
) {
7962 if ((capacity
+ 1) >
7963 SD_GROUP1_MAX_ADDRESS
) {
7965 * Enable descriptor format
7966 * sense data so that we can
7967 * get 64 bit sense data
7970 sd_enable_descr_sense(ssc
);
7973 /* 32-bit kernels can't handle this */
7974 scsi_log(SD_DEVINFO(un
),
7976 "disk has %llu blocks, which "
7977 "is too large for a 32-bit "
7978 "kernel", capacity
);
7980 #if defined(__i386) || defined(__amd64)
7982 * 1TB disk was treated as (1T - 512)B
7983 * in the past, so that it might have
7984 * valid VTOC and solaris partitions,
7985 * we have to allow it to continue to
7988 if (capacity
-1 > DK_MAX_BLOCKS
)
7995 * Here it's not necessary to check the case:
7996 * the capacity of the device is bigger than
7997 * what the max hba cdb can support. Because
7998 * sd_send_scsi_READ_CAPACITY will retrieve
7999 * the capacity by sending USCSI command, which
8000 * is constrained by the max hba cdb. Actually,
8001 * sd_send_scsi_READ_CAPACITY will return
8002 * EINVAL when using bigger cdb than required
8003 * cdb length. Will handle this case in
8008 * The following relies on
8009 * sd_send_scsi_READ_CAPACITY never
8010 * returning 0 for capacity and/or lbasize.
8012 sd_update_block_info(un
, lbasize
, capacity
);
8014 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
8015 "sd_unit_attach: un:0x%p capacity = %ld "
8016 "blocks; lbasize= %ld.\n", un
,
8017 un
->un_blockcount
, un
->un_tgt_blocksize
);
8023 * In the case where the max-cdb-length property
8024 * is smaller than the required CDB length for
8025 * a SCSI device, a target driver can fail to
8026 * attach to that device.
8028 scsi_log(SD_DEVINFO(un
),
8030 "disk capacity is too large "
8031 "for current cdb length");
8032 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
8037 * Should never get here if the spin-up
8038 * succeeded, but code it in anyway.
8039 * From here, just continue with the attach...
8041 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
8042 "sd_unit_attach: un:0x%p "
8043 "sd_send_scsi_READ_CAPACITY "
8044 "returned reservation conflict\n", un
);
8045 reservation_flag
= SD_TARGET_IS_RESERVED
;
8046 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
8050 * Likewise, should never get here if the
8051 * spin-up succeeded. Just continue with
8055 sd_ssc_assessment(ssc
,
8056 SD_FMT_STATUS_CHECK
);
8058 sd_ssc_assessment(ssc
,
8065 * Device is reserved by another host. In this case
8066 * we could not spin it up or read the capacity, but
8067 * we continue with the attach anyway.
8069 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
8070 "sd_unit_attach: un:0x%p spin-up reservation "
8072 reservation_flag
= SD_TARGET_IS_RESERVED
;
8075 /* Fail the attach if the spin-up failed. */
8076 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
8077 "sd_unit_attach: un:0x%p spin-up failed.", un
);
8084 * Check to see if this is a MMC drive
8087 sd_set_mmc_caps(ssc
);
8091 * Add a zero-length attribute to tell the world we support
8092 * kernel ioctls (for layered drivers)
8094 (void) ddi_prop_create(DDI_DEV_T_NONE
, devi
, DDI_PROP_CANSLEEP
,
8095 DDI_KERNEL_IOCTL
, NULL
, 0);
8098 * Add a boolean property to tell the world we support
8099 * the B_FAILFAST flag (for layered drivers)
8101 (void) ddi_prop_create(DDI_DEV_T_NONE
, devi
, DDI_PROP_CANSLEEP
,
8102 "ddi-failfast-supported", NULL
, 0);
8105 * Initialize power management
8107 mutex_init(&un
->un_pm_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
8108 cv_init(&un
->un_pm_busy_cv
, NULL
, CV_DRIVER
, NULL
);
8109 sd_setup_pm(ssc
, devi
);
8110 if (un
->un_f_pm_is_enabled
== FALSE
) {
8112 * For performance, point to a jump table that does
8114 * The direct and priority chains don't change with PM.
8116 * Note: this is currently done based on individual device
8117 * capabilities. When an interface for determining system
8118 * power enabled state becomes available, or when additional
8119 * layers are added to the command chain, these values will
8120 * have to be re-evaluated for correctness.
8122 if (un
->un_f_non_devbsize_supported
) {
8123 un
->un_buf_chain_type
= SD_CHAIN_INFO_RMMEDIA_NO_PM
;
8125 un
->un_buf_chain_type
= SD_CHAIN_INFO_DISK_NO_PM
;
8127 un
->un_uscsi_chain_type
= SD_CHAIN_INFO_USCSI_CMD_NO_PM
;
8131 * This property is set to 0 by HA software to avoid retries
8132 * on a reserved disk. (The preferred property name is
8133 * "retry-on-reservation-conflict") (1189689)
8135 * Note: The use of a global here can have unintended consequences. A
8136 * per instance variable is preferable to match the capabilities of
8137 * different underlying hba's (4402600)
8139 sd_retry_on_reservation_conflict
= ddi_getprop(DDI_DEV_T_ANY
, devi
,
8140 DDI_PROP_DONTPASS
, "retry-on-reservation-conflict",
8141 sd_retry_on_reservation_conflict
);
8142 if (sd_retry_on_reservation_conflict
!= 0) {
8143 sd_retry_on_reservation_conflict
= ddi_getprop(DDI_DEV_T_ANY
,
8144 devi
, DDI_PROP_DONTPASS
, sd_resv_conflict_name
,
8145 sd_retry_on_reservation_conflict
);
8148 /* Set up options for QFULL handling. */
8149 if ((rval
= ddi_getprop(DDI_DEV_T_ANY
, devi
, 0,
8150 "qfull-retries", -1)) != -1) {
8151 (void) scsi_ifsetcap(SD_ADDRESS(un
), "qfull-retries",
8154 if ((rval
= ddi_getprop(DDI_DEV_T_ANY
, devi
, 0,
8155 "qfull-retry-interval", -1)) != -1) {
8156 (void) scsi_ifsetcap(SD_ADDRESS(un
), "qfull-retry-interval",
8161 * This just prints a message that announces the existence of the
8162 * device. The message is always printed in the system logfile, but
8163 * only appears on the console if the system is booted with the
8164 * -v (verbose) argument.
8166 ddi_report_dev(devi
);
8168 un
->un_mediastate
= DKIO_NONE
;
8171 * Check if this is a SSD(Solid State Drive).
8173 sd_check_solid_state(ssc
);
8176 * Check whether the drive is in emulation mode.
8178 sd_check_emulation_mode(ssc
);
8180 cmlb_alloc_handle(&un
->un_cmlbhandle
);
8182 #if defined(__i386) || defined(__amd64)
8184 * On x86, compensate for off-by-1 legacy error
8186 if (!un
->un_f_has_removable_media
&& !un
->un_f_is_hotpluggable
&&
8187 (lbasize
== un
->un_sys_blocksize
))
8188 offbyone
= CMLB_OFF_BY_ONE
;
8191 if (cmlb_attach(devi
, &sd_tgops
, (int)devp
->sd_inq
->inq_dtype
,
8192 VOID2BOOLEAN(un
->un_f_has_removable_media
!= 0),
8193 VOID2BOOLEAN(un
->un_f_is_hotpluggable
!= 0),
8194 un
->un_node_type
, offbyone
, un
->un_cmlbhandle
,
8195 (void *)SD_PATH_DIRECT
) != 0) {
8196 goto cmlb_attach_failed
;
8201 * Read and validate the device's geometry (ie, disk label)
8202 * A new unformatted drive will not have a valid geometry, but
8203 * the driver needs to successfully attach to this device so
8204 * the drive can be formatted via ioctls.
8206 geom_label_valid
= (cmlb_validate(un
->un_cmlbhandle
, 0,
8207 (void *)SD_PATH_DIRECT
) == 0) ? 1: 0;
8209 mutex_enter(SD_MUTEX(un
));
8212 * Read and initialize the devid for the unit.
8214 if (un
->un_f_devid_supported
) {
8215 sd_register_devid(ssc
, devi
, reservation_flag
);
8217 mutex_exit(SD_MUTEX(un
));
8219 #if (defined(__fibre))
8221 * Register callbacks for fibre only. You can't do this solely
8222 * on the basis of the devid_type because this is hba specific.
8223 * We need to query our hba capabilities to find out whether to
8226 if (un
->un_f_is_fibre
) {
8227 if (strcmp(un
->un_node_type
, DDI_NT_BLOCK_CHAN
)) {
8228 sd_init_event_callbacks(un
);
8229 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
8230 "sd_unit_attach: un:0x%p event callbacks inserted",
8236 if (un
->un_f_opt_disable_cache
== TRUE
) {
8238 * Disable both read cache and write cache. This is
8239 * the historic behavior of the keywords in the config file.
8241 if (sd_cache_control(ssc
, SD_CACHE_DISABLE
, SD_CACHE_DISABLE
) !=
8243 SD_ERROR(SD_LOG_ATTACH_DETACH
, un
,
8244 "sd_unit_attach: un:0x%p Could not disable "
8251 * Check the value of the WCE bit now and
8252 * set un_f_write_cache_enabled accordingly.
8254 (void) sd_get_write_cache_enabled(ssc
, &wc_enabled
);
8255 mutex_enter(SD_MUTEX(un
));
8256 un
->un_f_write_cache_enabled
= (wc_enabled
!= 0);
8257 mutex_exit(SD_MUTEX(un
));
8259 if ((un
->un_f_rmw_type
!= SD_RMW_TYPE_RETURN_ERROR
&&
8260 un
->un_tgt_blocksize
!= DEV_BSIZE
) ||
8261 un
->un_f_enable_rmw
) {
8262 if (!(un
->un_wm_cache
)) {
8263 (void) snprintf(name_str
, sizeof (name_str
),
8265 ddi_driver_name(SD_DEVINFO(un
)),
8266 ddi_get_instance(SD_DEVINFO(un
)));
8267 un
->un_wm_cache
= kmem_cache_create(
8268 name_str
, sizeof (struct sd_w_map
),
8269 8, sd_wm_cache_constructor
,
8270 sd_wm_cache_destructor
, NULL
,
8271 (void *)un
, NULL
, 0);
8272 if (!(un
->un_wm_cache
)) {
8273 goto wm_cache_failed
;
8279 * Check the value of the NV_SUP bit and set
8280 * un_f_suppress_cache_flush accordingly.
8285 * Find out what type of reservation this disk supports.
8287 status
= sd_send_scsi_PERSISTENT_RESERVE_IN(ssc
, SD_READ_KEYS
, 0, NULL
);
8292 * SCSI-3 reservations are supported.
8294 un
->un_reservation_type
= SD_SCSI3_RESERVATION
;
8295 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
8296 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un
);
8300 * The PERSISTENT RESERVE IN command would not be recognized by
8301 * a SCSI-2 device, so assume the reservation type is SCSI-2.
8303 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
8304 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un
);
8305 un
->un_reservation_type
= SD_SCSI2_RESERVATION
;
8307 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
8311 * default to SCSI-3 reservations
8313 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
8314 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un
);
8315 un
->un_reservation_type
= SD_SCSI3_RESERVATION
;
8317 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
8322 * Set the pstat and error stat values here, so data obtained during the
8323 * previous attach-time routines is available.
8325 * Note: This is a critical sequence that needs to be maintained:
8326 * 1) Instantiate the kstats before any routines using the iopath
8327 * (i.e. sd_send_scsi_cmd).
8328 * 2) Initialize the error stats (sd_set_errstats) and partition
8329 * stats (sd_set_pstats)here, following
8330 * cmlb_validate_geometry(), sd_register_devid(), and
8331 * sd_cache_control().
8334 if (un
->un_f_pkstats_enabled
&& geom_label_valid
) {
8336 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
8337 "sd_unit_attach: un:0x%p pstats created and set\n", un
);
8340 sd_set_errstats(un
);
8341 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
8342 "sd_unit_attach: un:0x%p errstats set\n", un
);
8346 * After successfully attaching an instance, we record the information
8347 * of how many luns have been attached on the relative target and
8348 * controller for parallel SCSI. This information is used when sd tries
8349 * to set the tagged queuing capability in HBA.
8351 if (SD_IS_PARALLEL_SCSI(un
) && (tgt
>= 0) && (tgt
< NTARGETS_WIDE
)) {
8352 sd_scsi_update_lun_on_target(pdip
, tgt
, SD_SCSI_LUN_ATTACH
);
8355 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
8356 "sd_unit_attach: un:0x%p exit success\n", un
);
8358 /* Uninitialize sd_ssc_t pointer */
8361 return (DDI_SUCCESS
);
8364 * An error occurred during the attach; clean up & return failure.
8370 ddi_remove_minor_node(devi
, NULL
);
8374 * Cleanup from the scsi_ifsetcap() calls (437868)
8376 (void) scsi_ifsetcap(SD_ADDRESS(un
), "lun-reset", 0, 1);
8377 (void) scsi_ifsetcap(SD_ADDRESS(un
), "wide-xfer", 0, 1);
8380 * Refer to the comments of setting tagged-qing in the beginning of
8381 * sd_unit_attach. We can only disable tagged queuing when there is
8382 * no lun attached on the target.
8384 if (sd_scsi_get_target_lun_count(pdip
, tgt
) < 1) {
8385 (void) scsi_ifsetcap(SD_ADDRESS(un
), "tagged-qing", 0, 1);
8388 if (un
->un_f_is_fibre
== FALSE
) {
8389 (void) scsi_ifsetcap(SD_ADDRESS(un
), "auto-rqsense", 0, 1);
8394 /* Uninitialize sd_ssc_t pointer */
8397 mutex_enter(SD_MUTEX(un
));
8399 /* Deallocate SCSI FMA memory spaces */
8400 kmem_free(un
->un_fm_private
, sizeof (struct sd_fm_internal
));
8402 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
8403 if (un
->un_direct_priority_timeid
!= NULL
) {
8404 timeout_id_t temp_id
= un
->un_direct_priority_timeid
;
8405 un
->un_direct_priority_timeid
= NULL
;
8406 mutex_exit(SD_MUTEX(un
));
8407 (void) untimeout(temp_id
);
8408 mutex_enter(SD_MUTEX(un
));
8411 /* Cancel any pending start/stop timeouts */
8412 if (un
->un_startstop_timeid
!= NULL
) {
8413 timeout_id_t temp_id
= un
->un_startstop_timeid
;
8414 un
->un_startstop_timeid
= NULL
;
8415 mutex_exit(SD_MUTEX(un
));
8416 (void) untimeout(temp_id
);
8417 mutex_enter(SD_MUTEX(un
));
8420 /* Cancel any pending reset-throttle timeouts */
8421 if (un
->un_reset_throttle_timeid
!= NULL
) {
8422 timeout_id_t temp_id
= un
->un_reset_throttle_timeid
;
8423 un
->un_reset_throttle_timeid
= NULL
;
8424 mutex_exit(SD_MUTEX(un
));
8425 (void) untimeout(temp_id
);
8426 mutex_enter(SD_MUTEX(un
));
8429 /* Cancel rmw warning message timeouts */
8430 if (un
->un_rmw_msg_timeid
!= NULL
) {
8431 timeout_id_t temp_id
= un
->un_rmw_msg_timeid
;
8432 un
->un_rmw_msg_timeid
= NULL
;
8433 mutex_exit(SD_MUTEX(un
));
8434 (void) untimeout(temp_id
);
8435 mutex_enter(SD_MUTEX(un
));
8438 /* Cancel any pending retry timeouts */
8439 if (un
->un_retry_timeid
!= NULL
) {
8440 timeout_id_t temp_id
= un
->un_retry_timeid
;
8441 un
->un_retry_timeid
= NULL
;
8442 mutex_exit(SD_MUTEX(un
));
8443 (void) untimeout(temp_id
);
8444 mutex_enter(SD_MUTEX(un
));
8447 /* Cancel any pending delayed cv broadcast timeouts */
8448 if (un
->un_dcvb_timeid
!= NULL
) {
8449 timeout_id_t temp_id
= un
->un_dcvb_timeid
;
8450 un
->un_dcvb_timeid
= NULL
;
8451 mutex_exit(SD_MUTEX(un
));
8452 (void) untimeout(temp_id
);
8453 mutex_enter(SD_MUTEX(un
));
8456 mutex_exit(SD_MUTEX(un
));
8458 /* There should not be any in-progress I/O so ASSERT this check */
8459 ASSERT(un
->un_ncmds_in_transport
== 0);
8460 ASSERT(un
->un_ncmds_in_driver
== 0);
8462 /* Do not free the softstate if the callback routine is active */
8463 sd_sync_with_callback(un
);
8466 * Partition stats apparently are not used with removables. These would
8467 * not have been created during attach, so no need to clean them up...
8469 if (un
->un_errstats
!= NULL
) {
8470 kstat_delete(un
->un_errstats
);
8471 un
->un_errstats
= NULL
;
8474 create_errstats_failed
:
8476 if (un
->un_stats
!= NULL
) {
8477 kstat_delete(un
->un_stats
);
8478 un
->un_stats
= NULL
;
8481 ddi_xbuf_attr_unregister_devinfo(un
->un_xbuf_attr
, devi
);
8482 ddi_xbuf_attr_destroy(un
->un_xbuf_attr
);
8484 ddi_prop_remove_all(devi
);
8485 sema_destroy(&un
->un_semoclose
);
8486 cv_destroy(&un
->un_state_cv
);
8494 devp
->sd_private
= NULL
;
8495 bzero(un
, sizeof (struct sd_lun
)); /* Clear any stale data! */
8497 get_softstate_failed
:
8499 * Note: the man pages are unclear as to whether or not doing a
8500 * ddi_soft_state_free(sd_state, instance) is the right way to
8501 * clean up after the ddi_soft_state_zalloc() if the subsequent
8502 * ddi_get_soft_state() fails. The implication seems to be
8503 * that the get_soft_state cannot fail if the zalloc succeeds.
8505 #ifndef XPV_HVM_DRIVER
8506 ddi_soft_state_free(sd_state
, instance
);
8507 #endif /* !XPV_HVM_DRIVER */
8512 return (DDI_FAILURE
);
8517 * Function: sd_unit_detach
8519 * Description: Performs DDI_DETACH processing for sddetach().
8521 * Return Code: DDI_SUCCESS
8524 * Context: Kernel thread context
8528 sd_unit_detach(dev_info_t
*devi
)
8530 struct scsi_device
*devp
;
8535 dev_info_t
*pdip
= ddi_get_parent(devi
);
8536 #ifndef XPV_HVM_DRIVER
8537 int instance
= ddi_get_instance(devi
);
8538 #endif /* !XPV_HVM_DRIVER */
8540 mutex_enter(&sd_detach_mutex
);
8543 * Fail the detach for any of the following:
8544 * - Unable to get the sd_lun struct for the instance
8545 * - A layered driver has an outstanding open on the instance
8546 * - Another thread is already detaching this instance
8547 * - Another thread is currently performing an open
8549 devp
= ddi_get_driver_private(devi
);
8550 if ((devp
== NULL
) ||
8551 ((un
= (struct sd_lun
*)devp
->sd_private
) == NULL
) ||
8552 (un
->un_ncmds_in_driver
!= 0) || (un
->un_layer_count
!= 0) ||
8553 (un
->un_detach_count
!= 0) || (un
->un_opens_in_progress
!= 0)) {
8554 mutex_exit(&sd_detach_mutex
);
8555 return (DDI_FAILURE
);
8558 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
, "sd_unit_detach: entry 0x%p\n", un
);
8561 * Mark this instance as currently in a detach, to inhibit any
8562 * opens from a layered driver.
8564 un
->un_detach_count
++;
8565 mutex_exit(&sd_detach_mutex
);
8567 tgt
= ddi_prop_get_int(DDI_DEV_T_ANY
, devi
, DDI_PROP_DONTPASS
,
8568 SCSI_ADDR_PROP_TARGET
, -1);
8570 dev
= sd_make_device(SD_DEVINFO(un
));
8573 _NOTE(COMPETING_THREADS_NOW
);
8576 mutex_enter(SD_MUTEX(un
));
8579 * Fail the detach if there are any outstanding layered
8580 * opens on this device.
8582 for (i
= 0; i
< NDKMAP
; i
++) {
8583 if (un
->un_ocmap
.lyropen
[i
] != 0) {
8589 * Verify there are NO outstanding commands issued to this device.
8590 * ie, un_ncmds_in_transport == 0.
8591 * It's possible to have outstanding commands through the physio
8592 * code path, even though everything's closed.
8594 if ((un
->un_ncmds_in_transport
!= 0) || (un
->un_retry_timeid
!= NULL
) ||
8595 (un
->un_direct_priority_timeid
!= NULL
) ||
8596 (un
->un_state
== SD_STATE_RWAIT
)) {
8597 mutex_exit(SD_MUTEX(un
));
8598 SD_ERROR(SD_LOG_ATTACH_DETACH
, un
,
8599 "sd_dr_detach: Detach failure due to outstanding cmds\n");
8604 * If we have the device reserved, release the reservation.
8606 if ((un
->un_resvd_status
& SD_RESERVE
) &&
8607 !(un
->un_resvd_status
& SD_LOST_RESERVE
)) {
8608 mutex_exit(SD_MUTEX(un
));
8610 * Note: sd_reserve_release sends a command to the device
8611 * via the sd_ioctlcmd() path, and can sleep.
8613 if (sd_reserve_release(dev
, SD_RELEASE
) != 0) {
8614 SD_ERROR(SD_LOG_ATTACH_DETACH
, un
,
8615 "sd_dr_detach: Cannot release reservation \n");
8618 mutex_exit(SD_MUTEX(un
));
8622 * Untimeout any reserve recover, throttle reset, restart unit
8623 * and delayed broadcast timeout threads. Protect the timeout pointer
8624 * from getting nulled by their callback functions.
8626 mutex_enter(SD_MUTEX(un
));
8627 if (un
->un_resvd_timeid
!= NULL
) {
8628 timeout_id_t temp_id
= un
->un_resvd_timeid
;
8629 un
->un_resvd_timeid
= NULL
;
8630 mutex_exit(SD_MUTEX(un
));
8631 (void) untimeout(temp_id
);
8632 mutex_enter(SD_MUTEX(un
));
8635 if (un
->un_reset_throttle_timeid
!= NULL
) {
8636 timeout_id_t temp_id
= un
->un_reset_throttle_timeid
;
8637 un
->un_reset_throttle_timeid
= NULL
;
8638 mutex_exit(SD_MUTEX(un
));
8639 (void) untimeout(temp_id
);
8640 mutex_enter(SD_MUTEX(un
));
8643 if (un
->un_startstop_timeid
!= NULL
) {
8644 timeout_id_t temp_id
= un
->un_startstop_timeid
;
8645 un
->un_startstop_timeid
= NULL
;
8646 mutex_exit(SD_MUTEX(un
));
8647 (void) untimeout(temp_id
);
8648 mutex_enter(SD_MUTEX(un
));
8651 if (un
->un_rmw_msg_timeid
!= NULL
) {
8652 timeout_id_t temp_id
= un
->un_rmw_msg_timeid
;
8653 un
->un_rmw_msg_timeid
= NULL
;
8654 mutex_exit(SD_MUTEX(un
));
8655 (void) untimeout(temp_id
);
8656 mutex_enter(SD_MUTEX(un
));
8659 if (un
->un_dcvb_timeid
!= NULL
) {
8660 timeout_id_t temp_id
= un
->un_dcvb_timeid
;
8661 un
->un_dcvb_timeid
= NULL
;
8662 mutex_exit(SD_MUTEX(un
));
8663 (void) untimeout(temp_id
);
8665 mutex_exit(SD_MUTEX(un
));
8668 /* Remove any pending reservation reclaim requests for this device */
8669 sd_rmv_resv_reclaim_req(dev
);
8671 mutex_enter(SD_MUTEX(un
));
8673 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
8674 if (un
->un_direct_priority_timeid
!= NULL
) {
8675 timeout_id_t temp_id
= un
->un_direct_priority_timeid
;
8676 un
->un_direct_priority_timeid
= NULL
;
8677 mutex_exit(SD_MUTEX(un
));
8678 (void) untimeout(temp_id
);
8679 mutex_enter(SD_MUTEX(un
));
8682 /* Cancel any active multi-host disk watch thread requests */
8683 if (un
->un_mhd_token
!= NULL
) {
8684 mutex_exit(SD_MUTEX(un
));
8685 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token
));
8686 if (scsi_watch_request_terminate(un
->un_mhd_token
,
8687 SCSI_WATCH_TERMINATE_NOWAIT
)) {
8688 SD_ERROR(SD_LOG_ATTACH_DETACH
, un
,
8689 "sd_dr_detach: Cannot cancel mhd watch request\n");
8691 * Note: We are returning here after having removed
8692 * some driver timeouts above. This is consistent with
8693 * the legacy implementation but perhaps the watch
8694 * terminate call should be made with the wait flag set.
8698 mutex_enter(SD_MUTEX(un
));
8699 un
->un_mhd_token
= NULL
;
8702 if (un
->un_swr_token
!= NULL
) {
8703 mutex_exit(SD_MUTEX(un
));
8704 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token
));
8705 if (scsi_watch_request_terminate(un
->un_swr_token
,
8706 SCSI_WATCH_TERMINATE_NOWAIT
)) {
8707 SD_ERROR(SD_LOG_ATTACH_DETACH
, un
,
8708 "sd_dr_detach: Cannot cancel swr watch request\n");
8710 * Note: We are returning here after having removed
8711 * some driver timeouts above. This is consistent with
8712 * the legacy implementation but perhaps the watch
8713 * terminate call should be made with the wait flag set.
8717 mutex_enter(SD_MUTEX(un
));
8718 un
->un_swr_token
= NULL
;
8721 mutex_exit(SD_MUTEX(un
));
8724 * Clear any scsi_reset_notifies. We clear the reset notifies
8725 * if we have not registered one.
8726 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8728 (void) scsi_reset_notify(SD_ADDRESS(un
), SCSI_RESET_CANCEL
,
8729 sd_mhd_reset_notify_cb
, (caddr_t
)un
);
8732 * protect the timeout pointers from getting nulled by
8733 * their callback functions during the cancellation process.
8734 * In such a scenario untimeout can be invoked with a null value.
8736 _NOTE(NO_COMPETING_THREADS_NOW
);
8738 mutex_enter(&un
->un_pm_mutex
);
8739 if (un
->un_pm_idle_timeid
!= NULL
) {
8740 timeout_id_t temp_id
= un
->un_pm_idle_timeid
;
8741 un
->un_pm_idle_timeid
= NULL
;
8742 mutex_exit(&un
->un_pm_mutex
);
8745 * Timeout is active; cancel it.
8746 * Note that it'll never be active on a device
8747 * that does not support PM therefore we don't
8748 * have to check before calling pm_idle_component.
8750 (void) untimeout(temp_id
);
8751 (void) pm_idle_component(SD_DEVINFO(un
), 0);
8752 mutex_enter(&un
->un_pm_mutex
);
8756 * Check whether there is already a timeout scheduled for power
8757 * management. If yes then don't lower the power here, that's.
8758 * the timeout handler's job.
8760 if (un
->un_pm_timeid
!= NULL
) {
8761 timeout_id_t temp_id
= un
->un_pm_timeid
;
8762 un
->un_pm_timeid
= NULL
;
8763 mutex_exit(&un
->un_pm_mutex
);
8765 * Timeout is active; cancel it.
8766 * Note that it'll never be active on a device
8767 * that does not support PM therefore we don't
8768 * have to check before calling pm_idle_component.
8770 (void) untimeout(temp_id
);
8771 (void) pm_idle_component(SD_DEVINFO(un
), 0);
8774 mutex_exit(&un
->un_pm_mutex
);
8775 if ((un
->un_f_pm_is_enabled
== TRUE
) &&
8776 (pm_lower_power(SD_DEVINFO(un
), 0, SD_PM_STATE_STOPPED(un
))
8778 SD_ERROR(SD_LOG_ATTACH_DETACH
, un
,
8779 "sd_dr_detach: Lower power request failed, ignoring.\n");
8781 * Fix for bug: 4297749, item # 13
8782 * The above test now includes a check to see if PM is
8783 * supported by this device before call
8785 * Note, the following is not dead code. The call to
8786 * pm_lower_power above will generate a call back into
8787 * our sdpower routine which might result in a timeout
8788 * handler getting activated. Therefore the following
8789 * code is valid and necessary.
8791 mutex_enter(&un
->un_pm_mutex
);
8792 if (un
->un_pm_timeid
!= NULL
) {
8793 timeout_id_t temp_id
= un
->un_pm_timeid
;
8794 un
->un_pm_timeid
= NULL
;
8795 mutex_exit(&un
->un_pm_mutex
);
8796 (void) untimeout(temp_id
);
8797 (void) pm_idle_component(SD_DEVINFO(un
), 0);
8799 mutex_exit(&un
->un_pm_mutex
);
8805 * Cleanup from the scsi_ifsetcap() calls (437868)
8806 * Relocated here from above to be after the call to
8807 * pm_lower_power, which was getting errors.
8809 (void) scsi_ifsetcap(SD_ADDRESS(un
), "lun-reset", 0, 1);
8810 (void) scsi_ifsetcap(SD_ADDRESS(un
), "wide-xfer", 0, 1);
8813 * Currently, tagged queuing is supported per target based by HBA.
8814 * Setting this per lun instance actually sets the capability of this
8815 * target in HBA, which affects those luns already attached on the
8816 * same target. So during detach, we can only disable this capability
8817 * only when this is the only lun left on this target. By doing
8818 * this, we assume a target has the same tagged queuing capability
8819 * for every lun. The condition can be removed when HBA is changed to
8820 * support per lun based tagged queuing capability.
8822 if (sd_scsi_get_target_lun_count(pdip
, tgt
) <= 1) {
8823 (void) scsi_ifsetcap(SD_ADDRESS(un
), "tagged-qing", 0, 1);
8826 if (un
->un_f_is_fibre
== FALSE
) {
8827 (void) scsi_ifsetcap(SD_ADDRESS(un
), "auto-rqsense", 0, 1);
8831 * Remove any event callbacks, fibre only
8833 if (un
->un_f_is_fibre
== TRUE
) {
8834 if ((un
->un_insert_event
!= NULL
) &&
8835 (ddi_remove_event_handler(un
->un_insert_cb_id
) !=
8838 * Note: We are returning here after having done
8839 * substantial cleanup above. This is consistent
8840 * with the legacy implementation but this may not
8841 * be the right thing to do.
8843 SD_ERROR(SD_LOG_ATTACH_DETACH
, un
,
8844 "sd_dr_detach: Cannot cancel insert event\n");
8845 goto err_remove_event
;
8847 un
->un_insert_event
= NULL
;
8849 if ((un
->un_remove_event
!= NULL
) &&
8850 (ddi_remove_event_handler(un
->un_remove_cb_id
) !=
8853 * Note: We are returning here after having done
8854 * substantial cleanup above. This is consistent
8855 * with the legacy implementation but this may not
8856 * be the right thing to do.
8858 SD_ERROR(SD_LOG_ATTACH_DETACH
, un
,
8859 "sd_dr_detach: Cannot cancel remove event\n");
8860 goto err_remove_event
;
8862 un
->un_remove_event
= NULL
;
8865 /* Do not free the softstate if the callback routine is active */
8866 sd_sync_with_callback(un
);
8868 cmlb_detach(un
->un_cmlbhandle
, (void *)SD_PATH_DIRECT
);
8869 cmlb_free_handle(&un
->un_cmlbhandle
);
8872 * Hold the detach mutex here, to make sure that no other threads ever
8873 * can access a (partially) freed soft state structure.
8875 mutex_enter(&sd_detach_mutex
);
8878 * Clean up the soft state struct.
8879 * Cleanup is done in reverse order of allocs/inits.
8880 * At this point there should be no competing threads anymore.
8886 * Deallocate memory for SCSI FMA.
8888 kmem_free(un
->un_fm_private
, sizeof (struct sd_fm_internal
));
8891 * Unregister and free device id if it was not registered
8894 if (un
->un_f_devid_transport_defined
== FALSE
)
8895 ddi_devid_unregister(devi
);
8898 * free the devid structure if allocated before (by ddi_devid_init()
8899 * or ddi_devid_get()).
8902 ddi_devid_free(un
->un_devid
);
8903 un
->un_devid
= NULL
;
8907 * Destroy wmap cache if it exists.
8909 if (un
->un_wm_cache
!= NULL
) {
8910 kmem_cache_destroy(un
->un_wm_cache
);
8911 un
->un_wm_cache
= NULL
;
8915 * kstat cleanup is done in detach for all device types (4363169).
8916 * We do not want to fail detach if the device kstats are not deleted
8917 * since there is a confusion about the devo_refcnt for the device.
8918 * We just delete the kstats and let detach complete successfully.
8920 if (un
->un_stats
!= NULL
) {
8921 kstat_delete(un
->un_stats
);
8922 un
->un_stats
= NULL
;
8924 if (un
->un_errstats
!= NULL
) {
8925 kstat_delete(un
->un_errstats
);
8926 un
->un_errstats
= NULL
;
8929 /* Remove partition stats */
8930 if (un
->un_f_pkstats_enabled
) {
8931 for (i
= 0; i
< NSDMAP
; i
++) {
8932 if (un
->un_pstats
[i
] != NULL
) {
8933 kstat_delete(un
->un_pstats
[i
]);
8934 un
->un_pstats
[i
] = NULL
;
8939 /* Remove xbuf registration */
8940 ddi_xbuf_attr_unregister_devinfo(un
->un_xbuf_attr
, devi
);
8941 ddi_xbuf_attr_destroy(un
->un_xbuf_attr
);
8943 /* Remove driver properties */
8944 ddi_prop_remove_all(devi
);
8946 mutex_destroy(&un
->un_pm_mutex
);
8947 cv_destroy(&un
->un_pm_busy_cv
);
8949 cv_destroy(&un
->un_wcc_cv
);
8951 /* Open/close semaphore */
8952 sema_destroy(&un
->un_semoclose
);
8954 /* Removable media condvar. */
8955 cv_destroy(&un
->un_state_cv
);
8957 /* Suspend/resume condvar. */
8958 cv_destroy(&un
->un_suspend_cv
);
8959 cv_destroy(&un
->un_disk_busy_cv
);
8963 /* Free up soft state */
8964 devp
->sd_private
= NULL
;
8966 bzero(un
, sizeof (struct sd_lun
));
8967 #ifndef XPV_HVM_DRIVER
8968 ddi_soft_state_free(sd_state
, instance
);
8969 #endif /* !XPV_HVM_DRIVER */
8971 mutex_exit(&sd_detach_mutex
);
8973 /* This frees up the INQUIRY data associated with the device. */
8977 * After successfully detaching an instance, we update the information
8978 * of how many luns have been attached in the relative target and
8979 * controller for parallel SCSI. This information is used when sd tries
8980 * to set the tagged queuing capability in HBA.
8981 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
8982 * check if the device is parallel SCSI. However, we don't need to
8983 * check here because we've already checked during attach. No device
8984 * that is not parallel SCSI is in the chain.
8986 if ((tgt
>= 0) && (tgt
< NTARGETS_WIDE
)) {
8987 sd_scsi_update_lun_on_target(pdip
, tgt
, SD_SCSI_LUN_DETACH
);
8990 return (DDI_SUCCESS
);
8993 mutex_exit(SD_MUTEX(un
));
8996 _NOTE(NO_COMPETING_THREADS_NOW
);
8999 mutex_enter(&sd_detach_mutex
);
9000 un
->un_detach_count
--;
9001 mutex_exit(&sd_detach_mutex
);
9003 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
, "sd_unit_detach: exit failure\n");
9004 return (DDI_FAILURE
);
/*
 *    Function: sd_create_errstats
 *
 * Description: This routine instantiates the device error stats.
 *
 *        Note: During attach the stats are instantiated first so they are
 *		available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are initialized
 *		separately so data obtained during some attach-time routines is
 *		available. (4362483)
 *
 *   Arguments: un - driver soft state (unit) structure
 *		instance - driver instance
 *
 *     Context: Kernel thread context
 */

static void
sd_create_errstats(struct sd_lun *un, int instance)
{
	struct	sd_errstats	*stp;
	char	kstatmodule_err[KSTAT_STRLEN];
	char	kstatname[KSTAT_STRLEN];
	/* Number of named entries is derived from the errstats layout. */
	int	ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));

	ASSERT(un != NULL);

	/* Already instantiated (e.g. re-attach); nothing to do. */
	if (un->un_errstats != NULL) {
		return;
	}

	(void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
	    "%serr", sd_label);
	(void) snprintf(kstatname, sizeof (kstatname),
	    "%s%d,err", sd_label, instance);

	/* Persistent so the stats survive detach/attach of the instance. */
	un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
	    "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);

	if (un->un_errstats == NULL) {
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_create_errstats: Failed kstat_create\n");
		return;
	}

	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	kstat_named_init(&stp->sd_softerrs,	"Soft Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_harderrs,	"Hard Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_transerrs,	"Transport Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_vid,		"Vendor",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_pid,		"Product",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_revision,	"Revision",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_serial,	"Serial No",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_capacity,	"Size",
	    KSTAT_DATA_ULONGLONG);
	kstat_named_init(&stp->sd_rq_media_err,	"Media Error",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_ntrdy_err,	"Device Not Ready",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_nodev_err,	"No Device",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_recov_err,	"Recoverable",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_illrq_err,	"Illegal Request",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_pfa_err,	"Predictive Failure Analysis",
	    KSTAT_DATA_UINT32);

	un->un_errstats->ks_private = un;
	/* No update routine needed; values are maintained directly. */
	un->un_errstats->ks_update  = nulldev;

	kstat_install(un->un_errstats);
}
/*
 *    Function: sd_set_errstats
 *
 * Description: This routine sets the value of the vendor id, product id,
 *		revision, serial number, and capacity device error stats.
 *
 *        Note: During attach the stats are instantiated first so they are
 *		available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are initialized
 *		separately so data obtained during some attach-time routines is
 *		available. (4362483)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
	struct	sd_errstats	*stp;
	char			*sn;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);
	/*
	 * Copy the fixed-width INQUIRY identity fields; these are not
	 * NUL-terminated in the inquiry data, hence the explicit lengths.
	 */
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * All the errstats are persistent across detach/attach,
	 * so reset all the errstats here in case of the hot
	 * replacement of disk drives, except for not changed
	 * Sun qualified drives.
	 */
	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
		stp->sd_softerrs.value.ui32 = 0;
		stp->sd_harderrs.value.ui32 = 0;
		stp->sd_transerrs.value.ui32 = 0;
		stp->sd_rq_media_err.value.ui32 = 0;
		stp->sd_rq_ntrdy_err.value.ui32 = 0;
		stp->sd_rq_nodev_err.value.ui32 = 0;
		stp->sd_rq_recov_err.value.ui32 = 0;
		stp->sd_rq_illrq_err.value.ui32 = 0;
		stp->sd_rq_pfa_err.value.ui32 = 0;
	}

	/*
	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
	 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
	 * and a non-zero serial number).
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	} else {
		/*
		 * Set the "Serial No" kstat for non-Sun qualified drives
		 */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
		    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
		    INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
			(void) strlcpy(stp->sd_serial.value.c, sn,
			    sizeof (stp->sd_serial.value.c));
			ddi_prop_free(sn);
		}
	}

	if (un->un_f_blockcount_is_valid != TRUE) {
		/*
		 * Set capacity error stat to 0 for no media. This ensures
		 * a valid capacity is displayed in response to 'iostat -E'
		 * when no media is present in the device.
		 */
		stp->sd_capacity.value.ui64 = 0;
	} else {
		/*
		 * Multiply un_blockcount by un->un_sys_blocksize to get
		 * capacity.
		 *
		 * Note: for non-512 blocksize devices "un_blockcount" has been
		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
		 * (un_tgt_blocksize / un->un_sys_blocksize).
		 */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
}
/*
 *    Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *		stats for each partition with more than zero blocks.
 *		(4363169)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
	char	kstatname[KSTAT_STRLEN];
	int	instance;
	int	i;
	diskaddr_t	nblks = 0;
	char	*partname = NULL;

	ASSERT(un != NULL);

	instance = ddi_get_instance(SD_DEVINFO(un));

	/* Note:x86: is this a VTOC8/VTOC16 difference? */
	for (i = 0; i < NSDMAP; i++) {

		/* Skip partitions whose geometry cannot be obtained. */
		if (cmlb_partinfo(un->un_cmlbhandle, i,
		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
			continue;
		mutex_enter(SD_MUTEX(un));

		/* Only create a kstat once, and only for non-empty slices. */
		if ((un->un_pstats[i] == NULL) &&
		    (nblks != 0)) {

			(void) snprintf(kstatname, sizeof (kstatname),
			    "%s%d,%s", sd_label, instance,
			    partname);

			un->un_pstats[i] = kstat_create(sd_label,
			    instance, kstatname, "partition", KSTAT_TYPE_IO,
			    1, KSTAT_FLAG_PERSISTENT);
			if (un->un_pstats[i] != NULL) {
				/* Serialize updates with the soft state. */
				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
				kstat_install(un->un_pstats[i]);
			}
		}
		mutex_exit(SD_MUTEX(un));
	}
}
9238 #if (defined(__fibre))
/*
 *    Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *		callbacks. (fibre only)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
	ASSERT(un != NULL);

	if ((un->un_insert_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
	    &un->un_insert_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for an insertion event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_insert_event, sd_event_callback, (void *)un,
		    &(un->un_insert_cb_id));
	}

	if ((un->un_remove_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
	    &un->un_remove_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for a removal event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_remove_event, sd_event_callback, (void *)un,
		    &(un->un_remove_cb_id));
	}
}
/*
 *    Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon). The
 *		state is changed to OFFLINE which can be used to supress
 *		error msgs. (fibre only)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
	if (event == un->un_insert_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
		mutex_enter(SD_MUTEX(un));
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_last_state != SD_STATE_SUSPENDED) {
				un->un_state = un->un_last_state;
			} else {
				/*
				 * We have gone through SUSPEND/RESUME while
				 * we were offline. Restore the last state
				 */
				un->un_state = un->un_save_state;
			}
		}
		mutex_exit(SD_MUTEX(un));

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
	} else if (event == un->un_remove_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
		mutex_enter(SD_MUTEX(un));
		/*
		 * We need to handle an event callback that occurs during
		 * the suspend operation, since we don't prevent it.
		 */
		if (un->un_state != SD_STATE_OFFLINE) {
			if (un->un_state != SD_STATE_SUSPENDED) {
				New_state(un, SD_STATE_OFFLINE);
			} else {
				un->un_last_state = SD_STATE_OFFLINE;
			}
		}
		mutex_exit(SD_MUTEX(un));
	} else {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "!Unknown event\n");
	}

}
/*
 *    Function: sd_cache_control()
 *
 * Description: This routine is the driver entry point for setting
 *		read and write caching by modifying the WCE (write cache
 *		enable) and RCD (read cache disable) bits of mode
 *		page 8 (MODEPAGE_CACHING).
 *
 *   Arguments: ssc		- ssc contains pointer to driver soft state
 *				  (unit) structure for this target.
 *		rcd_flag	- flag for controlling the read cache
 *		wce_flag	- flag for controlling the write cache
 *
 * Return Code: EIO
 *		code returned by sd_send_scsi_MODE_SENSE and
 *		sd_send_scsi_MODE_SELECT
 *
 *     Context: Kernel Thread
 */

static int
sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
{
	struct mode_caching	*mode_caching_page;
	uchar_t			*header;
	size_t			buflen;
	int			hdrlen;
	int			bd_len;
	int			rval = 0;
	struct mode_header_grp2	*mhp;
	struct sd_lun		*un;
	int			status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	/* ATAPI uses the 10-byte group-2 header; SCSI the 6-byte header. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers.  Set
	 * a pointer to the page itself.  Use mode_cache_scsi3 to insure
	 * we get all of the mode sense data otherwise, the mode select
	 * will fail.  mode_cache_scsi3 is a superset of mode_caching.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_cache_scsi3);

	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}

	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_cache_control: Mode Sense Failed\n");
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mhp	= (struct mode_header_grp2 *)header;
		bd_len	= (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len	= ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "sd_cache_control: Mode Sense returned invalid block "
		    "descriptor length\n");
		rval = EIO;
		goto mode_sense_failed;
	}

	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_cache_control: Mode Sense caching page code mismatch "
		    "%d\n", mode_caching_page->mode_page.code);
		rval = EIO;
		goto mode_sense_failed;
	}

	/* Check the relevant bits on successful mode sense. */
	if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
	    (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
	    (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
	    (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {

		size_t sbuflen;
		uchar_t save_pg;

		/*
		 * Construct select buffer length based on the
		 * length of the sense data returned.
		 */
		sbuflen = hdrlen + bd_len +
		    sizeof (struct mode_page) +
		    (int)mode_caching_page->mode_page.length;

		/*
		 * Set the caching bits as requested.
		 */
		if (rcd_flag == SD_CACHE_ENABLE)
			mode_caching_page->rcd = 0;
		else if (rcd_flag == SD_CACHE_DISABLE)
			mode_caching_page->rcd = 1;

		if (wce_flag == SD_CACHE_ENABLE)
			mode_caching_page->wce = 1;
		else if (wce_flag == SD_CACHE_DISABLE)
			mode_caching_page->wce = 0;

		/*
		 * Save the page if the mode sense says the
		 * drive supports it.
		 */
		save_pg = mode_caching_page->mode_page.ps ?
		    SD_SAVE_PAGE : SD_DONTSAVE_PAGE;

		/* Clear reserved bits before mode select. */
		mode_caching_page->mode_page.ps = 0;

		/*
		 * Clear out mode header for mode select.
		 * The rest of the retrieved page will be reused.
		 */
		bzero(header, hdrlen);

		if (un->un_f_cfg_is_atapi == TRUE) {
			mhp = (struct mode_header_grp2 *)header;
			mhp->bdesc_length_hi = bd_len >> 8;
			mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff;
		} else {
			((struct mode_header *)header)->bdesc_length = bd_len;
		}

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		/* Issue mode select to change the cache settings */
		if (un->un_f_cfg_is_atapi == TRUE) {
			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header,
			    sbuflen, save_pg, SD_PATH_DIRECT);
		} else {
			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
			    sbuflen, save_pg, SD_PATH_DIRECT);
		}
	}

mode_sense_failed:

	kmem_free(header, buflen);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	return (rval);
}
/*
 *    Function: sd_get_write_cache_enabled()
 *
 * Description: This routine is the driver entry point for determining if
 *		write caching is enabled.  It examines the WCE (write cache
 *		enable) bits of mode page 8 (MODEPAGE_CACHING).
 *
 *   Arguments: ssc		- ssc contains pointer to driver soft state
 *				  (unit) structure for this target.
 *		is_enabled	- pointer to int where write cache enabled state
 *				  is returned (non-zero -> write cache enabled)
 *
 * Return Code: EIO
 *		code returned by sd_send_scsi_MODE_SENSE
 *
 *     Context: Kernel Thread
 *
 * NOTE: If ioctl is added to disable write cache, this sequence should
 * be followed so that no locking is required for accesses to
 * un->un_f_write_cache_enabled:
 *	do mode select to clear wce
 *	do synchronize cache to flush cache
 *	set un->un_f_write_cache_enabled = FALSE
 *
 * Conversely, an ioctl to enable the write cache should be done
 * in this order:
 *	set un->un_f_write_cache_enabled = TRUE
 *	do mode select to set wce
 */

static int
sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
{
	struct mode_caching	*mode_caching_page;
	uchar_t			*header;
	size_t			buflen;
	int			hdrlen;
	int			bd_len;
	int			rval = 0;
	struct sd_lun		*un;
	int			status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(is_enabled != NULL);

	/* in case of error, flag as enabled */
	*is_enabled = TRUE;

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	/* ATAPI uses the 10-byte group-2 header; SCSI the 6-byte header. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers.  Set
	 * a pointer to the page itself.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching);
	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}

	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_get_write_cache_enabled: Mode Sense Failed\n");
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2	*mhp;
		mhp	= (struct mode_header_grp2 *)header;
		bd_len	= (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len	= ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/* FMA should make upset complain here */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
		    "block descriptor length\n");
		rval = EIO;
		goto mode_sense_failed;
	}

	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		/* FMA could make upset complain here */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_get_write_cache_enabled: Mode Sense caching page "
		    "code mismatch %d\n", mode_caching_page->mode_page.code);
		rval = EIO;
		goto mode_sense_failed;
	}
	*is_enabled = mode_caching_page->wce;

mode_sense_failed:
	if (rval == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	} else if (rval == EIO) {
		/*
		 * Some disks do not support mode sense(6), we
		 * should ignore this kind of error(sense key is
		 * 0x5 - illegal request).
		 */
		uint8_t *sensep;
		int senlen;

		sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
		senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
		    ssc->ssc_uscsi_cmd->uscsi_rqresid);

		if (senlen > 0 &&
		    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		} else {
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		}
	} else {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	kmem_free(header, buflen);
	return (rval);
}
/*
 *    Function: sd_get_nv_sup()
 *
 * Description: This routine is the driver entry point for
 *		determining whether non-volatile cache is supported. This
 *		determination process works as follows:
 *
 *		1. sd first queries sd.conf on whether
 *		suppress_cache_flush bit is set for this device.
 *
 *		2. if not there, then queries the internal disk table.
 *
 *		3. if either sd.conf or internal disk table specifies
 *		cache flush be suppressed, we don't bother checking
 *		NV_SUP bit.
 *
 *		If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
 *		the optional INQUIRY VPD page 0x86. If the device
 *		supports VPD page 0x86, sd examines the NV_SUP
 *		(non-volatile cache support) bit in the INQUIRY VPD page
 *		0x86:
 *		o If NV_SUP bit is set, sd assumes the device has a
 *		non-volatile cache and set the
 *		un_f_sync_nv_supported to TRUE.
 *		o Otherwise cache is not non-volatile,
 *		un_f_sync_nv_supported is set to FALSE.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 * Return Code:
 *
 *     Context: Kernel Thread
 */

static void
sd_get_nv_sup(sd_ssc_t *ssc)
{
	int		rval		= 0;
	uchar_t		*inq86		= NULL;
	size_t		inq86_len	= MAX_INQUIRY_SIZE;
	size_t		inq86_resid	= 0;
	struct	dk_callback *dkc;
	struct	sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Be conservative on the device's support of
	 * SYNC_NV bit: un_f_sync_nv_supported is
	 * initialized to be false.
	 */
	un->un_f_sync_nv_supported = FALSE;

	/*
	 * If either sd.conf or internal disk table
	 * specifies cache flush be suppressed, then
	 * we don't bother checking NV_SUP bit.
	 */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
		mutex_exit(SD_MUTEX(un));
		/* collect page 86 data if available */
		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);

		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
		    0x01, 0x86, &inq86_resid);

		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_get_nv_sup: \
			    successfully get VPD page: %x \
			    PAGE LENGTH: %x BYTE 6: %x\n",
			    inq86[1], inq86[3], inq86[6]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * check the value of NV_SUP bit: only if the device
			 * reports NV_SUP bit to be 1, the
			 * un_f_sync_nv_supported bit will be set to true.
			 */
			if (inq86[6] & SD_VPD_NV_SUP) {
				un->un_f_sync_nv_supported = TRUE;
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inq86, inq86_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Send a SYNC CACHE command to check whether
	 * SYNC_NV bit is supported. This command should have
	 * un_f_sync_nv_supported set to correct value.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_sync_nv_supported) {
		mutex_exit(SD_MUTEX(un));
		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
		dkc->dkc_flag = FLUSH_VOLATILE;
		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);

		/*
		 * Send a TEST UNIT READY command to the device. This should
		 * clear any outstanding UNIT ATTENTION that may be present.
		 */
		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
		if (rval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		kmem_free(dkc, sizeof (struct dk_callback));
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
	    un_f_suppress_cache_flush is set to %d\n",
	    un->un_f_suppress_cache_flush);
}
9809 * Function: sd_make_device
9811 * Description: Utility routine to return the Solaris device number from
9812 * the data in the device's dev_info structure.
9814 * Return Code: The Solaris device number
9820 sd_make_device(dev_info_t
*devi
)
9822 return (makedevice(ddi_driver_major(devi
),
9823 ddi_get_instance(devi
) << SDUNIT_SHIFT
));
9828 * Function: sd_pm_entry
9830 * Description: Called at the start of a new command to manage power
9831 * and busy status of a device. This includes determining whether
9832 * the current power state of the device is sufficient for
9833 * performing the command or whether it must be changed.
9834 * The PM framework is notified appropriately.
9835 * Only with a return status of DDI_SUCCESS will the
9836 * component be busy to the framework.
9838 * All callers of sd_pm_entry must check the return status
9839 * and only call sd_pm_exit it it was DDI_SUCCESS. A status
9840 * of DDI_FAILURE indicates the device failed to power up.
9841 * In this case un_pm_count has been adjusted so the result
9842 * on exit is still powered down, ie. count is less than 0.
9843 * Calling sd_pm_exit with this count value hits an ASSERT.
9845 * Return Code: DDI_SUCCESS or DDI_FAILURE
9847 * Context: Kernel thread context.
9851 sd_pm_entry(struct sd_lun
*un
)
9853 int return_status
= DDI_SUCCESS
;
9855 ASSERT(!mutex_owned(SD_MUTEX(un
)));
9856 ASSERT(!mutex_owned(&un
->un_pm_mutex
));
9858 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_entry: entry\n");
9860 if (un
->un_f_pm_is_enabled
== FALSE
) {
9861 SD_TRACE(SD_LOG_IO_PM
, un
,
9862 "sd_pm_entry: exiting, PM not enabled\n");
9863 return (return_status
);
9867 * Just increment a counter if PM is enabled. On the transition from
9868 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9869 * the count with each IO and mark the device as idle when the count
9872 * If the count is less than 0 the device is powered down. If a powered
9873 * down device is successfully powered up then the count must be
9874 * incremented to reflect the power up. Note that it'll get incremented
9875 * a second time to become busy.
9877 * Because the following has the potential to change the device state
9878 * and must release the un_pm_mutex to do so, only one thread can be
9879 * allowed through at a time.
9882 mutex_enter(&un
->un_pm_mutex
);
9883 while (un
->un_pm_busy
== TRUE
) {
9884 cv_wait(&un
->un_pm_busy_cv
, &un
->un_pm_mutex
);
9886 un
->un_pm_busy
= TRUE
;
9888 if (un
->un_pm_count
< 1) {
9890 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_entry: busy component\n");
9893 * Indicate we are now busy so the framework won't attempt to
9894 * power down the device. This call will only fail if either
9895 * we passed a bad component number or the device has no
9896 * components. Neither of these should ever happen.
9898 mutex_exit(&un
->un_pm_mutex
);
9899 return_status
= pm_busy_component(SD_DEVINFO(un
), 0);
9900 ASSERT(return_status
== DDI_SUCCESS
);
9902 mutex_enter(&un
->un_pm_mutex
);
9904 if (un
->un_pm_count
< 0) {
9905 mutex_exit(&un
->un_pm_mutex
);
9907 SD_TRACE(SD_LOG_IO_PM
, un
,
9908 "sd_pm_entry: power up component\n");
9911 * pm_raise_power will cause sdpower to be called
9912 * which brings the device power level to the
9913 * desired state, If successful, un_pm_count and
9914 * un_power_level will be updated appropriately.
9916 return_status
= pm_raise_power(SD_DEVINFO(un
), 0,
9917 SD_PM_STATE_ACTIVE(un
));
9919 mutex_enter(&un
->un_pm_mutex
);
9921 if (return_status
!= DDI_SUCCESS
) {
9924 * Idle the device and adjust the count
9925 * so the result on exit is that we're
9926 * still powered down, ie. count is less than 0.
9928 SD_TRACE(SD_LOG_IO_PM
, un
,
9929 "sd_pm_entry: power up failed,"
9930 " idle the component\n");
9932 (void) pm_idle_component(SD_DEVINFO(un
), 0);
9936 * Device is powered up, verify the
9937 * count is non-negative.
9938 * This is debug only.
9940 ASSERT(un
->un_pm_count
== 0);
9944 if (return_status
== DDI_SUCCESS
) {
9946 * For performance, now that the device has been tagged
9947 * as busy, and it's known to be powered up, update the
9948 * chain types to use jump tables that do not include
9949 * pm. This significantly lowers the overhead and
9950 * therefore improves performance.
9953 mutex_exit(&un
->un_pm_mutex
);
9954 mutex_enter(SD_MUTEX(un
));
9955 SD_TRACE(SD_LOG_IO_PM
, un
,
9956 "sd_pm_entry: changing uscsi_chain_type from %d\n",
9957 un
->un_uscsi_chain_type
);
9959 if (un
->un_f_non_devbsize_supported
) {
9960 un
->un_buf_chain_type
=
9961 SD_CHAIN_INFO_RMMEDIA_NO_PM
;
9963 un
->un_buf_chain_type
=
9964 SD_CHAIN_INFO_DISK_NO_PM
;
9966 un
->un_uscsi_chain_type
= SD_CHAIN_INFO_USCSI_CMD_NO_PM
;
9968 SD_TRACE(SD_LOG_IO_PM
, un
,
9969 " changed uscsi_chain_type to %d\n",
9970 un
->un_uscsi_chain_type
);
9971 mutex_exit(SD_MUTEX(un
));
9972 mutex_enter(&un
->un_pm_mutex
);
9974 if (un
->un_pm_idle_timeid
== NULL
) {
9976 un
->un_pm_idle_timeid
=
9977 timeout(sd_pm_idletimeout_handler
, un
,
9978 (drv_usectohz((clock_t)300000)));
9980 * Include an extra call to busy which keeps the
9981 * device busy with-respect-to the PM layer
9982 * until the timer fires, at which time it'll
9983 * get the extra idle call.
9985 (void) pm_busy_component(SD_DEVINFO(un
), 0);
9989 un
->un_pm_busy
= FALSE
;
9991 cv_signal(&un
->un_pm_busy_cv
);
9995 SD_TRACE(SD_LOG_IO_PM
, un
,
9996 "sd_pm_entry: exiting, un_pm_count = %d\n", un
->un_pm_count
);
9998 mutex_exit(&un
->un_pm_mutex
);
10000 return (return_status
);
10005 * Function: sd_pm_exit
10007 * Description: Called at the completion of a command to manage busy
10008 * status for the device. If the device becomes idle the
10009 * PM framework is notified.
10011 * Context: Kernel thread context
10015 sd_pm_exit(struct sd_lun
*un
)
10017 ASSERT(!mutex_owned(SD_MUTEX(un
)));
10018 ASSERT(!mutex_owned(&un
->un_pm_mutex
));
10020 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_exit: entry\n");
10023 * After attach the following flag is only read, so don't
10024 * take the penalty of acquiring a mutex for it.
10026 if (un
->un_f_pm_is_enabled
== TRUE
) {
10028 mutex_enter(&un
->un_pm_mutex
);
10031 SD_TRACE(SD_LOG_IO_PM
, un
,
10032 "sd_pm_exit: un_pm_count = %d\n", un
->un_pm_count
);
10034 ASSERT(un
->un_pm_count
>= 0);
10035 if (un
->un_pm_count
== 0) {
10036 mutex_exit(&un
->un_pm_mutex
);
10038 SD_TRACE(SD_LOG_IO_PM
, un
,
10039 "sd_pm_exit: idle component\n");
10041 (void) pm_idle_component(SD_DEVINFO(un
), 0);
10044 mutex_exit(&un
->un_pm_mutex
);
10048 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_exit: exiting\n");
10055 * Description: Driver's open(9e) entry point function.
10057 * Arguments: dev_i - pointer to device number
10058 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10059 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10060 * cred_p - user credential pointer
10062 * Return Code: EINVAL
10068 * Context: Kernel thread context
10072 sdopen(dev_t
*dev_p
, int flag
, int otyp
, cred_t
*cred_p
)
10081 diskaddr_t nblks
= 0;
10082 diskaddr_t label_cap
;
10084 /* Validate the open type */
10085 if (otyp
>= OTYPCNT
) {
10090 instance
= SDUNIT(dev
);
10091 mutex_enter(&sd_detach_mutex
);
10094 * Fail the open if there is no softstate for the instance, or
10095 * if another thread somewhere is trying to detach the instance.
10097 if (((un
= ddi_get_soft_state(sd_state
, instance
)) == NULL
) ||
10098 (un
->un_detach_count
!= 0)) {
10099 mutex_exit(&sd_detach_mutex
);
10101 * The probe cache only needs to be cleared when open (9e) fails
10102 * with ENXIO (4238046).
10105 * un-conditionally clearing probe cache is ok with
10106 * separate sd/ssd binaries
10107 * x86 platform can be an issue with both parallel
10108 * and fibre in 1 binary
10110 sd_scsi_clear_probe_cache();
10115 * The un_layer_count is to prevent another thread in specfs from
10116 * trying to detach the instance, which can happen when we are
10117 * called from a higher-layer driver instead of thru specfs.
10118 * This will not be needed when DDI provides a layered driver
10119 * interface that allows specfs to know that an instance is in
10120 * use by a layered driver & should not be detached.
10122 * Note: the semantics for layered driver opens are exactly one
10123 * close for every open.
10125 if (otyp
== OTYP_LYR
) {
10126 un
->un_layer_count
++;
10130 * Keep a count of the current # of opens in progress. This is because
10131 * some layered drivers try to call us as a regular open. This can
10132 * cause problems that we cannot prevent, however by keeping this count
10133 * we can at least keep our open and detach routines from racing against
10134 * each other under such conditions.
10136 un
->un_opens_in_progress
++;
10137 mutex_exit(&sd_detach_mutex
);
10139 nodelay
= (flag
& (FNDELAY
| FNONBLOCK
));
10140 part
= SDPART(dev
);
10141 partmask
= 1 << part
;
10144 * We use a semaphore here in order to serialize
10145 * open and close requests on the device.
10147 sema_p(&un
->un_semoclose
);
10149 mutex_enter(SD_MUTEX(un
));
10152 * All device accesses go thru sdstrategy() where we check
10153 * on suspend status but there could be a scsi_poll command,
10154 * which bypasses sdstrategy(), so we need to check pm
10159 while ((un
->un_state
== SD_STATE_SUSPENDED
) ||
10160 (un
->un_state
== SD_STATE_PM_CHANGING
)) {
10161 cv_wait(&un
->un_suspend_cv
, SD_MUTEX(un
));
10164 mutex_exit(SD_MUTEX(un
));
10165 if (sd_pm_entry(un
) != DDI_SUCCESS
) {
10167 SD_ERROR(SD_LOG_OPEN_CLOSE
, un
,
10168 "sdopen: sd_pm_entry failed\n");
10169 goto open_failed_with_pm
;
10171 mutex_enter(SD_MUTEX(un
));
10174 /* check for previous exclusive open */
10175 SD_TRACE(SD_LOG_OPEN_CLOSE
, un
, "sdopen: un=%p\n", (void *)un
);
10176 SD_TRACE(SD_LOG_OPEN_CLOSE
, un
,
10177 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10178 un
->un_exclopen
, flag
, un
->un_ocmap
.regopen
[otyp
]);
10180 if (un
->un_exclopen
& (partmask
)) {
10181 goto excl_open_fail
;
10184 if (flag
& FEXCL
) {
10186 if (un
->un_ocmap
.lyropen
[part
]) {
10187 goto excl_open_fail
;
10189 for (i
= 0; i
< (OTYPCNT
- 1); i
++) {
10190 if (un
->un_ocmap
.regopen
[i
] & (partmask
)) {
10191 goto excl_open_fail
;
10197 * Check the write permission if this is a removable media device,
10198 * NDELAY has not been set, and writable permission is requested.
10200 * Note: If NDELAY was set and this is write-protected media the WRITE
10201 * attempt will fail with EIO as part of the I/O processing. This is a
10202 * more permissive implementation that allows the open to succeed and
10203 * WRITE attempts to fail when appropriate.
10205 if (un
->un_f_chk_wp_open
) {
10206 if ((flag
& FWRITE
) && (!nodelay
)) {
10207 mutex_exit(SD_MUTEX(un
));
10209 * Defer the check for write permission on writable
10210 * DVD drive till sdstrategy and will not fail open even
10211 * if FWRITE is set as the device can be writable
10212 * depending upon the media and the media can change
10213 * after the call to open().
10215 if (un
->un_f_dvdram_writable_device
== FALSE
) {
10216 if (ISCD(un
) || sr_check_wp(dev
)) {
10218 mutex_enter(SD_MUTEX(un
));
10219 SD_ERROR(SD_LOG_OPEN_CLOSE
, un
, "sdopen: "
10220 "write to cd or write protected media\n");
10224 mutex_enter(SD_MUTEX(un
));
10229 * If opening in NDELAY/NONBLOCK mode, just return.
10230 * Check if disk is ready and has a valid geometry later.
10235 mutex_exit(SD_MUTEX(un
));
10236 ssc
= sd_ssc_init(un
);
10237 rval
= sd_ready_and_valid(ssc
, part
);
10239 mutex_enter(SD_MUTEX(un
));
10241 * Fail if device is not ready or if the number of disk
10242 * blocks is zero or negative for non CD devices.
10247 if (rval
== SD_READY_VALID
&& (!ISCD(un
))) {
10248 /* if cmlb_partinfo fails, nblks remains 0 */
10249 mutex_exit(SD_MUTEX(un
));
10250 (void) cmlb_partinfo(un
->un_cmlbhandle
, part
, &nblks
,
10251 NULL
, NULL
, NULL
, (void *)SD_PATH_DIRECT
);
10252 mutex_enter(SD_MUTEX(un
));
10255 if ((rval
!= SD_READY_VALID
) ||
10256 (!ISCD(un
) && nblks
<= 0)) {
10257 rval
= un
->un_f_has_removable_media
? ENXIO
: EIO
;
10258 SD_ERROR(SD_LOG_OPEN_CLOSE
, un
, "sdopen: "
10259 "device not ready or invalid disk block value\n");
10262 #if defined(__i386) || defined(__amd64)
10266 * x86 requires special nodelay handling, so that p0 is
10267 * always defined and accessible.
10268 * Invalidate geometry only if device is not already open.
10270 cp
= &un
->un_ocmap
.chkd
[0];
10271 while (cp
< &un
->un_ocmap
.chkd
[OCSIZE
]) {
10272 if (*cp
!= (uchar_t
)0) {
10277 if (cp
== &un
->un_ocmap
.chkd
[OCSIZE
]) {
10278 mutex_exit(SD_MUTEX(un
));
10279 cmlb_invalidate(un
->un_cmlbhandle
,
10280 (void *)SD_PATH_DIRECT
);
10281 mutex_enter(SD_MUTEX(un
));
10287 if (otyp
== OTYP_LYR
) {
10288 un
->un_ocmap
.lyropen
[part
]++;
10290 un
->un_ocmap
.regopen
[otyp
] |= partmask
;
10293 /* Set up open and exclusive open flags */
10294 if (flag
& FEXCL
) {
10295 un
->un_exclopen
|= (partmask
);
10299 * If the lun is EFI labeled and lun capacity is greater than the
10300 * capacity contained in the label, log a sys-event to notify the
10301 * interested module.
10302 * To avoid an infinite loop of logging sys-event, we only log the
10303 * event when the lun is not opened in NDELAY mode. The event handler
10304 * should open the lun in NDELAY mode.
10307 mutex_exit(SD_MUTEX(un
));
10308 if (cmlb_efi_label_capacity(un
->un_cmlbhandle
, &label_cap
,
10309 (void*)SD_PATH_DIRECT
) == 0) {
10310 mutex_enter(SD_MUTEX(un
));
10311 if (un
->un_f_blockcount_is_valid
&&
10312 un
->un_blockcount
> label_cap
&&
10313 un
->un_f_expnevent
== B_FALSE
) {
10314 un
->un_f_expnevent
= B_TRUE
;
10315 mutex_exit(SD_MUTEX(un
));
10316 sd_log_lun_expansion_event(un
,
10317 (nodelay
? KM_NOSLEEP
: KM_SLEEP
));
10318 mutex_enter(SD_MUTEX(un
));
10321 mutex_enter(SD_MUTEX(un
));
10325 SD_TRACE(SD_LOG_OPEN_CLOSE
, un
, "sdopen: "
10326 "open of part %d type %d\n", part
, otyp
);
10328 mutex_exit(SD_MUTEX(un
));
10333 sema_v(&un
->un_semoclose
);
10335 mutex_enter(&sd_detach_mutex
);
10336 un
->un_opens_in_progress
--;
10337 mutex_exit(&sd_detach_mutex
);
10339 SD_TRACE(SD_LOG_OPEN_CLOSE
, un
, "sdopen: exit success\n");
10340 return (DDI_SUCCESS
);
10343 SD_ERROR(SD_LOG_OPEN_CLOSE
, un
, "sdopen: fail exclusive open\n");
10347 mutex_exit(SD_MUTEX(un
));
10350 * On a failed open we must exit the pm management.
10355 open_failed_with_pm
:
10356 sema_v(&un
->un_semoclose
);
10358 mutex_enter(&sd_detach_mutex
);
10359 un
->un_opens_in_progress
--;
10360 if (otyp
== OTYP_LYR
) {
10361 un
->un_layer_count
--;
10363 mutex_exit(&sd_detach_mutex
);
10370 * Function: sdclose
10372 * Description: Driver's close(9e) entry point function.
10374 * Arguments: dev - device number
10375 * flag - file status flag, informational only
10376 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10377 * cred_p - user credential pointer
10379 * Return Code: ENXIO
10381 * Context: Kernel thread context
10385 sdclose(dev_t dev
, int flag
, int otyp
, cred_t
*cred_p
)
10393 /* Validate the open type */
10394 if (otyp
>= OTYPCNT
) {
10398 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
10402 part
= SDPART(dev
);
10403 nodelay
= flag
& (FNDELAY
| FNONBLOCK
);
10405 SD_TRACE(SD_LOG_OPEN_CLOSE
, un
,
10406 "sdclose: close of part %d type %d\n", part
, otyp
);
10409 * We use a semaphore here in order to serialize
10410 * open and close requests on the device.
10412 sema_p(&un
->un_semoclose
);
10414 mutex_enter(SD_MUTEX(un
));
10416 /* Don't proceed if power is being changed. */
10417 while (un
->un_state
== SD_STATE_PM_CHANGING
) {
10418 cv_wait(&un
->un_suspend_cv
, SD_MUTEX(un
));
10421 if (un
->un_exclopen
& (1 << part
)) {
10422 un
->un_exclopen
&= ~(1 << part
);
10425 /* Update the open partition map */
10426 if (otyp
== OTYP_LYR
) {
10427 un
->un_ocmap
.lyropen
[part
] -= 1;
10429 un
->un_ocmap
.regopen
[otyp
] &= ~(1 << part
);
10432 cp
= &un
->un_ocmap
.chkd
[0];
10433 while (cp
< &un
->un_ocmap
.chkd
[OCSIZE
]) {
10440 if (cp
== &un
->un_ocmap
.chkd
[OCSIZE
]) {
10441 SD_TRACE(SD_LOG_OPEN_CLOSE
, un
, "sdclose: last close\n");
10444 * We avoid persistance upon the last close, and set
10445 * the throttle back to the maximum.
10447 un
->un_throttle
= un
->un_saved_throttle
;
10449 if (un
->un_state
== SD_STATE_OFFLINE
) {
10450 if (un
->un_f_is_fibre
== FALSE
) {
10451 scsi_log(SD_DEVINFO(un
), sd_label
,
10452 CE_WARN
, "offline\n");
10454 mutex_exit(SD_MUTEX(un
));
10455 cmlb_invalidate(un
->un_cmlbhandle
,
10456 (void *)SD_PATH_DIRECT
);
10457 mutex_enter(SD_MUTEX(un
));
10461 * Flush any outstanding writes in NVRAM cache.
10462 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10463 * cmd, it may not work for non-Pluto devices.
10464 * SYNCHRONIZE CACHE is not required for removables,
10465 * except DVD-RAM drives.
10467 * Also note: because SYNCHRONIZE CACHE is currently
10468 * the only command issued here that requires the
10469 * drive be powered up, only do the power up before
10470 * sending the Sync Cache command. If additional
10471 * commands are added which require a powered up
10472 * drive, the following sequence may have to change.
10474 * And finally, note that parallel SCSI on SPARC
10475 * only issues a Sync Cache to DVD-RAM, a newly
10476 * supported device.
10478 #if defined(__i386) || defined(__amd64)
10479 if ((un
->un_f_sync_cache_supported
&&
10480 un
->un_f_sync_cache_required
) ||
10481 un
->un_f_dvdram_writable_device
== TRUE
) {
10483 if (un
->un_f_dvdram_writable_device
== TRUE
) {
10485 mutex_exit(SD_MUTEX(un
));
10486 if (sd_pm_entry(un
) == DDI_SUCCESS
) {
10488 sd_send_scsi_SYNCHRONIZE_CACHE(un
,
10490 /* ignore error if not supported */
10491 if (rval
== ENOTSUP
) {
10493 } else if (rval
!= 0) {
10500 mutex_enter(SD_MUTEX(un
));
10504 * For devices which supports DOOR_LOCK, send an ALLOW
10505 * MEDIA REMOVAL command, but don't get upset if it
10506 * fails. We need to raise the power of the drive before
10507 * we can call sd_send_scsi_DOORLOCK()
10509 if (un
->un_f_doorlock_supported
) {
10510 mutex_exit(SD_MUTEX(un
));
10511 if (sd_pm_entry(un
) == DDI_SUCCESS
) {
10514 ssc
= sd_ssc_init(un
);
10515 rval
= sd_send_scsi_DOORLOCK(ssc
,
10516 SD_REMOVAL_ALLOW
, SD_PATH_DIRECT
);
10518 sd_ssc_assessment(ssc
,
10523 if (ISCD(un
) && (rval
!= 0) &&
10530 mutex_enter(SD_MUTEX(un
));
10534 * If a device has removable media, invalidate all
10535 * parameters related to media, such as geometry,
10536 * blocksize, and blockcount.
10538 if (un
->un_f_has_removable_media
) {
10543 * Destroy the cache (if it exists) which was
10544 * allocated for the write maps since this is
10545 * the last close for this media.
10547 if (un
->un_wm_cache
) {
10549 * Check if there are pending commands.
10550 * and if there are give a warning and
10551 * do not destroy the cache.
10553 if (un
->un_ncmds_in_driver
> 0) {
10554 scsi_log(SD_DEVINFO(un
),
10556 "Unable to clean up memory "
10557 "because of pending I/O\n");
10559 kmem_cache_destroy(
10561 un
->un_wm_cache
= NULL
;
10567 mutex_exit(SD_MUTEX(un
));
10568 sema_v(&un
->un_semoclose
);
10570 if (otyp
== OTYP_LYR
) {
10571 mutex_enter(&sd_detach_mutex
);
10573 * The detach routine may run when the layer count
10576 un
->un_layer_count
--;
10577 mutex_exit(&sd_detach_mutex
);
10585 * Function: sd_ready_and_valid
10587 * Description: Test if device is ready and has a valid geometry.
10589 * Arguments: ssc - sd_ssc_t will contain un
10590 * un - driver soft state (unit) structure
10592 * Return Code: SD_READY_VALID ready and valid label
10593 * SD_NOT_READY_VALID not ready, no label
10594 * SD_RESERVED_BY_OTHERS reservation conflict
10596 * Context: Never called at interrupt context.
10600 sd_ready_and_valid(sd_ssc_t
*ssc
, int part
)
10602 struct sd_errstats
*stp
;
10605 int rval
= SD_READY_VALID
;
10607 boolean_t is_valid
;
10611 ASSERT(ssc
!= NULL
);
10613 ASSERT(un
!= NULL
);
10614 ASSERT(!mutex_owned(SD_MUTEX(un
)));
10616 mutex_enter(SD_MUTEX(un
));
10618 * If a device has removable media, we must check if media is
10619 * ready when checking if this device is ready and valid.
10621 if (un
->un_f_has_removable_media
) {
10622 mutex_exit(SD_MUTEX(un
));
10623 status
= sd_send_scsi_TEST_UNIT_READY(ssc
, 0);
10626 rval
= SD_NOT_READY_VALID
;
10627 mutex_enter(SD_MUTEX(un
));
10629 /* Ignore all failed status for removalbe media */
10630 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
10635 is_valid
= SD_IS_VALID_LABEL(un
);
10636 mutex_enter(SD_MUTEX(un
));
10638 (un
->un_f_blockcount_is_valid
== FALSE
) ||
10639 (un
->un_f_tgt_blocksize_is_valid
== FALSE
)) {
10641 /* capacity has to be read every open. */
10642 mutex_exit(SD_MUTEX(un
));
10643 status
= sd_send_scsi_READ_CAPACITY(ssc
, &capacity
,
10644 &lbasize
, SD_PATH_DIRECT
);
10647 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
10649 cmlb_invalidate(un
->un_cmlbhandle
,
10650 (void *)SD_PATH_DIRECT
);
10651 mutex_enter(SD_MUTEX(un
));
10652 rval
= SD_NOT_READY_VALID
;
10656 mutex_enter(SD_MUTEX(un
));
10657 sd_update_block_info(un
, lbasize
, capacity
);
10662 * Check if the media in the device is writable or not.
10664 if (!is_valid
&& ISCD(un
)) {
10665 sd_check_for_writable_cd(ssc
, SD_PATH_DIRECT
);
10670 * Do a test unit ready to clear any unit attention from non-cd
10673 mutex_exit(SD_MUTEX(un
));
10675 status
= sd_send_scsi_TEST_UNIT_READY(ssc
, 0);
10677 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
10680 mutex_enter(SD_MUTEX(un
));
10685 * If this is a non 512 block device, allocate space for
10686 * the wmap cache. This is being done here since every time
10687 * a media is changed this routine will be called and the
10688 * block size is a function of media rather than device.
10690 if (((un
->un_f_rmw_type
!= SD_RMW_TYPE_RETURN_ERROR
||
10691 un
->un_f_non_devbsize_supported
) &&
10692 un
->un_tgt_blocksize
!= DEV_BSIZE
) ||
10693 un
->un_f_enable_rmw
) {
10694 if (!(un
->un_wm_cache
)) {
10695 (void) snprintf(name_str
, sizeof (name_str
),
10697 ddi_driver_name(SD_DEVINFO(un
)),
10698 ddi_get_instance(SD_DEVINFO(un
)));
10699 un
->un_wm_cache
= kmem_cache_create(
10700 name_str
, sizeof (struct sd_w_map
),
10701 8, sd_wm_cache_constructor
,
10702 sd_wm_cache_destructor
, NULL
,
10703 (void *)un
, NULL
, 0);
10704 if (!(un
->un_wm_cache
)) {
10711 if (un
->un_state
== SD_STATE_NORMAL
) {
10713 * If the target is not yet ready here (defined by a TUR
10714 * failure), invalidate the geometry and print an 'offline'
10715 * message. This is a legacy message, as the state of the
10716 * target is not actually changed to SD_STATE_OFFLINE.
10718 * If the TUR fails for EACCES (Reservation Conflict),
10719 * SD_RESERVED_BY_OTHERS will be returned to indicate
10720 * reservation conflict. If the TUR fails for other
10721 * reasons, SD_NOT_READY_VALID will be returned.
10725 mutex_exit(SD_MUTEX(un
));
10726 err
= sd_send_scsi_TEST_UNIT_READY(ssc
, 0);
10727 mutex_enter(SD_MUTEX(un
));
10730 mutex_exit(SD_MUTEX(un
));
10731 cmlb_invalidate(un
->un_cmlbhandle
,
10732 (void *)SD_PATH_DIRECT
);
10733 mutex_enter(SD_MUTEX(un
));
10734 if (err
== EACCES
) {
10735 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
10736 "reservation conflict\n");
10737 rval
= SD_RESERVED_BY_OTHERS
;
10738 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
10740 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
10741 "drive offline\n");
10742 rval
= SD_NOT_READY_VALID
;
10743 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
10749 if (un
->un_f_format_in_progress
== FALSE
) {
10750 mutex_exit(SD_MUTEX(un
));
10752 (void) cmlb_validate(un
->un_cmlbhandle
, 0,
10753 (void *)SD_PATH_DIRECT
);
10754 if (cmlb_partinfo(un
->un_cmlbhandle
, part
, NULL
, NULL
, NULL
,
10755 NULL
, (void *) SD_PATH_DIRECT
) != 0) {
10756 rval
= SD_NOT_READY_VALID
;
10757 mutex_enter(SD_MUTEX(un
));
10761 if (un
->un_f_pkstats_enabled
) {
10763 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
10764 "sd_ready_and_valid: un:0x%p pstats created and "
10767 mutex_enter(SD_MUTEX(un
));
10771 * If this device supports DOOR_LOCK command, try and send
10772 * this command to PREVENT MEDIA REMOVAL, but don't get upset
10773 * if it fails. For a CD, however, it is an error
10775 if (un
->un_f_doorlock_supported
) {
10776 mutex_exit(SD_MUTEX(un
));
10777 status
= sd_send_scsi_DOORLOCK(ssc
, SD_REMOVAL_PREVENT
,
10780 if ((status
!= 0) && ISCD(un
)) {
10781 rval
= SD_NOT_READY_VALID
;
10782 mutex_enter(SD_MUTEX(un
));
10784 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
10787 } else if (status
!= 0)
10788 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
10789 mutex_enter(SD_MUTEX(un
));
10792 /* The state has changed, inform the media watch routines */
10793 un
->un_mediastate
= DKIO_INSERTED
;
10794 cv_broadcast(&un
->un_state_cv
);
10795 rval
= SD_READY_VALID
;
10800 * Initialize the capacity kstat value, if no media previously
10801 * (capacity kstat is 0) and a media has been inserted
10802 * (un_blockcount > 0).
10804 if (un
->un_errstats
!= NULL
) {
10805 stp
= (struct sd_errstats
*)un
->un_errstats
->ks_data
;
10806 if ((stp
->sd_capacity
.value
.ui64
== 0) &&
10807 (un
->un_f_blockcount_is_valid
== TRUE
)) {
10808 stp
->sd_capacity
.value
.ui64
=
10809 (uint64_t)((uint64_t)un
->un_blockcount
*
10810 un
->un_sys_blocksize
);
10814 mutex_exit(SD_MUTEX(un
));
10822 * Description: Routine to limit the size of a data transfer. Used in
10823 * conjunction with physio(9F).
10825 * Arguments: bp - pointer to the indicated buf(9S) struct.
10827 * Context: Kernel thread context.
10831 sdmin(struct buf
*bp
)
10836 instance
= SDUNIT(bp
->b_edev
);
10838 un
= ddi_get_soft_state(sd_state
, instance
);
10839 ASSERT(un
!= NULL
);
10842 * We depend on buf breakup to restrict
10843 * IO size if it is enabled.
10845 if (un
->un_buf_breakup_supported
) {
10849 if (bp
->b_bcount
> un
->un_max_xfer_size
) {
10850 bp
->b_bcount
= un
->un_max_xfer_size
;
10858 * Description: Driver's read(9e) entry point function.
10860 * Arguments: dev - device number
10861 * uio - structure pointer describing where data is to be stored
10863 * cred_p - user credential pointer
10865 * Return Code: ENXIO
10868 * value returned by physio
10870 * Context: Kernel thread context.
10874 sdread(dev_t dev
, struct uio
*uio
, cred_t
*cred_p
)
10876 struct sd_lun
*un
= NULL
;
10881 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
10885 ASSERT(!mutex_owned(SD_MUTEX(un
)));
10888 if (!SD_IS_VALID_LABEL(un
) && !ISCD(un
)) {
10889 mutex_enter(SD_MUTEX(un
));
10891 * Because the call to sd_ready_and_valid will issue I/O we
10892 * must wait here if either the device is suspended or
10893 * if it's power level is changing.
10895 while ((un
->un_state
== SD_STATE_SUSPENDED
) ||
10896 (un
->un_state
== SD_STATE_PM_CHANGING
)) {
10897 cv_wait(&un
->un_suspend_cv
, SD_MUTEX(un
));
10899 un
->un_ncmds_in_driver
++;
10900 mutex_exit(SD_MUTEX(un
));
10902 /* Initialize sd_ssc_t for internal uscsi commands */
10903 ssc
= sd_ssc_init(un
);
10904 if ((sd_ready_and_valid(ssc
, SDPART(dev
))) != SD_READY_VALID
) {
10911 mutex_enter(SD_MUTEX(un
));
10912 un
->un_ncmds_in_driver
--;
10913 ASSERT(un
->un_ncmds_in_driver
>= 0);
10914 mutex_exit(SD_MUTEX(un
));
10920 * Read requests are restricted to multiples of the system block size.
10922 if (un
->un_f_rmw_type
== SD_RMW_TYPE_RETURN_ERROR
&&
10923 !un
->un_f_enable_rmw
)
10924 secmask
= un
->un_tgt_blocksize
- 1;
10926 secmask
= DEV_BSIZE
- 1;
10928 if (uio
->uio_loffset
& ((offset_t
)(secmask
))) {
10929 SD_ERROR(SD_LOG_READ_WRITE
, un
,
10930 "sdread: file offset not modulo %d\n",
10933 } else if (uio
->uio_iov
->iov_len
& (secmask
)) {
10934 SD_ERROR(SD_LOG_READ_WRITE
, un
,
10935 "sdread: transfer length not modulo %d\n",
10939 err
= physio(sdstrategy
, NULL
, dev
, B_READ
, sdmin
, uio
);
10947 * Function: sdwrite
10949 * Description: Driver's write(9e) entry point function.
10951 * Arguments: dev - device number
10952 * uio - structure pointer describing where data is stored in
10954 * cred_p - user credential pointer
10956 * Return Code: ENXIO
10959 * value returned by physio
10961 * Context: Kernel thread context.
10965 sdwrite(dev_t dev
, struct uio
*uio
, cred_t
*cred_p
)
10967 struct sd_lun
*un
= NULL
;
10972 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
10976 ASSERT(!mutex_owned(SD_MUTEX(un
)));
10978 if (!SD_IS_VALID_LABEL(un
) && !ISCD(un
)) {
10979 mutex_enter(SD_MUTEX(un
));
10981 * Because the call to sd_ready_and_valid will issue I/O we
10982 * must wait here if either the device is suspended or
10983 * if it's power level is changing.
10985 while ((un
->un_state
== SD_STATE_SUSPENDED
) ||
10986 (un
->un_state
== SD_STATE_PM_CHANGING
)) {
10987 cv_wait(&un
->un_suspend_cv
, SD_MUTEX(un
));
10989 un
->un_ncmds_in_driver
++;
10990 mutex_exit(SD_MUTEX(un
));
10992 /* Initialize sd_ssc_t for internal uscsi commands */
10993 ssc
= sd_ssc_init(un
);
10994 if ((sd_ready_and_valid(ssc
, SDPART(dev
))) != SD_READY_VALID
) {
11001 mutex_enter(SD_MUTEX(un
));
11002 un
->un_ncmds_in_driver
--;
11003 ASSERT(un
->un_ncmds_in_driver
>= 0);
11004 mutex_exit(SD_MUTEX(un
));
11010 * Write requests are restricted to multiples of the system block size.
11012 if (un
->un_f_rmw_type
== SD_RMW_TYPE_RETURN_ERROR
&&
11013 !un
->un_f_enable_rmw
)
11014 secmask
= un
->un_tgt_blocksize
- 1;
11016 secmask
= DEV_BSIZE
- 1;
11018 if (uio
->uio_loffset
& ((offset_t
)(secmask
))) {
11019 SD_ERROR(SD_LOG_READ_WRITE
, un
,
11020 "sdwrite: file offset not modulo %d\n",
11023 } else if (uio
->uio_iov
->iov_len
& (secmask
)) {
11024 SD_ERROR(SD_LOG_READ_WRITE
, un
,
11025 "sdwrite: transfer length not modulo %d\n",
11029 err
= physio(sdstrategy
, NULL
, dev
, B_WRITE
, sdmin
, uio
);
/*
 * NOTE(review): lossy extraction -- the embedded upstream line numbers
 * (11037..11119) skip values, so declarations (err, secmask, ssc), error
 * returns and closing braces are missing from this text.  Comments only
 * were added; verify against upstream sd.c before relying on this text.
 */
11037 * Function: sdaread
11039 * Description: Driver's aread(9e) entry point function.
11041 * Arguments: dev - device number
11042 * aio - structure pointer describing where data is to be stored
11043 * cred_p - user credential pointer
11045 * Return Code: ENXIO
11048 * value returned by aphysio
11050 * Context: Kernel thread context.
/*
 * Asynchronous read entry point: validates unit/label state, enforces
 * block-size alignment of offset and length, then hands off to aphysio(9F).
 */
11054 sdaread(dev_t dev
, struct aio_req
*aio
, cred_t
*cred_p
)
11056 struct sd_lun
*un
= NULL
;
11057 struct uio
*uio
= aio
->aio_uio
;
/* Look up per-unit soft state; the visible branch handles the NULL case. */
11062 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
11066 ASSERT(!mutex_owned(SD_MUTEX(un
)));
/* For non-CD devices without a valid label, (re)validate before the I/O. */
11068 if (!SD_IS_VALID_LABEL(un
) && !ISCD(un
)) {
11069 mutex_enter(SD_MUTEX(un
));
11071 * Because the call to sd_ready_and_valid will issue I/O we
11072 * must wait here if either the device is suspended or
11073 * if it's power level is changing.
11075 while ((un
->un_state
== SD_STATE_SUSPENDED
) ||
11076 (un
->un_state
== SD_STATE_PM_CHANGING
)) {
11077 cv_wait(&un
->un_suspend_cv
, SD_MUTEX(un
));
/* Count this command as in-flight while the readiness check runs. */
11079 un
->un_ncmds_in_driver
++;
11080 mutex_exit(SD_MUTEX(un
));
11082 /* Initialize sd_ssc_t for internal uscsi commands */
11083 ssc
= sd_ssc_init(un
);
11084 if ((sd_ready_and_valid(ssc
, SDPART(dev
))) != SD_READY_VALID
) {
/* Readiness check failed: undo the in-flight count taken above. */
11091 mutex_enter(SD_MUTEX(un
));
11092 un
->un_ncmds_in_driver
--;
11093 ASSERT(un
->un_ncmds_in_driver
>= 0);
11094 mutex_exit(SD_MUTEX(un
));
11100 * Read requests are restricted to multiples of the system block size.
/*
 * secmask selects the alignment unit: target block size when
 * read-modify-write is disallowed, DEV_BSIZE otherwise.
 */
11102 if (un
->un_f_rmw_type
== SD_RMW_TYPE_RETURN_ERROR
&&
11103 !un
->un_f_enable_rmw
)
11104 secmask
= un
->un_tgt_blocksize
- 1;
11106 secmask
= DEV_BSIZE
- 1;
/* Reject misaligned file offsets and transfer lengths. */
11108 if (uio
->uio_loffset
& ((offset_t
)(secmask
))) {
11109 SD_ERROR(SD_LOG_READ_WRITE
, un
,
11110 "sdaread: file offset not modulo %d\n",
11113 } else if (uio
->uio_iov
->iov_len
& (secmask
)) {
11114 SD_ERROR(SD_LOG_READ_WRITE
, un
,
11115 "sdaread: transfer length not modulo %d\n",
/* Alignment OK: issue the async read through the strategy routine. */
11119 err
= aphysio(sdstrategy
, anocancel
, dev
, B_READ
, sdmin
, aio
);
/*
 * NOTE(review): lossy extraction -- embedded upstream line numbers
 * (11127..11209) skip values; declarations, error returns and braces are
 * missing.  Comments only were added; verify against upstream sd.c.
 */
11127 * Function: sdawrite
11129 * Description: Driver's awrite(9e) entry point function.
11131 * Arguments: dev - device number
11132 * aio - structure pointer describing where data is stored
11133 * cred_p - user credential pointer
11135 * Return Code: ENXIO
11138 * value returned by aphysio
11140 * Context: Kernel thread context.
/*
 * Asynchronous write entry point; mirrors sdaread() but issues B_WRITE
 * via aphysio(9F).
 */
11144 sdawrite(dev_t dev
, struct aio_req
*aio
, cred_t
*cred_p
)
11146 struct sd_lun
*un
= NULL
;
11147 struct uio
*uio
= aio
->aio_uio
;
/* Look up per-unit soft state; the visible branch handles the NULL case. */
11152 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
11156 ASSERT(!mutex_owned(SD_MUTEX(un
)));
/* For non-CD devices without a valid label, (re)validate before the I/O. */
11158 if (!SD_IS_VALID_LABEL(un
) && !ISCD(un
)) {
11159 mutex_enter(SD_MUTEX(un
));
11161 * Because the call to sd_ready_and_valid will issue I/O we
11162 * must wait here if either the device is suspended or
11163 * if it's power level is changing.
11165 while ((un
->un_state
== SD_STATE_SUSPENDED
) ||
11166 (un
->un_state
== SD_STATE_PM_CHANGING
)) {
11167 cv_wait(&un
->un_suspend_cv
, SD_MUTEX(un
));
/* Count this command as in-flight while the readiness check runs. */
11169 un
->un_ncmds_in_driver
++;
11170 mutex_exit(SD_MUTEX(un
));
11172 /* Initialize sd_ssc_t for internal uscsi commands */
11173 ssc
= sd_ssc_init(un
);
11174 if ((sd_ready_and_valid(ssc
, SDPART(dev
))) != SD_READY_VALID
) {
/* Readiness check failed: undo the in-flight count taken above. */
11181 mutex_enter(SD_MUTEX(un
));
11182 un
->un_ncmds_in_driver
--;
11183 ASSERT(un
->un_ncmds_in_driver
>= 0);
11184 mutex_exit(SD_MUTEX(un
));
11190 * Write requests are restricted to multiples of the system block size.
/*
 * secmask selects the alignment unit: target block size when
 * read-modify-write is disallowed, DEV_BSIZE otherwise.
 */
11192 if (un
->un_f_rmw_type
== SD_RMW_TYPE_RETURN_ERROR
&&
11193 !un
->un_f_enable_rmw
)
11194 secmask
= un
->un_tgt_blocksize
- 1;
11196 secmask
= DEV_BSIZE
- 1;
/* Reject misaligned file offsets and transfer lengths. */
11198 if (uio
->uio_loffset
& ((offset_t
)(secmask
))) {
11199 SD_ERROR(SD_LOG_READ_WRITE
, un
,
11200 "sdawrite: file offset not modulo %d\n",
11203 } else if (uio
->uio_iov
->iov_len
& (secmask
)) {
11204 SD_ERROR(SD_LOG_READ_WRITE
, un
,
11205 "sdawrite: transfer length not modulo %d\n",
/* Alignment OK: issue the async write through the strategy routine. */
11209 err
= aphysio(sdstrategy
, anocancel
, dev
, B_WRITE
, sdmin
, aio
);
11220 * Driver IO processing follows the following sequence:
11222 * sdioctl(9E) sdstrategy(9E) biodone(9F)
11225 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+
11228 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone()
11231 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | |
11233 * +---+ | +------------+ +-------+
11235 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11237 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() |
11239 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11241 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() |
11243 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11245 * | sd_checksum_iostart() sd_checksum_iodone() |
11247 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+
11249 * | sd_pm_iostart() sd_pm_iodone() |
11252 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+
11255 * sd_core_iostart() |
11257 * | +------>(*destroypkt)()
11258 * +-> sd_start_cmds() <-+ | |
11260 * | | | scsi_destroy_pkt(9F)
11262 * +->(*initpkt)() +- sdintr()
11264 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx()
11265 * | +-> scsi_setup_cdb(9F) |
11267 * +--> scsi_transport(9F) |
11269 * +----> SCSA ---->+
11272 * This code is based upon the following presumptions:
11274 * - iostart and iodone functions operate on buf(9S) structures. These
11275 * functions perform the necessary operations on the buf(9S) and pass
11276 * them along to the next function in the chain by using the macros
11277 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
11278 * (for iodone side functions).
11280 * - The iostart side functions may sleep. The iodone side functions
11281 * are called under interrupt context and may NOT sleep. Therefore
11282 * iodone side functions also may not call iostart side functions.
11283 * (NOTE: iostart side functions should NOT sleep for memory, as
11284 * this could result in deadlock.)
11286 * - An iostart side function may call its corresponding iodone side
11287 * function directly (if necessary).
11289 * - In the event of an error, an iostart side function can return a buf(9S)
11290 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
11291 * b_error in the usual way of course).
11293 * - The taskq mechanism may be used by the iodone side functions to dispatch
11294 * requests to the iostart side functions. The iostart side functions in
11295 * this case would be called under the context of a taskq thread, so it's
11296 * OK for them to block/sleep/spin in this case.
11298 * - iostart side functions may allocate "shadow" buf(9S) structs and
11299 * pass them along to the next function in the chain. The corresponding
11300 * iodone side functions must coalesce the "shadow" bufs and return
11301 * the "original" buf to the next higher layer.
11303 * - The b_private field of the buf(9S) struct holds a pointer to
11304 * an sd_xbuf struct, which contains information needed to
11305 * construct the scsi_pkt for the command.
11307 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11308 * layer must acquire & release the SD_MUTEX(un) as needed.
11313 * Create taskq for all targets in the system. This is created at
11314 * _init(9E) and destroyed at _fini(9E).
11316 * Note: here we set the minalloc to a reasonably high number to ensure that
11317 * we will have an adequate supply of task entries available at interrupt time.
11318 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11319 * sd_create_taskq(). Since we do not want to sleep for allocations at
11320 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11321 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11322 * requests any one instant in time.
/*
 * NOTE(review): lossy extraction -- comments only added below; tokens are
 * unchanged from the (fragmented) original text.
 *
 * Driver-wide task queues: sd_tq is the general driver taskq (8 threads,
 * min/max alloc 256 so taskq_dispatch fails rather than sleeps at
 * interrupt time); sd_wmr_tq is the single-threaded taskq used for the
 * write part of read-modify-write on non-512-byte-block devices.
 */
11324 #define SD_TASKQ_NUMTHREADS 8
11325 #define SD_TASKQ_MINALLOC 256
11326 #define SD_TASKQ_MAXALLOC 256
11328 static taskq_t
*sd_tq
= NULL
;
11329 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq
))
11331 static int sd_taskq_minalloc
= SD_TASKQ_MINALLOC
;
11332 static int sd_taskq_maxalloc
= SD_TASKQ_MAXALLOC
;
11335 * The following task queue is being created for the write part of
11336 * read-modify-write of non-512 block size devices.
11337 * Limit the number of threads to 1 for now. This number has been chosen
11338 * considering the fact that it applies only to dvd ram drives/MO drives
11339 * currently. Performance for which is not main criteria at this stage.
11340 * Note: It needs to be explored if we can use a single taskq in future
11342 #define SD_WMR_TASKQ_NUMTHREADS 1
11343 static taskq_t
*sd_wmr_tq
= NULL
;
11344 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq
))
/*
 * NOTE(review): lossy extraction -- the return-type line and closing brace
 * are missing from this text (upstream line numbers skip).  Comments only
 * were added; verify against upstream sd.c.
 */
11347 * Function: sd_taskq_create
11349 * Description: Create taskq thread(s) and preallocate task entries
11351 * Return Code: Returns a pointer to the allocated taskq_t.
11353 * Context: Can sleep. Requires blockable context.
11355 * Notes: - The taskq() facility currently is NOT part of the DDI.
11356 * (definitely NOT recommeded for 3rd-party drivers!) :-)
11357 * - taskq_create() will block for memory, also it will panic
11358 * if it cannot create the requested number of threads.
11359 * - Currently taskq_create() creates threads that cannot be
11361 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11362 * supply of taskq entries at interrupt time (ie, so that we
11363 * do not have to sleep for memory)
/* Creates both sd_tq and sd_wmr_tq; both must be NULL on entry. */
11367 sd_taskq_create(void)
11369 char taskq_name
[TASKQ_NAMELEN
];
11371 ASSERT(sd_tq
== NULL
);
11372 ASSERT(sd_wmr_tq
== NULL
);
/* Main driver taskq: "<label>_drv_taskq", prepopulated entries. */
11374 (void) snprintf(taskq_name
, sizeof (taskq_name
),
11375 "%s_drv_taskq", sd_label
);
11376 sd_tq
= (taskq_create(taskq_name
, SD_TASKQ_NUMTHREADS
,
11377 (v
.v_maxsyspri
- 2), sd_taskq_minalloc
, sd_taskq_maxalloc
,
11378 TASKQ_PREPOPULATE
));
/* Read-modify-write taskq: "<label>_rmw_taskq", single thread. */
11380 (void) snprintf(taskq_name
, sizeof (taskq_name
),
11381 "%s_rmw_taskq", sd_label
);
11382 sd_wmr_tq
= (taskq_create(taskq_name
, SD_WMR_TASKQ_NUMTHREADS
,
11383 (v
.v_maxsyspri
- 2), sd_taskq_minalloc
, sd_taskq_maxalloc
,
11384 TASKQ_PREPOPULATE
));
/*
 * NOTE(review): lossy extraction -- the return-type line, the lines that
 * presumably reset sd_tq/sd_wmr_tq, and the closing brace are missing
 * (upstream numbering skips 11403+).  Comments only were added.
 */
11389 * Function: sd_taskq_delete
11391 * Description: Complementary cleanup routine for sd_taskq_create().
11393 * Context: Kernel thread context.
/* Destroys both taskqs created by sd_taskq_create(); both must exist. */
11397 sd_taskq_delete(void)
11399 ASSERT(sd_tq
!= NULL
);
11400 ASSERT(sd_wmr_tq
!= NULL
);
11401 taskq_destroy(sd_tq
);
11402 taskq_destroy(sd_wmr_tq
);
/*
 * NOTE(review): lossy extraction -- local declarations, several error
 * returns (e.g. after the DUMPING bioerror) and some braces are missing
 * (upstream numbers skip).  Comments only were added; verify upstream.
 */
11409 * Function: sdstrategy
11411 * Description: Driver's strategy (9E) entry point function.
11413 * Arguments: bp - pointer to buf(9S)
11415 * Return Code: Always returns zero
11417 * Context: Kernel thread context.
/*
 * strategy(9E) entry: books the command in, handles suspend/PM waits and
 * ATAPI PIO mapping, then queues the buf via ddi_xbuf_qstrategy().
 */
11421 sdstrategy(struct buf
*bp
)
11425 un
= ddi_get_soft_state(sd_state
, SD_GET_INSTANCE_FROM_BUF(bp
));
11428 bp
->b_resid
= bp
->b_bcount
;
11433 /* As was done in the past, fail new cmds. if state is dumping. */
11434 if (un
->un_state
== SD_STATE_DUMPING
) {
11435 bioerror(bp
, ENXIO
);
11436 bp
->b_resid
= bp
->b_bcount
;
11441 ASSERT(!mutex_owned(SD_MUTEX(un
)));
11444 * Commands may sneak in while we released the mutex in
11445 * DDI_SUSPEND, we should block new commands. However, old
11446 * commands that are still in the driver at this point should
11447 * still be allowed to drain.
11449 mutex_enter(SD_MUTEX(un
));
11451 * Must wait here if either the device is suspended or
11452 * if it's power level is changing.
11454 while ((un
->un_state
== SD_STATE_SUSPENDED
) ||
11455 (un
->un_state
== SD_STATE_PM_CHANGING
)) {
11456 cv_wait(&un
->un_suspend_cv
, SD_MUTEX(un
));
/* Count the command in-flight under SD_MUTEX. */
11459 un
->un_ncmds_in_driver
++;
11462 * atapi: Since we are running the CD for now in PIO mode we need to
11463 * call bp_mapin here to avoid bp_mapin called interrupt context under
11464 * the HBA's init_pkt routine.
11466 if (un
->un_f_cfg_is_atapi
== TRUE
) {
/* Drop the mutex around the (missing-here) bp_mapin call. */
11467 mutex_exit(SD_MUTEX(un
));
11469 mutex_enter(SD_MUTEX(un
));
11471 SD_INFO(SD_LOG_IO
, un
, "sdstrategy: un_ncmds_in_driver = %ld\n",
11472 un
->un_ncmds_in_driver
);
/* Writes mean the cache may need a later SYNCHRONIZE CACHE. */
11474 if (bp
->b_flags
& B_WRITE
)
11475 un
->un_f_sync_cache_required
= TRUE
;
11477 mutex_exit(SD_MUTEX(un
));
11480 * This will (eventually) allocate the sd_xbuf area and
11481 * call sd_xbuf_strategy(). We just want to return the
11482 * result of ddi_xbuf_qstrategy so that we have an opt-
11483 * imized tail call which saves us a stack frame.
11485 return (ddi_xbuf_qstrategy(bp
, un
->un_xbuf_attr
));
/*
 * NOTE(review): lossy extraction -- return type and closing brace are
 * missing from this text.  Comments only were added.
 */
11490 * Function: sd_xbuf_strategy
11492 * Description: Function for initiating IO operations via the
11493 * ddi_xbuf_qstrategy() mechanism.
11495 * Context: Kernel thread context.
/*
 * Callback from ddi_xbuf_qstrategy(): arg is the sd_lun soft state
 * registered with the xbuf attribute.
 */
11499 sd_xbuf_strategy(struct buf
*bp
, ddi_xbuf_t xp
, void *arg
)
11501 struct sd_lun
*un
= arg
;
11503 ASSERT(bp
!= NULL
);
11504 ASSERT(xp
!= NULL
);
11505 ASSERT(un
!= NULL
);
11506 ASSERT(!mutex_owned(SD_MUTEX(un
)));
11509 * Initialize the fields in the xbuf and save a pointer to the
11510 * xbuf in bp->b_private.
11512 sd_xbuf_init(un
, bp
, xp
, SD_CHAIN_BUFIO
, NULL
);
11514 /* Send the buf down the iostart chain */
11515 SD_BEGIN_IOSTART(((struct sd_xbuf
*)xp
)->xb_chain_iostart
, un
, bp
);
/*
 * NOTE(review): lossy extraction -- several lines inside the BUFIO case
 * (e.g. blknomask assignments, the lines assigning index for the MSS
 * chains), break statements and braces are missing (upstream numbers
 * skip).  Comments only were added; verify against upstream sd.c.
 */
11520 * Function: sd_xbuf_init
11522 * Description: Prepare the given sd_xbuf struct for use.
11524 * Arguments: un - ptr to softstate
11525 * bp - ptr to associated buf(9S)
11526 * xp - ptr to associated sd_xbuf
11527 * chain_type - IO chain type to use:
11532 * SD_CHAIN_DIRECT_PRIORITY
11533 * pktinfop - ptr to private data struct for scsi_pkt(9S)
11534 * initialization; may be NULL if none.
11536 * Context: Kernel thread context
/*
 * Fills in the xbuf, picks the iostart/iodone chain indexes from
 * sd_chain_index_map based on chain_type, and links xp into bp->b_private.
 */
11540 sd_xbuf_init(struct sd_lun
*un
, struct buf
*bp
, struct sd_xbuf
*xp
,
11541 uchar_t chain_type
, void *pktinfop
)
11545 ASSERT(un
!= NULL
);
11546 ASSERT(bp
!= NULL
);
11547 ASSERT(xp
!= NULL
);
11549 SD_INFO(SD_LOG_IO
, un
, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
/* Basic xbuf bookkeeping copied from the buf. */
11553 xp
->xb_pktp
= NULL
;
11554 xp
->xb_pktinfo
= pktinfop
;
11555 xp
->xb_private
= bp
->b_private
;
11556 xp
->xb_blkno
= (daddr_t
)bp
->b_blkno
;
11559 * Set up the iostart and iodone chain indexes in the xbuf, based
11560 * upon the specified chain type to use.
11562 switch (chain_type
) {
11563 case SD_CHAIN_NULL
:
11565 * Fall thru to just use the values for the buf type, even
11566 * tho for the NULL chain these values will never be used.
11569 case SD_CHAIN_BUFIO
:
11570 index
= un
->un_buf_chain_type
;
/*
 * Non-removable media with a non-DEV_BSIZE target block size (or
 * forced RMW) may need the multi-sector-size (MSS) chains.
 */
11571 if ((!un
->un_f_has_removable_media
) &&
11572 (un
->un_tgt_blocksize
!= 0) &&
11573 (un
->un_tgt_blocksize
!= DEV_BSIZE
||
11574 un
->un_f_enable_rmw
)) {
11575 int secmask
= 0, blknomask
= 0;
11576 if (un
->un_f_enable_rmw
) {
/* Masks from the physical block size when RMW is enabled... */
11578 (un
->un_phy_blocksize
/ DEV_BSIZE
) - 1;
11579 secmask
= un
->un_phy_blocksize
- 1;
/* ...otherwise from the target block size. */
11582 (un
->un_tgt_blocksize
/ DEV_BSIZE
) - 1;
11583 secmask
= un
->un_tgt_blocksize
- 1;
/* Misaligned block number or byte count selects an MSS chain. */
11586 if ((bp
->b_lblkno
& (blknomask
)) ||
11587 (bp
->b_bcount
& (secmask
))) {
11588 if ((un
->un_f_rmw_type
!=
11589 SD_RMW_TYPE_RETURN_ERROR
) ||
11590 un
->un_f_enable_rmw
) {
11591 if (un
->un_f_pm_is_enabled
== FALSE
)
11593 SD_CHAIN_INFO_MSS_DSK_NO_PM
;
11596 SD_CHAIN_INFO_MSS_DISK
;
11601 case SD_CHAIN_USCSI
:
11602 index
= un
->un_uscsi_chain_type
;
11604 case SD_CHAIN_DIRECT
:
11605 index
= un
->un_direct_chain_type
;
11607 case SD_CHAIN_DIRECT_PRIORITY
:
11608 index
= un
->un_priority_chain_type
;
11611 /* We're really broken if we ever get here... */
11612 panic("sd_xbuf_init: illegal chain type!");
/* Resolve the chosen chain index into iostart/iodone entry points. */
11616 xp
->xb_chain_iostart
= sd_chain_index_map
[index
].sci_iostart_index
;
11617 xp
->xb_chain_iodone
= sd_chain_index_map
[index
].sci_iodone_index
;
11620 * It might be a bit easier to simply bzero the entire xbuf above,
11621 * but it turns out that since we init a fair number of members anyway,
11622 * we save a fair number cycles by doing explicit assignment of zero.
11624 xp
->xb_pkt_flags
= 0;
11625 xp
->xb_dma_resid
= 0;
11626 xp
->xb_retry_count
= 0;
11627 xp
->xb_victim_retry_count
= 0;
11628 xp
->xb_ua_retry_count
= 0;
11629 xp
->xb_nr_retry_count
= 0;
11630 xp
->xb_sense_bp
= NULL
;
11631 xp
->xb_sense_status
= 0;
11632 xp
->xb_sense_state
= 0;
11633 xp
->xb_sense_resid
= 0;
/* Hook the xbuf onto the buf and clear completion/error state. */
11636 bp
->b_private
= xp
;
11637 bp
->b_flags
&= ~(B_DONE
| B_ERROR
);
11639 bp
->av_forw
= NULL
;
11640 bp
->av_back
= NULL
;
11643 SD_INFO(SD_LOG_IO
, un
, "sd_xbuf_init: done.\n");
/*
 * NOTE(review): lossy extraction -- local declarations, some braces,
 * break statements and the final return are missing (upstream numbers
 * skip).  Comments only were added; verify against upstream sd.c.
 */
11648 * Function: sd_uscsi_strategy
11650 * Description: Wrapper for calling into the USCSI chain via physio(9F)
11652 * Arguments: bp - buf struct ptr
11654 * Return Code: Always returns 0
11656 * Context: Kernel thread context
/*
 * USCSI strategy: books the command in, selects a chain type from the
 * sd_uscsi_info flags, allocates/initializes an sd_xbuf and starts the
 * iostart chain.
 */
11660 sd_uscsi_strategy(struct buf
*bp
)
11663 struct sd_uscsi_info
*uip
;
11664 struct sd_xbuf
*xp
;
11665 uchar_t chain_type
;
11668 ASSERT(bp
!= NULL
);
11670 un
= ddi_get_soft_state(sd_state
, SD_GET_INSTANCE_FROM_BUF(bp
));
11673 bp
->b_resid
= bp
->b_bcount
;
11678 ASSERT(!mutex_owned(SD_MUTEX(un
)));
11680 SD_TRACE(SD_LOG_IO
, un
, "sd_uscsi_strategy: entry: buf:0x%p\n", bp
);
11683 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
11685 ASSERT(bp
->b_private
!= NULL
);
11686 uip
= (struct sd_uscsi_info
*)bp
->b_private
;
/* First CDB byte identifies the SCSI opcode for the checks below. */
11687 cmd
= ((struct uscsi_cmd
*)(uip
->ui_cmdp
))->uscsi_cdb
[0];
11689 mutex_enter(SD_MUTEX(un
));
11691 * atapi: Since we are running the CD for now in PIO mode we need to
11692 * call bp_mapin here to avoid bp_mapin called interrupt context under
11693 * the HBA's init_pkt routine.
11695 if (un
->un_f_cfg_is_atapi
== TRUE
) {
11696 mutex_exit(SD_MUTEX(un
));
11698 mutex_enter(SD_MUTEX(un
));
11700 un
->un_ncmds_in_driver
++;
11701 SD_INFO(SD_LOG_IO
, un
, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11702 un
->un_ncmds_in_driver
);
/*
 * Data-bearing writes (other than MODE SELECT) may dirty the device
 * cache, so flag that a sync-cache will be required.
 */
11704 if ((bp
->b_flags
& B_WRITE
) && (bp
->b_bcount
!= 0) &&
11705 (cmd
!= SCMD_MODE_SELECT
) && (cmd
!= SCMD_MODE_SELECT_G1
))
11706 un
->un_f_sync_cache_required
= TRUE
;
11708 mutex_exit(SD_MUTEX(un
));
/* Map the caller's path flag to an IO chain type. */
11710 switch (uip
->ui_flags
) {
11711 case SD_PATH_DIRECT
:
11712 chain_type
= SD_CHAIN_DIRECT
;
11714 case SD_PATH_DIRECT_PRIORITY
:
11715 chain_type
= SD_CHAIN_DIRECT_PRIORITY
;
11718 chain_type
= SD_CHAIN_USCSI
;
11723 * We may allocate extra buf for external USCSI commands. If the
11724 * application asks for bigger than 20-byte sense data via USCSI,
11725 * SCSA layer will allocate 252 bytes sense buf for that command.
11727 if (((struct uscsi_cmd
*)(uip
->ui_cmdp
))->uscsi_rqlen
>
11729 xp
= kmem_zalloc(sizeof (struct sd_xbuf
) - SENSE_LENGTH
+
11730 MAX_SENSE_LENGTH
, KM_SLEEP
);
11732 xp
= kmem_zalloc(sizeof (struct sd_xbuf
), KM_SLEEP
);
11735 sd_xbuf_init(un
, bp
, xp
, chain_type
, uip
->ui_cmdp
);
11737 /* Use the index obtained within xbuf_init */
11738 SD_BEGIN_IOSTART(xp
->xb_chain_iostart
, un
, bp
);
11740 SD_TRACE(SD_LOG_IO
, un
, "sd_uscsi_strategy: exit: buf:0x%p\n", bp
);
/*
 * NOTE(review): lossy extraction -- local declarations (rval, ssc, un),
 * the NULL-un check, sd_ssc_fini and the return are missing (upstream
 * numbers skip).  Comments only were added.
 */
11746 * Function: sd_send_scsi_cmd
11748 * Description: Runs a USCSI command for user (when called thru sdioctl),
11749 * or for the driver
11751 * Arguments: dev - the dev_t for the device
11752 * incmd - ptr to a valid uscsi_cmd struct
11753 * flag - bit flag, indicating open settings, 32/64 bit type
11754 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11755 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11756 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11757 * to use the USCSI "direct" chain and bypass the normal
11760 * Return Code: 0 - successful completion of the given command
11761 * EIO - scsi_uscsi_handle_command() failed
11762 * ENXIO - soft state not found for specified dev
11764 * EFAULT - copyin/copyout error
11765 * return code of scsi_uscsi_handle_command():
11770 * Context: Waits for command to complete. Can sleep.
/* Thin wrapper: looks up the unit and delegates to sd_ssc_send(). */
11774 sd_send_scsi_cmd(dev_t dev
, struct uscsi_cmd
*incmd
, int flag
,
11775 enum uio_seg dataspace
, int path_flag
)
11781 un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
));
11787 * Using sd_ssc_send to handle uscsi cmd
11789 ssc
= sd_ssc_init(un
);
11790 rval
= sd_ssc_send(ssc
, incmd
, flag
, dataspace
, path_flag
);
/*
 * NOTE(review): lossy extraction -- the lines assigning ssc->ssc_un and
 * returning ssc, plus braces, are missing (upstream numbers skip).
 * Comments only were added.
 */
11797 * Function: sd_ssc_init
11799 * Description: Uscsi end-user call this function to initialize necessary
11800 * fields, such as uscsi_cmd and sd_uscsi_info struct.
11802 * The return value of sd_send_scsi_cmd will be treated as a
11803 * fault in various conditions. Even it is not Zero, some
11804 * callers may ignore the return value. That is to say, we can
11805 * not make an accurate assessment in sdintr, since if a
11806 * command is failed in sdintr it does not mean the caller of
11807 * sd_send_scsi_cmd will treat it as a real failure.
11809 * To avoid printing too many error logs for a failed uscsi
11810 * packet that the caller may not treat it as a failure, the
11811 * sd will keep silent for handling all uscsi commands.
11813 * During detach->attach and attach-open, for some types of
11814 * problems, the driver should be providing information about
11815 * the problem encountered. Device use USCSI_SILENT, which
11816 * suppresses all driver information. The result is that no
11817 * information about the problem is available. Being
11818 * completely silent during this time is inappropriate. The
11819 * driver needs a more selective filter than USCSI_SILENT, so
11820 * that information related to faults is provided.
11822 * To make the accurate accessment, the caller of
11823 * sd_send_scsi_USCSI_CMD should take the ownership and
11824 * get necessary information to print error messages.
11826 * If we want to print necessary info of uscsi command, we need to
11827 * keep the uscsi_cmd and sd_uscsi_info till we can make the
11828 * assessment. We use sd_ssc_init to alloc necessary
11829 * structs for sending an uscsi command and we are also
11830 * responsible for free the memory by calling
11833 * The calling secquences will look like:
11838 * sd_send_scsi_USCSI_CMD->
11839 * sd_ssc_send-> - - - sdintr
11842 * if we think the return value should be treated as a
11843 * failure, we make the accessment here and print out
11844 * necessary by retrieving uscsi_cmd and sd_uscsi_info'
11851 * Arguments: un - pointer to driver soft state (unit) structure for this
11854 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11855 * uscsi_cmd and sd_uscsi_info.
11856 * NULL - if can not alloc memory for sd_ssc_t struct
11858 * Context: Kernel Thread.
/* Allocates an sd_ssc_t plus its embedded uscsi_cmd and sd_uscsi_info. */
11861 sd_ssc_init(struct sd_lun
*un
)
11864 struct uscsi_cmd
*ucmdp
;
11865 struct sd_uscsi_info
*uip
;
11867 ASSERT(un
!= NULL
);
11868 ASSERT(!mutex_owned(SD_MUTEX(un
)));
11871 * Allocate sd_ssc_t structure
11873 ssc
= kmem_zalloc(sizeof (sd_ssc_t
), KM_SLEEP
);
11876 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11878 ucmdp
= scsi_uscsi_alloc();
11881 * Allocate sd_uscsi_info structure
11883 uip
= kmem_zalloc(sizeof (struct sd_uscsi_info
), KM_SLEEP
);
/* Wire the allocations into the ssc handle. */
11885 ssc
->ssc_uscsi_cmd
= ucmdp
;
11886 ssc
->ssc_uscsi_info
= uip
;
/*
 * NOTE(review): lossy extraction -- return type and braces are missing
 * from this text.  Comments only were added.
 */
11893 * Function: sd_ssc_fini
11895 * Description: To free sd_ssc_t and it's hanging off
11897 * Arguments: ssc - struct pointer of sd_ssc_t.
/* Releases the uscsi_cmd, the sd_uscsi_info and finally the ssc itself. */
11900 sd_ssc_fini(sd_ssc_t
*ssc
)
11902 scsi_uscsi_free(ssc
->ssc_uscsi_cmd
);
11904 if (ssc
->ssc_uscsi_info
!= NULL
) {
11905 kmem_free(ssc
->ssc_uscsi_info
, sizeof (struct sd_uscsi_info
));
/* NULL the pointer to guard against use-after-free/double-free. */
11906 ssc
->ssc_uscsi_info
= NULL
;
11909 kmem_free(ssc
, sizeof (sd_ssc_t
));
/*
 * NOTE(review): lossy extraction -- local declarations, error gotos,
 * break statements, some braces and the final return are missing
 * (upstream numbers skip).  Comments only were added; verify upstream.
 */
11914 * Function: sd_ssc_send
11916 * Description: Runs a USCSI command for user when called through sdioctl,
11917 * or for the driver.
11919 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
11920 * sd_uscsi_info in.
11921 * incmd - ptr to a valid uscsi_cmd struct
11922 * flag - bit flag, indicating open settings, 32/64 bit type
11923 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11924 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11925 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11926 * to use the USCSI "direct" chain and bypass the normal
11929 * Return Code: 0 - successful completion of the given command
11930 * EIO - scsi_uscsi_handle_command() failed
11931 * ENXIO - soft state not found for specified dev
11932 * ECANCELED - command cancelled due to low power
11934 * EFAULT - copyin/copyout error
11935 * return code of scsi_uscsi_handle_command():
11940 * Context: Kernel Thread;
11941 * Waits for command to complete. Can sleep.
/*
 * Core uscsi dispatch: enforces FMA assessment pairing, low-power
 * fail-fast, copies the command in, runs it via scsi_uscsi_handle_cmd()
 * through sd_uscsi_strategy, then copies results back out.
 */
11944 sd_ssc_send(sd_ssc_t
*ssc
, struct uscsi_cmd
*incmd
, int flag
,
11945 enum uio_seg dataspace
, int path_flag
)
11947 struct sd_uscsi_info
*uip
;
11948 struct uscsi_cmd
*uscmd
;
11955 ASSERT(ssc
!= NULL
);
11957 ASSERT(un
!= NULL
);
11958 uscmd
= ssc
->ssc_uscsi_cmd
;
11959 ASSERT(uscmd
!= NULL
);
11960 ASSERT(!mutex_owned(SD_MUTEX(un
)));
11961 if (ssc
->ssc_flags
& SSC_FLAGS_NEED_ASSESSMENT
) {
11963 * If enter here, it indicates that the previous uscsi
11964 * command has not been processed by sd_ssc_assessment.
11965 * This is violating our rules of FMA telemetry processing.
11966 * We should print out this message and the last undisposed
11969 if (uscmd
->uscsi_cdb
!= NULL
) {
11970 SD_INFO(SD_LOG_SDTEST
, un
,
11971 "sd_ssc_send is missing the alternative "
11972 "sd_ssc_assessment when running command 0x%x.\n",
11973 uscmd
->uscsi_cdb
[0]);
11976 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
11977 * the initial status.
11979 ssc
->ssc_flags
= SSC_FLAGS_UNKNOWN
;
11983 * We need to make sure sd_ssc_send will have sd_ssc_assessment
11984 * followed to avoid missing FMA telemetries.
11986 ssc
->ssc_flags
|= SSC_FLAGS_NEED_ASSESSMENT
;
11989 * if USCSI_PMFAILFAST is set and un is in low power, fail the
11990 * command immediately.
11992 mutex_enter(SD_MUTEX(un
));
11993 mutex_enter(&un
->un_pm_mutex
);
11994 if ((uscmd
->uscsi_flags
& USCSI_PMFAILFAST
) &&
11995 SD_DEVICE_IS_IN_LOW_POWER(un
)) {
11996 SD_TRACE(SD_LOG_IO
, un
, "sd_ssc_send:"
11997 "un:0x%p is in low power\n", un
);
/* Release in reverse acquisition order before bailing out. */
11998 mutex_exit(&un
->un_pm_mutex
);
11999 mutex_exit(SD_MUTEX(un
));
12000 return (ECANCELED
);
12002 mutex_exit(&un
->un_pm_mutex
);
12003 mutex_exit(SD_MUTEX(un
));
/* Trace which address space the data transfer uses. */
12006 switch (dataspace
) {
12007 case UIO_USERSPACE
:
12008 SD_TRACE(SD_LOG_IO
, un
,
12009 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un
);
12012 SD_TRACE(SD_LOG_IO
, un
,
12013 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un
);
12016 SD_TRACE(SD_LOG_IO
, un
,
12017 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un
);
/* Copy the caller's uscsi_cmd into the kernel-side uscmd. */
12022 rval
= scsi_uscsi_copyin((intptr_t)incmd
, flag
,
12023 SD_ADDRESS(un
), &uscmd
);
12025 SD_TRACE(SD_LOG_IO
, un
, "sd_sense_scsi_cmd: "
12026 "scsi_uscsi_alloc_and_copyin failed\n", un
);
/* FORMAT UNIT in flight is tracked so other paths can back off. */
12030 if ((uscmd
->uscsi_cdb
!= NULL
) &&
12031 (uscmd
->uscsi_cdb
[0] == SCMD_FORMAT
)) {
12032 mutex_enter(SD_MUTEX(un
));
12033 un
->un_f_format_in_progress
= TRUE
;
12034 mutex_exit(SD_MUTEX(un
));
12039 * Allocate an sd_uscsi_info struct and fill it with the info
12040 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
12041 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
12042 * since we allocate the buf here in this function, we do not
12043 * need to preserve the prior contents of b_private.
12044 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
12046 uip
= ssc
->ssc_uscsi_info
;
12047 uip
->ui_flags
= path_flag
;
12048 uip
->ui_cmdp
= uscmd
;
12051 * Commands sent with priority are intended for error recovery
12052 * situations, and do not have retries performed.
12054 if (path_flag
== SD_PATH_DIRECT_PRIORITY
) {
12055 uscmd
->uscsi_flags
|= USCSI_DIAGNOSE
;
12057 uscmd
->uscsi_flags
&= ~USCSI_NOINTR
;
/* Dispatch the command through the USCSI strategy path. */
12059 dev
= SD_GET_DEV(un
);
12060 rval
= scsi_uscsi_handle_cmd(dev
, dataspace
, uscmd
,
12061 sd_uscsi_strategy
, NULL
, uip
);
12064 * mark ssc_flags right after handle_cmd to make sure
12065 * the uscsi has been sent
12067 ssc
->ssc_flags
|= SSC_FLAGS_CMD_ISSUED
;
12070 SD_INFO(SD_LOG_IO
, un
, "sd_ssc_send: "
12071 "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
12072 uscmd
->uscsi_status
, uscmd
->uscsi_resid
);
12073 if (uscmd
->uscsi_bufaddr
!= NULL
) {
12074 SD_INFO(SD_LOG_IO
, un
, "sd_ssc_send: "
12075 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
12076 uscmd
->uscsi_bufaddr
, uscmd
->uscsi_buflen
);
12077 if (dataspace
== UIO_SYSSPACE
) {
12078 SD_DUMP_MEMORY(un
, SD_LOG_IO
,
12079 "data", (uchar_t
*)uscmd
->uscsi_bufaddr
,
12080 uscmd
->uscsi_buflen
, SD_LOG_HEX
);
/* Clear the format-in-progress marker set above. */
12086 mutex_enter(SD_MUTEX(un
));
12087 un
->un_f_format_in_progress
= FALSE
;
12088 mutex_exit(SD_MUTEX(un
));
/* Copy status/residual/sense back to the caller's struct. */
12091 (void) scsi_uscsi_copyout((intptr_t)incmd
, uscmd
);
/*
 * NOTE(review): lossy extraction -- local declarations (sensep, senlen,
 * com), early returns after the NULL-cdb and no-sense checks, and braces
 * are missing (upstream numbers skip).  Comments only were added.
 */
12097 * Function: sd_ssc_print
12099 * Description: Print information available to the console.
12101 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12102 * sd_uscsi_info in.
12103 * sd_severity - log level.
12104 * Context: Kernel thread or interrupt context.
/*
 * Formats and logs the failed command plus its sense data through
 * scsi_generic_errmsg(), unless FM logging is ereport-only.
 */
12107 sd_ssc_print(sd_ssc_t
*ssc
, int sd_severity
)
12109 struct uscsi_cmd
*ucmdp
;
12110 struct scsi_device
*devp
;
12111 dev_info_t
*devinfo
;
12114 union scsi_cdb
*cdbp
;
12116 extern struct scsi_key_strings scsi_cmds
[];
12118 ASSERT(ssc
!= NULL
);
12119 ASSERT(ssc
->ssc_un
!= NULL
);
/* Ereport-only FM logging suppresses console messages entirely. */
12121 if (SD_FM_LOG(ssc
->ssc_un
) != SD_FM_LOG_EREPORT
)
12123 ucmdp
= ssc
->ssc_uscsi_cmd
;
12124 devp
= SD_SCSI_DEVP(ssc
->ssc_un
);
12125 devinfo
= SD_DEVINFO(ssc
->ssc_un
);
12126 ASSERT(ucmdp
!= NULL
);
12127 ASSERT(devp
!= NULL
);
12128 ASSERT(devinfo
!= NULL
);
/* Valid sense length = requested length minus residual. */
12129 sensep
= (uint8_t *)ucmdp
->uscsi_rqbuf
;
12130 senlen
= ucmdp
->uscsi_rqlen
- ucmdp
->uscsi_rqresid
;
12131 cdbp
= (union scsi_cdb
*)ucmdp
->uscsi_cdb
;
12133 /* In certain case (like DOORLOCK), the cdb could be NULL. */
12136 /* We don't print log if no sense data available. */
12139 com
= cdbp
->scc_cmd
;
12140 scsi_generic_errmsg(devp
, sd_label
, sd_severity
, 0, 0, com
,
12141 scsi_cmds
, sensep
, ssc
->ssc_un
->un_additional_codes
, NULL
);
/*
 * NOTE(review): lossy extraction -- local declarations (un, senlen),
 * early returns, break statements and some braces are missing (upstream
 * numbers skip).  Comments only were added; verify against upstream.
 */
12145 * Function: sd_ssc_assessment
12147 * Description: We use this function to make an assessment at the point
12148 * where SD driver may encounter a potential error.
12150 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12151 * sd_uscsi_info in.
12152 * tp_assess - a hint of strategy for ereport posting.
12153 * Possible values of tp_assess include:
12154 * SD_FMT_IGNORE - we don't post any ereport because we're
12155 * sure that it is ok to ignore the underlying problems.
12156 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now
12157 * but it might be not correct to ignore the underlying hardware
12159 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12160 * payload driver-assessment of value "fail" or
12161 * "fatal"(depending on what information we have here). This
12162 * assessment value is usually set when SD driver think there
12163 * is a potential error occurred(Typically, when return value
12164 * of the SCSI command is EIO).
12165 * SD_FMT_STANDARD - we will post an ereport with the payload
12166 * driver-assessment of value "info". This assessment value is
12167 * set when the SCSI command returned successfully and with
12168 * sense data sent back.
12170 * Context: Kernel thread.
/*
 * Completes the sd_ssc_send/sd_ssc_assessment pairing: clears the
 * NEED_ASSESSMENT flag, then posts FMA telemetry per tp_assess.
 */
12173 sd_ssc_assessment(sd_ssc_t
*ssc
, enum sd_type_assessment tp_assess
)
12176 struct uscsi_cmd
*ucmdp
= NULL
;
12179 ASSERT(ssc
!= NULL
);
12181 ASSERT(un
!= NULL
);
12182 ucmdp
= ssc
->ssc_uscsi_cmd
;
12183 ASSERT(ucmdp
!= NULL
);
12185 if (ssc
->ssc_flags
& SSC_FLAGS_NEED_ASSESSMENT
) {
/* Normal case: consume the pending-assessment marker. */
12186 ssc
->ssc_flags
&= ~SSC_FLAGS_NEED_ASSESSMENT
;
12189 * If enter here, it indicates that we have a wrong
12190 * calling sequence of sd_ssc_send and sd_ssc_assessment,
12191 * both of which should be called in a pair in case of
12192 * loss of FMA telemetries.
12194 if (ucmdp
->uscsi_cdb
!= NULL
) {
12195 SD_INFO(SD_LOG_SDTEST
, un
,
12196 "sd_ssc_assessment is missing the "
12197 "alternative sd_ssc_send when running 0x%x, "
12198 "or there are superfluous sd_ssc_assessment for "
12199 "the same sd_ssc_send.\n",
12200 ucmdp
->uscsi_cdb
[0]);
12203 * Set the ssc_flags to the initial value to avoid passing
12204 * down dirty flags to the following sd_ssc_send function.
12206 ssc
->ssc_flags
= SSC_FLAGS_UNKNOWN
;
12211 * Only handle an issued command which is waiting for assessment.
12212 * A command which is not issued will not have
12213 * SSC_FLAGS_INVALID_DATA set, so it'ok we just return here.
12215 if (!(ssc
->ssc_flags
& SSC_FLAGS_CMD_ISSUED
)) {
12216 sd_ssc_print(ssc
, SCSI_ERR_INFO
);
12220 * For an issued command, we should clear this flag in
12221 * order to make the sd_ssc_t structure be used off
12222 * multiple uscsi commands.
12224 ssc
->ssc_flags
&= ~SSC_FLAGS_CMD_ISSUED
;
12228 * We will not deal with non-retryable(flag USCSI_DIAGNOSE set)
12229 * commands here. And we should clear the ssc_flags before return.
12231 if (ucmdp
->uscsi_flags
& USCSI_DIAGNOSE
) {
12232 ssc
->ssc_flags
= SSC_FLAGS_UNKNOWN
;
/* Dispatch on the caller's assessment hint. */
12236 switch (tp_assess
) {
12237 case SD_FMT_IGNORE
:
12238 case SD_FMT_IGNORE_COMPROMISE
:
12240 case SD_FMT_STATUS_CHECK
:
12242 * For a failed command(including the succeeded command
12243 * with invalid data sent back).
12245 sd_ssc_post(ssc
, SD_FM_DRV_FATAL
);
12247 case SD_FMT_STANDARD
:
12249 * Always for the succeeded commands probably with sense
12252 * We can only handle a succeeded command with sense
12253 * data sent back when auto-request-sense is enabled.
12255 senlen
= ssc
->ssc_uscsi_cmd
->uscsi_rqlen
-
12256 ssc
->ssc_uscsi_cmd
->uscsi_rqresid
;
12257 if ((ssc
->ssc_uscsi_info
->ui_pkt_state
& STATE_ARQ_DONE
) &&
12258 (un
->un_f_arq_enabled
== TRUE
) &&
12260 ssc
->ssc_uscsi_cmd
->uscsi_rqbuf
!= NULL
) {
12261 sd_ssc_post(ssc
, SD_FM_DRV_NOTICE
);
12266 * Should not have other type of assessment.
12268 scsi_log(SD_DEVINFO(un
), sd_label
, CE_CONT
,
12269 "sd_ssc_assessment got wrong "
12270 "sd_type_assessment %d.\n", tp_assess
);
12274 * Clear up the ssc_flags before return.
12276 ssc
->ssc_flags
= SSC_FLAGS_UNKNOWN
;
/*
 * NOTE(review): lossy extraction -- local declarations (un, sd_severity),
 * the early return for non-disk devices, break statements and braces are
 * missing (upstream numbers skip).  Comments only were added.
 */
12280 * Function: sd_ssc_post
12282 * Description: 1. read the driver property to get fm-scsi-log flag.
12283 * 2. print log if fm_log_capable is non-zero.
12284 * 3. call sd_ssc_ereport_post to post ereport if possible.
12286 * Context: May be called from kernel thread or interrupt context.
/*
 * Maps a driver assessment to a SCSI log severity, prints via
 * sd_ssc_print(), then always posts the ereport.
 */
12289 sd_ssc_post(sd_ssc_t
*ssc
, enum sd_driver_assessment sd_assess
)
12294 ASSERT(ssc
!= NULL
);
12296 ASSERT(un
!= NULL
);
12299 * We may enter here from sd_ssc_assessment(for USCSI command) or
12300 * by directly called from sdintr context.
12301 * We don't handle a non-disk drive(CD-ROM, removable media).
12302 * Clear the ssc_flags before return in case we've set
12303 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk
12306 if (ISCD(un
) || un
->un_f_has_removable_media
) {
12307 ssc
->ssc_flags
= SSC_FLAGS_UNKNOWN
;
/* Translate the driver assessment into a log severity. */
12311 switch (sd_assess
) {
12312 case SD_FM_DRV_FATAL
:
12313 sd_severity
= SCSI_ERR_FATAL
;
12315 case SD_FM_DRV_RECOVERY
:
12316 sd_severity
= SCSI_ERR_RECOVERED
;
12318 case SD_FM_DRV_RETRY
:
12319 sd_severity
= SCSI_ERR_RETRYABLE
;
12321 case SD_FM_DRV_NOTICE
:
12322 sd_severity
= SCSI_ERR_INFO
;
12325 sd_severity
= SCSI_ERR_UNKNOWN
;
12328 sd_ssc_print(ssc
, sd_severity
);
12330 /* always post ereport */
12331 sd_ssc_ereport_post(ssc
, sd_assess
);
12335 * Function: sd_ssc_set_info
12337 * Description: Mark ssc_flags and set ssc_info which would be the
12338 * payload of uderr ereport. This function will cause
12339 * sd_ssc_ereport_post to post uderr ereport only.
12340 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI),
12341 * the function will also call SD_ERROR or scsi_log for a
12342 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12344 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
12345 * sd_uscsi_info in.
12346 * ssc_flags - indicate the sub-category of a uderr.
12347 * comp - this argument is meaningful only when
12348 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12350 * > 0, SD_ERROR is used with comp as the driver logging
12352 * = 0, scsi-log is used to log error telemetries;
12353 * < 0, no log available for this telemetry.
12355 * Context: Kernel thread or interrupt context
12358 sd_ssc_set_info(sd_ssc_t
*ssc
, int ssc_flags
, uint_t comp
, const char *fmt
, ...)
12362 ASSERT(ssc
!= NULL
);
12363 ASSERT(ssc
->ssc_un
!= NULL
);
12365 ssc
->ssc_flags
|= ssc_flags
;
12367 (void) vsnprintf(ssc
->ssc_info
, sizeof (ssc
->ssc_info
), fmt
, ap
);
12371 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12372 * with invalid data sent back. For non-uscsi command, the
12373 * following code will be bypassed.
12375 if (ssc_flags
& SSC_FLAGS_INVALID_DATA
) {
12376 if (SD_FM_LOG(ssc
->ssc_un
) == SD_FM_LOG_NSUP
) {
12378 * If the error belong to certain component and we
12379 * do not want it to show up on the console, we
12380 * will use SD_ERROR, otherwise scsi_log is
12384 SD_ERROR(comp
, ssc
->ssc_un
, ssc
->ssc_info
);
12385 } else if (comp
== 0) {
12386 scsi_log(SD_DEVINFO(ssc
->ssc_un
), sd_label
,
12387 CE_WARN
, ssc
->ssc_info
);
12394 * Function: sd_buf_iodone
12396 * Description: Frees the sd_xbuf & returns the buf to its originator.
12398 * Context: May be called from interrupt context.
12402 sd_buf_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
)
12404 struct sd_xbuf
*xp
;
12406 ASSERT(un
!= NULL
);
12407 ASSERT(bp
!= NULL
);
12408 ASSERT(!mutex_owned(SD_MUTEX(un
)));
12410 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_buf_iodone: entry.\n");
12412 xp
= SD_GET_XBUF(bp
);
12413 ASSERT(xp
!= NULL
);
12415 /* xbuf is gone after this */
12416 if (ddi_xbuf_done(bp
, un
->un_xbuf_attr
)) {
12417 mutex_enter(SD_MUTEX(un
));
12420 * Grab time when the cmd completed.
12421 * This is used for determining if the system has been
12422 * idle long enough to make it idle to the PM framework.
12423 * This is for lowering the overhead, and therefore improving
12424 * performance per I/O operation.
12426 un
->un_pm_idle_time
= ddi_get_time();
12428 un
->un_ncmds_in_driver
--;
12429 ASSERT(un
->un_ncmds_in_driver
>= 0);
12430 SD_INFO(SD_LOG_IO
, un
,
12431 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12432 un
->un_ncmds_in_driver
);
12434 mutex_exit(SD_MUTEX(un
));
12437 biodone(bp
); /* bp is gone after this */
12439 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_buf_iodone: exit.\n");
12444 * Function: sd_uscsi_iodone
12446 * Description: Frees the sd_xbuf & returns the buf to its originator.
12448 * Context: May be called from interrupt context.
12452 sd_uscsi_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
)
12454 struct sd_xbuf
*xp
;
12456 ASSERT(un
!= NULL
);
12457 ASSERT(bp
!= NULL
);
12459 xp
= SD_GET_XBUF(bp
);
12460 ASSERT(xp
!= NULL
);
12461 ASSERT(!mutex_owned(SD_MUTEX(un
)));
12463 SD_INFO(SD_LOG_IO
, un
, "sd_uscsi_iodone: entry.\n");
12465 bp
->b_private
= xp
->xb_private
;
12467 mutex_enter(SD_MUTEX(un
));
12470 * Grab time when the cmd completed.
12471 * This is used for determining if the system has been
12472 * idle long enough to make it idle to the PM framework.
12473 * This is for lowering the overhead, and therefore improving
12474 * performance per I/O operation.
12476 un
->un_pm_idle_time
= ddi_get_time();
12478 un
->un_ncmds_in_driver
--;
12479 ASSERT(un
->un_ncmds_in_driver
>= 0);
12480 SD_INFO(SD_LOG_IO
, un
, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12481 un
->un_ncmds_in_driver
);
12483 mutex_exit(SD_MUTEX(un
));
12485 if (((struct uscsi_cmd
*)(xp
->xb_pktinfo
))->uscsi_rqlen
>
12487 kmem_free(xp
, sizeof (struct sd_xbuf
) - SENSE_LENGTH
+
12490 kmem_free(xp
, sizeof (struct sd_xbuf
));
12495 SD_INFO(SD_LOG_IO
, un
, "sd_uscsi_iodone: exit.\n");
12500 * Function: sd_mapblockaddr_iostart
12502 * Description: Verify request lies within the partition limits for
12503 * the indicated minor device. Issue "overrun" buf if
12504 * request would exceed partition range. Converts
12505 * partition-relative block address to absolute.
12507 * Upon exit of this function:
12509 * xp->xb_blkno represents the absolute sector address
12510 * 2.I/O is misaligned
12511 * xp->xb_blkno represents the absolute logical block address
12512 * based on DEV_BSIZE. The logical block address will be
12513 * converted to physical sector address in sd_mapblocksize_\
12515 * 3.I/O is misaligned but is aligned in "overrun" buf
12516 * xp->xb_blkno represents the absolute logical block address
12517 * based on DEV_BSIZE. The logical block address will be
12518 * converted to physical sector address in sd_mapblocksize_\
12519 * iostart. But no RMW will be issued in this case.
12521 * Context: Can sleep
12523 * Issues: This follows what the old code did, in terms of accessing
12524 * some of the partition info in the unit struct without holding
12525 * the mutext. This is a general issue, if the partition info
12526 * can be altered while IO is in progress... as soon as we send
12527 * a buf, its partitioning can be invalid before it gets to the
12528 * device. Probably the right fix is to move partitioning out
12529 * of the driver entirely.
12533 sd_mapblockaddr_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
)
12535 diskaddr_t nblocks
; /* #blocks in the given partition */
12536 daddr_t blocknum
; /* Block number specified by the buf */
12537 size_t requested_nblocks
;
12538 size_t available_nblocks
;
12540 diskaddr_t partition_offset
;
12541 struct sd_xbuf
*xp
;
12542 int secmask
= 0, blknomask
= 0;
12543 ushort_t is_aligned
= TRUE
;
12545 ASSERT(un
!= NULL
);
12546 ASSERT(bp
!= NULL
);
12547 ASSERT(!mutex_owned(SD_MUTEX(un
)));
12549 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
12550 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp
);
12552 xp
= SD_GET_XBUF(bp
);
12553 ASSERT(xp
!= NULL
);
12556 * If the geometry is not indicated as valid, attempt to access
12557 * the unit & verify the geometry/label. This can be the case for
12558 * removable-media devices, of if the device was opened in
12559 * NDELAY/NONBLOCK mode.
12561 partition
= SDPART(bp
->b_edev
);
12563 if (!SD_IS_VALID_LABEL(un
)) {
12566 * Initialize sd_ssc_t for internal uscsi commands
12567 * In case of potential porformance issue, we need
12568 * to alloc memory only if there is invalid label
12570 ssc
= sd_ssc_init(un
);
12572 if (sd_ready_and_valid(ssc
, partition
) != SD_READY_VALID
) {
12574 * For removable devices it is possible to start an
12575 * I/O without a media by opening the device in nodelay
12576 * mode. Also for writable CDs there can be many
12577 * scenarios where there is no geometry yet but volume
12578 * manager is trying to issue a read() just because
12579 * it can see TOC on the CD. So do not print a message
12582 if (!un
->un_f_has_removable_media
) {
12583 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
12584 "i/o to invalid geometry\n");
12587 bp
->b_resid
= bp
->b_bcount
;
12588 SD_BEGIN_IODONE(index
, un
, bp
);
12597 (void) cmlb_partinfo(un
->un_cmlbhandle
, partition
,
12598 &nblocks
, &partition_offset
, NULL
, NULL
, (void *)SD_PATH_DIRECT
);
12600 if (un
->un_f_enable_rmw
) {
12601 blknomask
= (un
->un_phy_blocksize
/ DEV_BSIZE
) - 1;
12602 secmask
= un
->un_phy_blocksize
- 1;
12604 blknomask
= (un
->un_tgt_blocksize
/ DEV_BSIZE
) - 1;
12605 secmask
= un
->un_tgt_blocksize
- 1;
12608 if ((bp
->b_lblkno
& (blknomask
)) || (bp
->b_bcount
& (secmask
))) {
12609 is_aligned
= FALSE
;
12612 if (!(NOT_DEVBSIZE(un
)) || un
->un_f_enable_rmw
) {
12614 * If I/O is aligned, no need to involve RMW(Read Modify Write)
12615 * Convert the logical block number to target's physical sector
12619 xp
->xb_blkno
= SD_SYS2TGTBLOCK(un
, xp
->xb_blkno
);
12621 switch (un
->un_f_rmw_type
) {
12622 case SD_RMW_TYPE_RETURN_ERROR
:
12623 if (un
->un_f_enable_rmw
)
12626 bp
->b_flags
|= B_ERROR
;
12630 case SD_RMW_TYPE_DEFAULT
:
12631 mutex_enter(SD_MUTEX(un
));
12632 if (!un
->un_f_enable_rmw
&&
12633 un
->un_rmw_msg_timeid
== NULL
) {
12634 scsi_log(SD_DEVINFO(un
), sd_label
,
12635 CE_WARN
, "I/O request is not "
12636 "aligned with %d disk sector size. "
12637 "It is handled through Read Modify "
12638 "Write but the performance is "
12640 un
->un_tgt_blocksize
);
12641 un
->un_rmw_msg_timeid
=
12642 timeout(sd_rmw_msg_print_handler
,
12643 un
, SD_RMW_MSG_PRINT_TIMEOUT
);
12645 un
->un_rmw_incre_count
++;
12647 mutex_exit(SD_MUTEX(un
));
12650 case SD_RMW_TYPE_NO_WARNING
:
12655 nblocks
= SD_TGT2SYSBLOCK(un
, nblocks
);
12656 partition_offset
= SD_TGT2SYSBLOCK(un
,
12662 * blocknum is the starting block number of the request. At this
12663 * point it is still relative to the start of the minor device.
12665 blocknum
= xp
->xb_blkno
;
12668 * Legacy: If the starting block number is one past the last block
12669 * in the partition, do not set B_ERROR in the buf.
12671 if (blocknum
== nblocks
) {
12676 * Confirm that the first block of the request lies within the
12677 * partition limits. Also the requested number of bytes must be
12678 * a multiple of the system block size.
12680 if ((blocknum
< 0) || (blocknum
>= nblocks
) ||
12681 ((bp
->b_bcount
& (DEV_BSIZE
- 1)) != 0)) {
12682 bp
->b_flags
|= B_ERROR
;
12687 * If the requsted # blocks exceeds the available # blocks, that
12688 * is an overrun of the partition.
12690 if ((!NOT_DEVBSIZE(un
)) && is_aligned
) {
12691 requested_nblocks
= SD_BYTES2TGTBLOCKS(un
, bp
->b_bcount
);
12693 requested_nblocks
= SD_BYTES2SYSBLOCKS(bp
->b_bcount
);
12696 available_nblocks
= (size_t)(nblocks
- blocknum
);
12697 ASSERT(nblocks
>= blocknum
);
12699 if (requested_nblocks
> available_nblocks
) {
12703 * Allocate an "overrun" buf to allow the request to proceed
12704 * for the amount of space available in the partition. The
12705 * amount not transferred will be added into the b_resid
12706 * when the operation is complete. The overrun buf
12707 * replaces the original buf here, and the original buf
12708 * is saved inside the overrun buf, for later use.
12710 if ((!NOT_DEVBSIZE(un
)) && is_aligned
) {
12711 resid
= SD_TGTBLOCKS2BYTES(un
,
12712 (offset_t
)(requested_nblocks
- available_nblocks
));
12714 resid
= SD_SYSBLOCKS2BYTES(
12715 (offset_t
)(requested_nblocks
- available_nblocks
));
12718 size_t count
= bp
->b_bcount
- resid
;
12720 * Note: count is an unsigned entity thus it'll NEVER
12721 * be less than 0 so ASSERT the original values are
12724 ASSERT(bp
->b_bcount
>= resid
);
12726 bp
= sd_bioclone_alloc(bp
, count
, blocknum
,
12727 (int (*)(struct buf
*)) sd_mapblockaddr_iodone
);
12728 xp
= SD_GET_XBUF(bp
); /* Update for 'new' bp! */
12729 ASSERT(xp
!= NULL
);
12732 /* At this point there should be no residual for this buf. */
12733 ASSERT(bp
->b_resid
== 0);
12735 /* Convert the block number to an absolute address. */
12736 xp
->xb_blkno
+= partition_offset
;
12738 SD_NEXT_IOSTART(index
, un
, bp
);
12740 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
12741 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp
);
12746 bp
->b_resid
= bp
->b_bcount
;
12747 SD_BEGIN_IODONE(index
, un
, bp
);
12748 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
12749 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp
);
12754 * Function: sd_mapblockaddr_iodone
12756 * Description: Completion-side processing for partition management.
12758 * Context: May be called under interrupt context
12762 sd_mapblockaddr_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
)
12764 /* int partition; */ /* Not used, see below. */
12765 ASSERT(un
!= NULL
);
12766 ASSERT(bp
!= NULL
);
12767 ASSERT(!mutex_owned(SD_MUTEX(un
)));
12769 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
12770 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp
);
12772 if (bp
->b_iodone
== (int (*)(struct buf
*)) sd_mapblockaddr_iodone
) {
12774 * We have an "overrun" buf to deal with...
12776 struct sd_xbuf
*xp
;
12777 struct buf
*obp
; /* ptr to the original buf */
12779 xp
= SD_GET_XBUF(bp
);
12780 ASSERT(xp
!= NULL
);
12782 /* Retrieve the pointer to the original buf */
12783 obp
= (struct buf
*)xp
->xb_private
;
12784 ASSERT(obp
!= NULL
);
12786 obp
->b_resid
= obp
->b_bcount
- (bp
->b_bcount
- bp
->b_resid
);
12787 bioerror(obp
, bp
->b_error
);
12789 sd_bioclone_free(bp
);
12792 * Get back the original buf.
12793 * Note that since the restoration of xb_blkno below
12794 * was removed, the sd_xbuf is not needed.
12798 * xp = SD_GET_XBUF(bp);
12799 * ASSERT(xp != NULL);
12804 * Convert sd->xb_blkno back to a minor-device relative value.
12805 * Note: this has been commented out, as it is not needed in the
12806 * current implementation of the driver (ie, since this function
12807 * is at the top of the layering chains, so the info will be
12808 * discarded) and it is in the "hot" IO path.
12810 * partition = getminor(bp->b_edev) & SDPART_MASK;
12811 * xp->xb_blkno -= un->un_offset[partition];
12814 SD_NEXT_IODONE(index
, un
, bp
);
12816 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
12817 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp
);
12822 * Function: sd_mapblocksize_iostart
12824 * Description: Convert between system block size (un->un_sys_blocksize)
12825 * and target block size (un->un_tgt_blocksize).
12827 * Context: Can sleep to allocate resources.
12829 * Assumptions: A higher layer has already performed any partition validation,
12830 * and converted the xp->xb_blkno to an absolute value relative
12831 * to the start of the device.
12833 * It is also assumed that the higher layer has implemented
12834 * an "overrun" mechanism for the case where the request would
12835 * read/write beyond the end of a partition. In this case we
12836 * assume (and ASSERT) that bp->b_resid == 0.
12838 * Note: The implementation for this routine assumes the target
12839 * block size remains constant between allocation and transport.
12843 sd_mapblocksize_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
)
12845 struct sd_mapblocksize_info
*bsp
;
12846 struct sd_xbuf
*xp
;
12847 offset_t first_byte
;
12848 daddr_t start_block
, end_block
;
12849 daddr_t request_bytes
;
12850 ushort_t is_aligned
= FALSE
;
12852 ASSERT(un
!= NULL
);
12853 ASSERT(bp
!= NULL
);
12854 ASSERT(!mutex_owned(SD_MUTEX(un
)));
12855 ASSERT(bp
->b_resid
== 0);
12857 SD_TRACE(SD_LOG_IO_RMMEDIA
, un
,
12858 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp
);
12861 * For a non-writable CD, a write request is an error
12863 if (ISCD(un
) && ((bp
->b_flags
& B_READ
) == 0) &&
12864 (un
->un_f_mmc_writable_media
== FALSE
)) {
12866 bp
->b_resid
= bp
->b_bcount
;
12867 SD_BEGIN_IODONE(index
, un
, bp
);
12872 * We do not need a shadow buf if the device is using
12873 * un->un_sys_blocksize as its block size or if bcount == 0.
12874 * In this case there is no layer-private data block allocated.
12876 if ((un
->un_tgt_blocksize
== DEV_BSIZE
&& !un
->un_f_enable_rmw
) ||
12877 (bp
->b_bcount
== 0)) {
12881 #if defined(__i386) || defined(__amd64)
12882 /* We do not support non-block-aligned transfers for ROD devices */
12883 ASSERT(!ISROD(un
));
12886 xp
= SD_GET_XBUF(bp
);
12887 ASSERT(xp
!= NULL
);
12889 SD_INFO(SD_LOG_IO_RMMEDIA
, un
, "sd_mapblocksize_iostart: "
12890 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12891 un
->un_tgt_blocksize
, DEV_BSIZE
);
12892 SD_INFO(SD_LOG_IO_RMMEDIA
, un
, "sd_mapblocksize_iostart: "
12893 "request start block:0x%x\n", xp
->xb_blkno
);
12894 SD_INFO(SD_LOG_IO_RMMEDIA
, un
, "sd_mapblocksize_iostart: "
12895 "request len:0x%x\n", bp
->b_bcount
);
12898 * Allocate the layer-private data area for the mapblocksize layer.
12899 * Layers are allowed to use the xp_private member of the sd_xbuf
12900 * struct to store the pointer to their layer-private data block, but
12901 * each layer also has the responsibility of restoring the prior
12902 * contents of xb_private before returning the buf/xbuf to the
12903 * higher layer that sent it.
12905 * Here we save the prior contents of xp->xb_private into the
12906 * bsp->mbs_oprivate field of our layer-private data area. This value
12907 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12908 * the layer-private area and returning the buf/xbuf to the layer
12911 * Note that here we use kmem_zalloc for the allocation as there are
12912 * parts of the mapblocksize code that expect certain fields to be
12913 * zero unless explicitly set to a required value.
12915 bsp
= kmem_zalloc(sizeof (struct sd_mapblocksize_info
), KM_SLEEP
);
12916 bsp
->mbs_oprivate
= xp
->xb_private
;
12917 xp
->xb_private
= bsp
;
12920 * This treats the data on the disk (target) as an array of bytes.
12921 * first_byte is the byte offset, from the beginning of the device,
12922 * to the location of the request. This is converted from a
12923 * un->un_sys_blocksize block address to a byte offset, and then back
12924 * to a block address based upon a un->un_tgt_blocksize block size.
12926 * xp->xb_blkno should be absolute upon entry into this function,
12927 * but, but it is based upon partitions that use the "system"
12928 * block size. It must be adjusted to reflect the block size of
12931 * Note that end_block is actually the block that follows the last
12932 * block of the request, but that's what is needed for the computation.
12934 first_byte
= SD_SYSBLOCKS2BYTES((offset_t
)xp
->xb_blkno
);
12935 if (un
->un_f_enable_rmw
) {
12936 start_block
= xp
->xb_blkno
=
12937 (first_byte
/ un
->un_phy_blocksize
) *
12938 (un
->un_phy_blocksize
/ DEV_BSIZE
);
12939 end_block
= ((first_byte
+ bp
->b_bcount
+
12940 un
->un_phy_blocksize
- 1) / un
->un_phy_blocksize
) *
12941 (un
->un_phy_blocksize
/ DEV_BSIZE
);
12943 start_block
= xp
->xb_blkno
= first_byte
/ un
->un_tgt_blocksize
;
12944 end_block
= (first_byte
+ bp
->b_bcount
+
12945 un
->un_tgt_blocksize
- 1) / un
->un_tgt_blocksize
;
12948 /* request_bytes is rounded up to a multiple of the target block size */
12949 request_bytes
= (end_block
- start_block
) * un
->un_tgt_blocksize
;
12952 * See if the starting address of the request and the request
12953 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12954 * then we do not need to allocate a shadow buf to handle the request.
12956 if (un
->un_f_enable_rmw
) {
12957 if (((first_byte
% un
->un_phy_blocksize
) == 0) &&
12958 ((bp
->b_bcount
% un
->un_phy_blocksize
) == 0)) {
12962 if (((first_byte
% un
->un_tgt_blocksize
) == 0) &&
12963 ((bp
->b_bcount
% un
->un_tgt_blocksize
) == 0)) {
12968 if ((bp
->b_flags
& B_READ
) == 0) {
12970 * Lock the range for a write operation. An aligned request is
12971 * considered a simple write; otherwise the request must be a
12972 * read-modify-write.
12974 bsp
->mbs_wmp
= sd_range_lock(un
, start_block
, end_block
- 1,
12975 (is_aligned
== TRUE
) ? SD_WTYPE_SIMPLE
: SD_WTYPE_RMW
);
12979 * Alloc a shadow buf if the request is not aligned. Also, this is
12980 * where the READ command is generated for a read-modify-write. (The
12981 * write phase is deferred until after the read completes.)
12983 if (is_aligned
== FALSE
) {
12985 struct sd_mapblocksize_info
*shadow_bsp
;
12986 struct sd_xbuf
*shadow_xp
;
12987 struct buf
*shadow_bp
;
12990 * Allocate the shadow buf and it associated xbuf. Note that
12991 * after this call the xb_blkno value in both the original
12992 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
12993 * same: absolute relative to the start of the device, and
12994 * adjusted for the target block size. The b_blkno in the
12995 * shadow buf will also be set to this value. We should never
12996 * change b_blkno in the original bp however.
12998 * Note also that the shadow buf will always need to be a
12999 * READ command, regardless of whether the incoming command
13000 * is a READ or a WRITE.
13002 shadow_bp
= sd_shadow_buf_alloc(bp
, request_bytes
, B_READ
,
13004 (int (*)(struct buf
*)) sd_mapblocksize_iodone
);
13006 shadow_xp
= SD_GET_XBUF(shadow_bp
);
13009 * Allocate the layer-private data for the shadow buf.
13010 * (No need to preserve xb_private in the shadow xbuf.)
13012 shadow_xp
->xb_private
= shadow_bsp
=
13013 kmem_zalloc(sizeof (struct sd_mapblocksize_info
), KM_SLEEP
);
13016 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
13017 * to figure out where the start of the user data is (based upon
13018 * the system block size) in the data returned by the READ
13019 * command (which will be based upon the target blocksize). Note
13020 * that this is only really used if the request is unaligned.
13022 if (un
->un_f_enable_rmw
) {
13023 bsp
->mbs_copy_offset
= (ssize_t
)(first_byte
-
13024 ((offset_t
)xp
->xb_blkno
* un
->un_sys_blocksize
));
13025 ASSERT((bsp
->mbs_copy_offset
>= 0) &&
13026 (bsp
->mbs_copy_offset
< un
->un_phy_blocksize
));
13028 bsp
->mbs_copy_offset
= (ssize_t
)(first_byte
-
13029 ((offset_t
)xp
->xb_blkno
* un
->un_tgt_blocksize
));
13030 ASSERT((bsp
->mbs_copy_offset
>= 0) &&
13031 (bsp
->mbs_copy_offset
< un
->un_tgt_blocksize
));
13034 shadow_bsp
->mbs_copy_offset
= bsp
->mbs_copy_offset
;
13036 shadow_bsp
->mbs_layer_index
= bsp
->mbs_layer_index
= index
;
13038 /* Transfer the wmap (if any) to the shadow buf */
13039 shadow_bsp
->mbs_wmp
= bsp
->mbs_wmp
;
13040 bsp
->mbs_wmp
= NULL
;
13043 * The shadow buf goes on from here in place of the
13046 shadow_bsp
->mbs_orig_bp
= bp
;
13050 SD_INFO(SD_LOG_IO_RMMEDIA
, un
,
13051 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp
->xb_blkno
);
13052 SD_INFO(SD_LOG_IO_RMMEDIA
, un
,
13053 "sd_mapblocksize_iostart: tgt request len:0x%x\n",
13055 SD_INFO(SD_LOG_IO_RMMEDIA
, un
,
13056 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp
);
13059 SD_NEXT_IOSTART(index
, un
, bp
);
13061 SD_TRACE(SD_LOG_IO_RMMEDIA
, un
,
13062 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp
);
13067 * Function: sd_mapblocksize_iodone
13069 * Description: Completion side processing for block-size mapping.
13071 * Context: May be called under interrupt context
13075 sd_mapblocksize_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
)
13077 struct sd_mapblocksize_info
*bsp
;
13078 struct sd_xbuf
*xp
;
13079 struct sd_xbuf
*orig_xp
; /* sd_xbuf for the original buf */
13080 struct buf
*orig_bp
; /* ptr to the original buf */
13081 offset_t shadow_end
;
13082 offset_t request_end
;
13083 offset_t shadow_start
;
13084 ssize_t copy_offset
;
13085 size_t copy_length
;
13087 uint_t is_write
; /* TRUE if this bp is a WRITE */
13088 uint_t has_wmap
; /* TRUE is this bp has a wmap */
13090 ASSERT(un
!= NULL
);
13091 ASSERT(bp
!= NULL
);
13093 SD_TRACE(SD_LOG_IO_RMMEDIA
, un
,
13094 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp
);
13097 * There is no shadow buf or layer-private data if the target is
13098 * using un->un_sys_blocksize as its block size or if bcount == 0.
13100 if ((un
->un_tgt_blocksize
== DEV_BSIZE
&& !un
->un_f_enable_rmw
) ||
13101 (bp
->b_bcount
== 0)) {
13105 xp
= SD_GET_XBUF(bp
);
13106 ASSERT(xp
!= NULL
);
13108 /* Retrieve the pointer to the layer-private data area from the xbuf. */
13109 bsp
= xp
->xb_private
;
13111 is_write
= ((bp
->b_flags
& B_READ
) == 0) ? TRUE
: FALSE
;
13112 has_wmap
= (bsp
->mbs_wmp
!= NULL
) ? TRUE
: FALSE
;
13116 * For a WRITE request we must free up the block range that
13117 * we have locked up. This holds regardless of whether this is
13118 * an aligned write request or a read-modify-write request.
13120 sd_range_unlock(un
, bsp
->mbs_wmp
);
13121 bsp
->mbs_wmp
= NULL
;
13124 if ((bp
->b_iodone
!= (int(*)(struct buf
*))sd_mapblocksize_iodone
)) {
13126 * An aligned read or write command will have no shadow buf;
13127 * there is not much else to do with it.
13132 orig_bp
= bsp
->mbs_orig_bp
;
13133 ASSERT(orig_bp
!= NULL
);
13134 orig_xp
= SD_GET_XBUF(orig_bp
);
13135 ASSERT(orig_xp
!= NULL
);
13136 ASSERT(!mutex_owned(SD_MUTEX(un
)));
13138 if (!is_write
&& has_wmap
) {
13140 * A READ with a wmap means this is the READ phase of a
13141 * read-modify-write. If an error occurred on the READ then
13142 * we do not proceed with the WRITE phase or copy any data.
13143 * Just release the write maps and return with an error.
13145 if ((bp
->b_resid
!= 0) || (bp
->b_error
!= 0)) {
13146 orig_bp
->b_resid
= orig_bp
->b_bcount
;
13147 bioerror(orig_bp
, bp
->b_error
);
13148 sd_range_unlock(un
, bsp
->mbs_wmp
);
13154 * Here is where we set up to copy the data from the shadow buf
13155 * into the space associated with the original buf.
13157 * To deal with the conversion between block sizes, these
13158 * computations treat the data as an array of bytes, with the
13159 * first byte (byte 0) corresponding to the first byte in the
13160 * first block on the disk.
13164 * shadow_start and shadow_len indicate the location and size of
13165 * the data returned with the shadow IO request.
13167 if (un
->un_f_enable_rmw
) {
13168 shadow_start
= SD_SYSBLOCKS2BYTES((offset_t
)xp
->xb_blkno
);
13170 shadow_start
= SD_TGTBLOCKS2BYTES(un
, (offset_t
)xp
->xb_blkno
);
13172 shadow_end
= shadow_start
+ bp
->b_bcount
- bp
->b_resid
;
13175 * copy_offset gives the offset (in bytes) from the start of the first
13176 * block of the READ request to the beginning of the data. We retrieve
13177 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved
13178 * there by sd_mapblockize_iostart(). copy_length gives the amount of
13179 * data to be copied (in bytes).
13181 copy_offset
= bsp
->mbs_copy_offset
;
13182 if (un
->un_f_enable_rmw
) {
13183 ASSERT((copy_offset
>= 0) &&
13184 (copy_offset
< un
->un_phy_blocksize
));
13186 ASSERT((copy_offset
>= 0) &&
13187 (copy_offset
< un
->un_tgt_blocksize
));
13190 copy_length
= orig_bp
->b_bcount
;
13191 request_end
= shadow_start
+ copy_offset
+ orig_bp
->b_bcount
;
13194 * Set up the resid and error fields of orig_bp as appropriate.
13196 if (shadow_end
>= request_end
) {
13197 /* We got all the requested data; set resid to zero */
13198 orig_bp
->b_resid
= 0;
13201 * We failed to get enough data to fully satisfy the original
13202 * request. Just copy back whatever data we got and set
13203 * up the residual and error code as required.
13205 * 'shortfall' is the amount by which the data received with the
13206 * shadow buf has "fallen short" of the requested amount.
13208 shortfall
= (size_t)(request_end
- shadow_end
);
13210 if (shortfall
> orig_bp
->b_bcount
) {
13212 * We did not get enough data to even partially
13213 * fulfill the original request. The residual is
13214 * equal to the amount requested.
13216 orig_bp
->b_resid
= orig_bp
->b_bcount
;
13219 * We did not get all the data that we requested
13220 * from the device, but we will try to return what
13221 * portion we did get.
13223 orig_bp
->b_resid
= shortfall
;
13225 ASSERT(copy_length
>= orig_bp
->b_resid
);
13226 copy_length
-= orig_bp
->b_resid
;
13229 /* Propagate the error code from the shadow buf to the original buf */
13230 bioerror(orig_bp
, bp
->b_error
);
13233 goto freebuf_done
; /* No data copying for a WRITE */
13238 * This is a READ command from the READ phase of a
13239 * read-modify-write request. We have to copy the data given
13240 * by the user OVER the data returned by the READ command,
13241 * then convert the command from a READ to a WRITE and send
13242 * it back to the target.
13244 bcopy(orig_bp
->b_un
.b_addr
, bp
->b_un
.b_addr
+ copy_offset
,
13247 bp
->b_flags
&= ~((int)B_READ
); /* Convert to a WRITE */
13250 * Dispatch the WRITE command to the taskq thread, which
13251 * will in turn send the command to the target. When the
13252 * WRITE command completes, we (sd_mapblocksize_iodone())
13253 * will get called again as part of the iodone chain
13254 * processing for it. Note that we will still be dealing
13255 * with the shadow buf at that point.
13257 if (taskq_dispatch(sd_wmr_tq
, sd_read_modify_write_task
, bp
,
13258 KM_NOSLEEP
) != 0) {
13260 * Dispatch was successful so we are done. Return
13261 * without going any higher up the iodone chain. Do
13262 * not free up any layer-private data until after the
13269 * Dispatch of the WRITE command failed; set up the error
13270 * condition and send this IO back up the iodone chain.
13272 bioerror(orig_bp
, EIO
);
13273 orig_bp
->b_resid
= orig_bp
->b_bcount
;
13277 * This is a regular READ request (ie, not a RMW). Copy the
13278 * data from the shadow buf into the original buf. The
13279 * copy_offset compensates for any "misalignment" between the
13280 * shadow buf (with its un->un_tgt_blocksize blocks) and the
13281 * original buf (with its un->un_sys_blocksize blocks).
13283 bcopy(bp
->b_un
.b_addr
+ copy_offset
, orig_bp
->b_un
.b_addr
,
13290 * At this point we still have both the shadow buf AND the original
13291 * buf to deal with, as well as the layer-private data area in each.
13292 * Local variables are as follows:
13294 * bp -- points to shadow buf
13295 * xp -- points to xbuf of shadow buf
13296 * bsp -- points to layer-private data area of shadow buf
13297 * orig_bp -- points to original buf
13299 * First free the shadow buf and its associated xbuf, then free the
13300 * layer-private data area from the shadow buf. There is no need to
13301 * restore xb_private in the shadow xbuf.
13303 sd_shadow_buf_free(bp
);
13304 kmem_free(bsp
, sizeof (struct sd_mapblocksize_info
));
13307 * Now update the local variables to point to the original buf, xbuf,
13308 * and layer-private area.
13311 xp
= SD_GET_XBUF(bp
);
13312 ASSERT(xp
!= NULL
);
13313 ASSERT(xp
== orig_xp
);
13314 bsp
= xp
->xb_private
;
13315 ASSERT(bsp
!= NULL
);
13319 * Restore xb_private to whatever it was set to by the next higher
13320 * layer in the chain, then free the layer-private data area.
13322 xp
->xb_private
= bsp
->mbs_oprivate
;
13323 kmem_free(bsp
, sizeof (struct sd_mapblocksize_info
));
13326 SD_TRACE(SD_LOG_IO_RMMEDIA
, SD_GET_UN(bp
),
13327 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp
);
13329 SD_NEXT_IODONE(index
, un
, bp
);
13334 * Function: sd_checksum_iostart
13336 * Description: A stub function for a layer that's currently not used.
13337 * For now just a placeholder.
13339 * Context: Kernel thread context
13343 sd_checksum_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
)
13345 ASSERT(un
!= NULL
);
13346 ASSERT(bp
!= NULL
);
13347 ASSERT(!mutex_owned(SD_MUTEX(un
)));
13348 SD_NEXT_IOSTART(index
, un
, bp
);
13353 * Function: sd_checksum_iodone
13355 * Description: A stub function for a layer that's currently not used.
13356 * For now just a placeholder.
13358 * Context: May be called under interrupt context
13362 sd_checksum_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
)
13364 ASSERT(un
!= NULL
);
13365 ASSERT(bp
!= NULL
);
13366 ASSERT(!mutex_owned(SD_MUTEX(un
)));
13367 SD_NEXT_IODONE(index
, un
, bp
);
13372 * Function: sd_checksum_uscsi_iostart
13374 * Description: A stub function for a layer that's currently not used.
13375 * For now just a placeholder.
13377 * Context: Kernel thread context
13381 sd_checksum_uscsi_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
)
13383 ASSERT(un
!= NULL
);
13384 ASSERT(bp
!= NULL
);
13385 ASSERT(!mutex_owned(SD_MUTEX(un
)));
13386 SD_NEXT_IOSTART(index
, un
, bp
);
13391 * Function: sd_checksum_uscsi_iodone
13393 * Description: A stub function for a layer that's currently not used.
13394 * For now just a placeholder.
13396 * Context: May be called under interrupt context
13400 sd_checksum_uscsi_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
)
13402 ASSERT(un
!= NULL
);
13403 ASSERT(bp
!= NULL
);
13404 ASSERT(!mutex_owned(SD_MUTEX(un
)));
13405 SD_NEXT_IODONE(index
, un
, bp
);
13410 * Function: sd_pm_iostart
13412 * Description: iostart-side routine for Power mangement.
13414 * Context: Kernel thread context
13418 sd_pm_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
)
13420 ASSERT(un
!= NULL
);
13421 ASSERT(bp
!= NULL
);
13422 ASSERT(!mutex_owned(SD_MUTEX(un
)));
13423 ASSERT(!mutex_owned(&un
->un_pm_mutex
));
13425 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_iostart: entry\n");
13427 if (sd_pm_entry(un
) != DDI_SUCCESS
) {
13429 * Set up to return the failed buf back up the 'iodone'
13430 * side of the calling chain.
13433 bp
->b_resid
= bp
->b_bcount
;
13435 SD_BEGIN_IODONE(index
, un
, bp
);
13437 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_iostart: exit\n");
13441 SD_NEXT_IOSTART(index
, un
, bp
);
13443 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_iostart: exit\n");
13448 * Function: sd_pm_iodone
13450 * Description: iodone-side routine for power mangement.
13452 * Context: may be called from interrupt context
13456 sd_pm_iodone(int index
, struct sd_lun
*un
, struct buf
*bp
)
13458 ASSERT(un
!= NULL
);
13459 ASSERT(bp
!= NULL
);
13460 ASSERT(!mutex_owned(&un
->un_pm_mutex
));
13462 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_iodone: entry\n");
13465 * After attach the following flag is only read, so don't
13466 * take the penalty of acquiring a mutex for it.
13468 if (un
->un_f_pm_is_enabled
== TRUE
) {
13472 SD_NEXT_IODONE(index
, un
, bp
);
13474 SD_TRACE(SD_LOG_IO_PM
, un
, "sd_pm_iodone: exit\n");
13479 * Function: sd_core_iostart
13481 * Description: Primary driver function for enqueuing buf(9S) structs from
13482 * the system and initiating IO to the target device
13484 * Context: Kernel thread context. Can sleep.
13486 * Assumptions: - The given xp->xb_blkno is absolute
13487 * (ie, relative to the start of the device).
13488 * - The IO is to be done using the native blocksize of
13489 * the device, as specified in un->un_tgt_blocksize.
13493 sd_core_iostart(int index
, struct sd_lun
*un
, struct buf
*bp
)
13495 struct sd_xbuf
*xp
;
13497 ASSERT(un
!= NULL
);
13498 ASSERT(bp
!= NULL
);
13499 ASSERT(!mutex_owned(SD_MUTEX(un
)));
13500 ASSERT(bp
->b_resid
== 0);
13502 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_core_iostart: entry: bp:0x%p\n", bp
);
13504 xp
= SD_GET_XBUF(bp
);
13505 ASSERT(xp
!= NULL
);
13507 mutex_enter(SD_MUTEX(un
));
13510 * If we are currently in the failfast state, fail any new IO
13511 * that has B_FAILFAST set, then return.
13513 if ((bp
->b_flags
& B_FAILFAST
) &&
13514 (un
->un_failfast_state
== SD_FAILFAST_ACTIVE
)) {
13515 mutex_exit(SD_MUTEX(un
));
13517 bp
->b_resid
= bp
->b_bcount
;
13518 SD_BEGIN_IODONE(index
, un
, bp
);
13522 if (SD_IS_DIRECT_PRIORITY(xp
)) {
13524 * Priority command -- transport it immediately.
13526 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13527 * because all direct priority commands should be associated
13528 * with error recovery actions which we don't want to retry.
13530 sd_start_cmds(un
, bp
);
13533 * Normal command -- add it to the wait queue, then start
13534 * transporting commands from the wait queue.
13536 sd_add_buf_to_waitq(un
, bp
);
13537 SD_UPDATE_KSTATS(un
, kstat_waitq_enter
, bp
);
13538 sd_start_cmds(un
, NULL
);
13541 mutex_exit(SD_MUTEX(un
));
13543 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_core_iostart: exit: bp:0x%p\n", bp
);
13548 * Function: sd_init_cdb_limits
13550 * Description: This is to handle scsi_pkt initialization differences
13551 * between the driver platforms.
13553 * Legacy behaviors:
13555 * If the block number or the sector count exceeds the
13556 * capabilities of a Group 0 command, shift over to a
13557 * Group 1 command. We don't blindly use Group 1
13558 * commands because a) some drives (CDC Wren IVs) get a
13559 * bit confused, and b) there is probably a fair amount
13560 * of speed difference for a target to receive and decode
13561 * a 10 byte command instead of a 6 byte command.
13563 * The xfer time difference of 6 vs 10 byte CDBs is
13564 * still significant so this code is still worthwhile.
13565 * 10 byte CDBs are very inefficient with the fas HBA driver
13566 * and older disks. Each CDB byte took 1 usec with some
13569 * Context: Must be called at attach time
13573 sd_init_cdb_limits(struct sd_lun
*un
)
13578 * Use CDB_GROUP1 commands for most devices except for
13579 * parallel SCSI fixed drives in which case we get better
13580 * performance using CDB_GROUP0 commands (where applicable).
13582 un
->un_mincdb
= SD_CDB_GROUP1
;
13583 #if !defined(__fibre)
13584 if (!un
->un_f_is_fibre
&& !un
->un_f_cfg_is_atapi
&& !ISROD(un
) &&
13585 !un
->un_f_has_removable_media
) {
13586 un
->un_mincdb
= SD_CDB_GROUP0
;
13591 * Try to read the max-cdb-length supported by HBA.
13593 un
->un_max_hba_cdb
= scsi_ifgetcap(SD_ADDRESS(un
), "max-cdb-length", 1);
13594 if (0 >= un
->un_max_hba_cdb
) {
13595 un
->un_max_hba_cdb
= CDB_GROUP4
;
13596 hba_cdb_limit
= SD_CDB_GROUP4
;
13597 } else if (0 < un
->un_max_hba_cdb
&&
13598 un
->un_max_hba_cdb
< CDB_GROUP1
) {
13599 hba_cdb_limit
= SD_CDB_GROUP0
;
13600 } else if (CDB_GROUP1
<= un
->un_max_hba_cdb
&&
13601 un
->un_max_hba_cdb
< CDB_GROUP5
) {
13602 hba_cdb_limit
= SD_CDB_GROUP1
;
13603 } else if (CDB_GROUP5
<= un
->un_max_hba_cdb
&&
13604 un
->un_max_hba_cdb
< CDB_GROUP4
) {
13605 hba_cdb_limit
= SD_CDB_GROUP5
;
13607 hba_cdb_limit
= SD_CDB_GROUP4
;
13611 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
13612 * commands for fixed disks unless we are building for a 32 bit
13616 un
->un_maxcdb
= (un
->un_f_has_removable_media
) ? SD_CDB_GROUP5
:
13617 min(hba_cdb_limit
, SD_CDB_GROUP4
);
13619 un
->un_maxcdb
= (un
->un_f_has_removable_media
) ? SD_CDB_GROUP5
:
13620 min(hba_cdb_limit
, SD_CDB_GROUP1
);
13623 un
->un_status_len
= (int)((un
->un_f_arq_enabled
== TRUE
)
13624 ? sizeof (struct scsi_arq_status
) : 1);
13625 un
->un_cmd_timeout
= (ushort_t
)sd_io_time
;
13626 un
->un_uscsi_timeout
= ((ISCD(un
)) ? 2 : 1) * un
->un_cmd_timeout
;
13631 * Function: sd_initpkt_for_buf
13633 * Description: Allocate and initialize for transport a scsi_pkt struct,
13634 * based upon the info specified in the given buf struct.
13636 * Assumes the xb_blkno in the request is absolute (ie,
13637 * relative to the start of the device (NOT partition!).
13638 * Also assumes that the request is using the native block
13639 * size of the device (as returned by the READ CAPACITY
13642 * Return Code: SD_PKT_ALLOC_SUCCESS
13643 * SD_PKT_ALLOC_FAILURE
13644 * SD_PKT_ALLOC_FAILURE_NO_DMA
13645 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13647 * Context: Kernel thread and may be called from software interrupt context
13648 * as part of a sdrunout callback. This function may not block or
13649 * call routines that block
13653 sd_initpkt_for_buf(struct buf
*bp
, struct scsi_pkt
**pktpp
)
13655 struct sd_xbuf
*xp
;
13656 struct scsi_pkt
*pktp
= NULL
;
13659 daddr_t startblock
;
13663 ASSERT(bp
!= NULL
);
13664 ASSERT(pktpp
!= NULL
);
13665 xp
= SD_GET_XBUF(bp
);
13666 ASSERT(xp
!= NULL
);
13667 un
= SD_GET_UN(bp
);
13668 ASSERT(un
!= NULL
);
13669 ASSERT(mutex_owned(SD_MUTEX(un
)));
13670 ASSERT(bp
->b_resid
== 0);
13672 SD_TRACE(SD_LOG_IO_CORE
, un
,
13673 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp
);
13675 mutex_exit(SD_MUTEX(un
));
13677 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13678 if (xp
->xb_pkt_flags
& SD_XB_DMA_FREED
) {
13680 * Already have a scsi_pkt -- just need DMA resources.
13681 * We must recompute the CDB in case the mapping returns
13682 * a nonzero pkt_resid.
13683 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13684 * that is being retried, the unmap/remap of the DMA resouces
13685 * will result in the entire transfer starting over again
13686 * from the very first block.
13688 ASSERT(xp
->xb_pktp
!= NULL
);
13689 pktp
= xp
->xb_pktp
;
13693 #endif /* __i386 || __amd64 */
13695 startblock
= xp
->xb_blkno
; /* Absolute block num. */
13696 blockcount
= SD_BYTES2TGTBLOCKS(un
, bp
->b_bcount
);
13698 cmd_flags
= un
->un_pkt_flags
| (xp
->xb_pkt_flags
& SD_XB_INITPKT_MASK
);
13701 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13702 * call scsi_init_pkt, and build the CDB.
13704 rval
= sd_setup_rw_pkt(un
, &pktp
, bp
,
13705 cmd_flags
, sdrunout
, (caddr_t
)un
,
13706 startblock
, blockcount
);
13712 * If partial DMA is being used and required for this transfer.
13715 if ((un
->un_pkt_flags
& PKT_DMA_PARTIAL
) != 0 &&
13716 (pktp
->pkt_resid
!= 0)) {
13719 * Save the CDB length and pkt_resid for the
13722 xp
->xb_dma_resid
= pktp
->pkt_resid
;
13725 pktp
->pkt_resid
= 0;
13728 xp
->xb_dma_resid
= 0;
13731 pktp
->pkt_flags
= un
->un_tagflags
;
13732 pktp
->pkt_time
= un
->un_cmd_timeout
;
13733 pktp
->pkt_comp
= sdintr
;
13735 pktp
->pkt_private
= bp
;
13738 SD_TRACE(SD_LOG_IO_CORE
, un
,
13739 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp
);
13741 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13742 xp
->xb_pkt_flags
&= ~SD_XB_DMA_FREED
;
13745 mutex_enter(SD_MUTEX(un
));
13746 return (SD_PKT_ALLOC_SUCCESS
);
13751 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13752 * from sd_setup_rw_pkt.
13754 ASSERT(rval
== SD_PKT_ALLOC_FAILURE
);
13756 if (rval
== SD_PKT_ALLOC_FAILURE
) {
13759 * Set the driver state to RWAIT to indicate the driver
13760 * is waiting on resource allocations. The driver will not
13761 * suspend, pm_suspend, or detatch while the state is RWAIT.
13763 mutex_enter(SD_MUTEX(un
));
13764 New_state(un
, SD_STATE_RWAIT
);
13766 SD_ERROR(SD_LOG_IO_CORE
, un
,
13767 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp
);
13769 if ((bp
->b_flags
& B_ERROR
) != 0) {
13770 return (SD_PKT_ALLOC_FAILURE_NO_DMA
);
13772 return (SD_PKT_ALLOC_FAILURE
);
13775 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13777 * This should never happen. Maybe someone messed with the
13778 * kernel's minphys?
13780 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
13781 "Request rejected: too large for CDB: "
13782 "lba:0x%08lx len:0x%08lx\n", startblock
, blockcount
);
13783 SD_ERROR(SD_LOG_IO_CORE
, un
,
13784 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp
);
13785 mutex_enter(SD_MUTEX(un
));
13786 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
);
13793 * Function: sd_destroypkt_for_buf
13795 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13797 * Context: Kernel thread or interrupt context
13801 sd_destroypkt_for_buf(struct buf
*bp
)
13803 ASSERT(bp
!= NULL
);
13804 ASSERT(SD_GET_UN(bp
) != NULL
);
13806 SD_TRACE(SD_LOG_IO_CORE
, SD_GET_UN(bp
),
13807 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp
);
13809 ASSERT(SD_GET_PKTP(bp
) != NULL
);
13810 scsi_destroy_pkt(SD_GET_PKTP(bp
));
13812 SD_TRACE(SD_LOG_IO_CORE
, SD_GET_UN(bp
),
13813 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp
);
13817 * Function: sd_setup_rw_pkt
13819 * Description: Determines appropriate CDB group for the requested LBA
13820 * and transfer length, calls scsi_init_pkt, and builds
13821 * the CDB. Do not use for partial DMA transfers except
13822 * for the initial transfer since the CDB size must
13825 * Context: Kernel thread and may be called from software interrupt
13826 * context as part of a sdrunout callback. This function may not
13827 * block or call routines that block
13832 sd_setup_rw_pkt(struct sd_lun
*un
,
13833 struct scsi_pkt
**pktpp
, struct buf
*bp
, int flags
,
13834 int (*callback
)(caddr_t
), caddr_t callback_arg
,
13835 diskaddr_t lba
, uint32_t blockcount
)
13837 struct scsi_pkt
*return_pktp
;
13838 union scsi_cdb
*cdbp
;
13839 struct sd_cdbinfo
*cp
= NULL
;
13843 * See which size CDB to use, based upon the request.
13845 for (i
= un
->un_mincdb
; i
<= un
->un_maxcdb
; i
++) {
13848 * Check lba and block count against sd_cdbtab limits.
13849 * In the partial DMA case, we have to use the same size
13850 * CDB for all the transfers. Check lba + blockcount
13851 * against the max LBA so we know that segment of the
13852 * transfer can use the CDB we select.
13854 if ((lba
+ blockcount
- 1 <= sd_cdbtab
[i
].sc_maxlba
) &&
13855 (blockcount
<= sd_cdbtab
[i
].sc_maxlen
)) {
13858 * The command will fit into the CDB type
13859 * specified by sd_cdbtab[i].
13861 cp
= sd_cdbtab
+ i
;
13864 * Call scsi_init_pkt so we can fill in the
13867 return_pktp
= scsi_init_pkt(SD_ADDRESS(un
), *pktpp
,
13868 bp
, cp
->sc_grpcode
, un
->un_status_len
, 0,
13869 flags
, callback
, callback_arg
);
13871 if (return_pktp
!= NULL
) {
13874 * Return new value of pkt
13876 *pktpp
= return_pktp
;
13879 * To be safe, zero the CDB insuring there is
13880 * no leftover data from a previous command.
13882 bzero(return_pktp
->pkt_cdbp
, cp
->sc_grpcode
);
13885 * Handle partial DMA mapping
13887 if (return_pktp
->pkt_resid
!= 0) {
13890 * Not going to xfer as many blocks as
13891 * originally expected
13894 SD_BYTES2TGTBLOCKS(un
,
13895 return_pktp
->pkt_resid
);
13898 cdbp
= (union scsi_cdb
*)return_pktp
->pkt_cdbp
;
13901 * Set command byte based on the CDB
13904 cdbp
->scc_cmd
= cp
->sc_grpmask
|
13905 ((bp
->b_flags
& B_READ
) ?
13906 SCMD_READ
: SCMD_WRITE
);
13908 SD_FILL_SCSI1_LUN(un
, return_pktp
);
13911 * Fill in LBA and length
13913 ASSERT((cp
->sc_grpcode
== CDB_GROUP1
) ||
13914 (cp
->sc_grpcode
== CDB_GROUP4
) ||
13915 (cp
->sc_grpcode
== CDB_GROUP0
) ||
13916 (cp
->sc_grpcode
== CDB_GROUP5
));
13918 if (cp
->sc_grpcode
== CDB_GROUP1
) {
13919 FORMG1ADDR(cdbp
, lba
);
13920 FORMG1COUNT(cdbp
, blockcount
);
13922 } else if (cp
->sc_grpcode
== CDB_GROUP4
) {
13923 FORMG4LONGADDR(cdbp
, lba
);
13924 FORMG4COUNT(cdbp
, blockcount
);
13926 } else if (cp
->sc_grpcode
== CDB_GROUP0
) {
13927 FORMG0ADDR(cdbp
, lba
);
13928 FORMG0COUNT(cdbp
, blockcount
);
13930 } else if (cp
->sc_grpcode
== CDB_GROUP5
) {
13931 FORMG5ADDR(cdbp
, lba
);
13932 FORMG5COUNT(cdbp
, blockcount
);
13937 * It should be impossible to not match one
13938 * of the CDB types above, so we should never
13939 * reach this point. Set the CDB command byte
13940 * to test-unit-ready to avoid writing
13941 * to somewhere we don't intend.
13943 cdbp
->scc_cmd
= SCMD_TEST_UNIT_READY
;
13944 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
);
13947 * Couldn't get scsi_pkt
13949 return (SD_PKT_ALLOC_FAILURE
);
13955 * None of the available CDB types were suitable. This really
13956 * should never happen: on a 64 bit system we support
13957 * READ16/WRITE16 which will hold an entire 64 bit disk address
13958 * and on a 32 bit system we will refuse to bind to a device
13959 * larger than 2TB so addresses will never be larger than 32 bits.
13961 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
);
13965 * Function: sd_setup_next_rw_pkt
13967 * Description: Setup packet for partial DMA transfers, except for the
13968 * initial transfer. sd_setup_rw_pkt should be used for
13969 * the initial transfer.
13971 * Context: Kernel thread and may be called from interrupt context.
13975 sd_setup_next_rw_pkt(struct sd_lun
*un
,
13976 struct scsi_pkt
*pktp
, struct buf
*bp
,
13977 diskaddr_t lba
, uint32_t blockcount
)
13980 union scsi_cdb
*cdbp
;
13981 uchar_t cdb_group_id
;
13983 ASSERT(pktp
!= NULL
);
13984 ASSERT(pktp
->pkt_cdbp
!= NULL
);
13986 cdbp
= (union scsi_cdb
*)pktp
->pkt_cdbp
;
13987 com
= cdbp
->scc_cmd
;
13988 cdb_group_id
= CDB_GROUPID(com
);
13990 ASSERT((cdb_group_id
== CDB_GROUPID_0
) ||
13991 (cdb_group_id
== CDB_GROUPID_1
) ||
13992 (cdb_group_id
== CDB_GROUPID_4
) ||
13993 (cdb_group_id
== CDB_GROUPID_5
));
13996 * Move pkt to the next portion of the xfer.
13997 * func is NULL_FUNC so we do not have to release
13998 * the disk mutex here.
14000 if (scsi_init_pkt(SD_ADDRESS(un
), pktp
, bp
, 0, 0, 0, 0,
14001 NULL_FUNC
, NULL
) == pktp
) {
14002 /* Success. Handle partial DMA */
14003 if (pktp
->pkt_resid
!= 0) {
14005 SD_BYTES2TGTBLOCKS(un
, pktp
->pkt_resid
);
14008 cdbp
->scc_cmd
= com
;
14009 SD_FILL_SCSI1_LUN(un
, pktp
);
14010 if (cdb_group_id
== CDB_GROUPID_1
) {
14011 FORMG1ADDR(cdbp
, lba
);
14012 FORMG1COUNT(cdbp
, blockcount
);
14014 } else if (cdb_group_id
== CDB_GROUPID_4
) {
14015 FORMG4LONGADDR(cdbp
, lba
);
14016 FORMG4COUNT(cdbp
, blockcount
);
14018 } else if (cdb_group_id
== CDB_GROUPID_0
) {
14019 FORMG0ADDR(cdbp
, lba
);
14020 FORMG0COUNT(cdbp
, blockcount
);
14022 } else if (cdb_group_id
== CDB_GROUPID_5
) {
14023 FORMG5ADDR(cdbp
, lba
);
14024 FORMG5COUNT(cdbp
, blockcount
);
14029 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
);
14033 * Error setting up next portion of cmd transfer.
14034 * Something is definitely very wrong and this
14035 * should not happen.
14037 return (SD_PKT_ALLOC_FAILURE
);
14041 * Function: sd_initpkt_for_uscsi
14043 * Description: Allocate and initialize for transport a scsi_pkt struct,
14044 * based upon the info specified in the given uscsi_cmd struct.
14046 * Return Code: SD_PKT_ALLOC_SUCCESS
14047 * SD_PKT_ALLOC_FAILURE
14048 * SD_PKT_ALLOC_FAILURE_NO_DMA
14049 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
14051 * Context: Kernel thread and may be called from software interrupt context
14052 * as part of a sdrunout callback. This function may not block or
14053 * call routines that block
14057 sd_initpkt_for_uscsi(struct buf
*bp
, struct scsi_pkt
**pktpp
)
14059 struct uscsi_cmd
*uscmd
;
14060 struct sd_xbuf
*xp
;
14061 struct scsi_pkt
*pktp
;
14063 uint32_t flags
= 0;
14065 ASSERT(bp
!= NULL
);
14066 ASSERT(pktpp
!= NULL
);
14067 xp
= SD_GET_XBUF(bp
);
14068 ASSERT(xp
!= NULL
);
14069 un
= SD_GET_UN(bp
);
14070 ASSERT(un
!= NULL
);
14071 ASSERT(mutex_owned(SD_MUTEX(un
)));
14073 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14074 uscmd
= (struct uscsi_cmd
*)xp
->xb_pktinfo
;
14075 ASSERT(uscmd
!= NULL
);
14077 SD_TRACE(SD_LOG_IO_CORE
, un
,
14078 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp
);
14081 * Allocate the scsi_pkt for the command.
14082 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14083 * during scsi_init_pkt time and will continue to use the
14084 * same path as long as the same scsi_pkt is used without
14085 * intervening scsi_dma_free(). Since uscsi command does
14086 * not call scsi_dmafree() before retry failed command, it
14087 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT
14088 * set such that scsi_vhci can use other available path for
14089 * retry. Besides, ucsci command does not allow DMA breakup,
14090 * so there is no need to set PKT_DMA_PARTIAL flag.
14092 if (uscmd
->uscsi_rqlen
> SENSE_LENGTH
) {
14093 pktp
= scsi_init_pkt(SD_ADDRESS(un
), NULL
,
14094 ((bp
->b_bcount
!= 0) ? bp
: NULL
), uscmd
->uscsi_cdblen
,
14095 ((int)(uscmd
->uscsi_rqlen
) + sizeof (struct scsi_arq_status
)
14096 - sizeof (struct scsi_extended_sense
)), 0,
14097 (un
->un_pkt_flags
& ~PKT_DMA_PARTIAL
) | PKT_XARQ
,
14098 sdrunout
, (caddr_t
)un
);
14100 pktp
= scsi_init_pkt(SD_ADDRESS(un
), NULL
,
14101 ((bp
->b_bcount
!= 0) ? bp
: NULL
), uscmd
->uscsi_cdblen
,
14102 sizeof (struct scsi_arq_status
), 0,
14103 (un
->un_pkt_flags
& ~PKT_DMA_PARTIAL
),
14104 sdrunout
, (caddr_t
)un
);
14107 if (pktp
== NULL
) {
14110 * Set the driver state to RWAIT to indicate the driver
14111 * is waiting on resource allocations. The driver will not
14112 * suspend, pm_suspend, or detatch while the state is RWAIT.
14114 New_state(un
, SD_STATE_RWAIT
);
14116 SD_ERROR(SD_LOG_IO_CORE
, un
,
14117 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp
);
14119 if ((bp
->b_flags
& B_ERROR
) != 0) {
14120 return (SD_PKT_ALLOC_FAILURE_NO_DMA
);
14122 return (SD_PKT_ALLOC_FAILURE
);
14126 * We do not do DMA breakup for USCSI commands, so return failure
14127 * here if all the needed DMA resources were not allocated.
14129 if ((un
->un_pkt_flags
& PKT_DMA_PARTIAL
) &&
14130 (bp
->b_bcount
!= 0) && (pktp
->pkt_resid
!= 0)) {
14131 scsi_destroy_pkt(pktp
);
14132 SD_ERROR(SD_LOG_IO_CORE
, un
, "sd_initpkt_for_uscsi: "
14133 "No partial DMA for USCSI. exit: buf:0x%p\n", bp
);
14134 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL
);
14137 /* Init the cdb from the given uscsi struct */
14138 (void) scsi_setup_cdb((union scsi_cdb
*)pktp
->pkt_cdbp
,
14139 uscmd
->uscsi_cdb
[0], 0, 0, 0);
14141 SD_FILL_SCSI1_LUN(un
, pktp
);
14144 * Set up the optional USCSI flags. See the uscsi (7I) man page
14145 * for listing of the supported flags.
14148 if (uscmd
->uscsi_flags
& USCSI_SILENT
) {
14149 flags
|= FLAG_SILENT
;
14152 if (uscmd
->uscsi_flags
& USCSI_DIAGNOSE
) {
14153 flags
|= FLAG_DIAGNOSE
;
14156 if (uscmd
->uscsi_flags
& USCSI_ISOLATE
) {
14157 flags
|= FLAG_ISOLATE
;
14160 if (un
->un_f_is_fibre
== FALSE
) {
14161 if (uscmd
->uscsi_flags
& USCSI_RENEGOT
) {
14162 flags
|= FLAG_RENEGOTIATE_WIDE_SYNC
;
14167 * Set the pkt flags here so we save time later.
14168 * Note: These flags are NOT in the uscsi man page!!!
14170 if (uscmd
->uscsi_flags
& USCSI_HEAD
) {
14171 flags
|= FLAG_HEAD
;
14174 if (uscmd
->uscsi_flags
& USCSI_NOINTR
) {
14175 flags
|= FLAG_NOINTR
;
14179 * For tagged queueing, things get a bit complicated.
14180 * Check first for head of queue and last for ordered queue.
14181 * If neither head nor order, use the default driver tag flags.
14183 if ((uscmd
->uscsi_flags
& USCSI_NOTAG
) == 0) {
14184 if (uscmd
->uscsi_flags
& USCSI_HTAG
) {
14185 flags
|= FLAG_HTAG
;
14186 } else if (uscmd
->uscsi_flags
& USCSI_OTAG
) {
14187 flags
|= FLAG_OTAG
;
14189 flags
|= un
->un_tagflags
& FLAG_TAGMASK
;
14193 if (uscmd
->uscsi_flags
& USCSI_NODISCON
) {
14194 flags
= (flags
& ~FLAG_TAGMASK
) | FLAG_NODISCON
;
14197 pktp
->pkt_flags
= flags
;
14199 /* Transfer uscsi information to scsi_pkt */
14200 (void) scsi_uscsi_pktinit(uscmd
, pktp
);
14202 /* Copy the caller's CDB into the pkt... */
14203 bcopy(uscmd
->uscsi_cdb
, pktp
->pkt_cdbp
, uscmd
->uscsi_cdblen
);
14205 if (uscmd
->uscsi_timeout
== 0) {
14206 pktp
->pkt_time
= un
->un_uscsi_timeout
;
14208 pktp
->pkt_time
= uscmd
->uscsi_timeout
;
14211 /* need it later to identify USCSI request in sdintr */
14212 xp
->xb_pkt_flags
|= SD_XB_USCSICMD
;
14214 xp
->xb_sense_resid
= uscmd
->uscsi_rqresid
;
14216 pktp
->pkt_private
= bp
;
14217 pktp
->pkt_comp
= sdintr
;
14220 SD_TRACE(SD_LOG_IO_CORE
, un
,
14221 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp
);
14223 return (SD_PKT_ALLOC_SUCCESS
);
14228 * Function: sd_destroypkt_for_uscsi
14230 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14231 * IOs.. Also saves relevant info into the associated uscsi_cmd
14234 * Context: May be called under interrupt context
14238 sd_destroypkt_for_uscsi(struct buf
*bp
)
14240 struct uscsi_cmd
*uscmd
;
14241 struct sd_xbuf
*xp
;
14242 struct scsi_pkt
*pktp
;
14244 struct sd_uscsi_info
*suip
;
14246 ASSERT(bp
!= NULL
);
14247 xp
= SD_GET_XBUF(bp
);
14248 ASSERT(xp
!= NULL
);
14249 un
= SD_GET_UN(bp
);
14250 ASSERT(un
!= NULL
);
14251 ASSERT(!mutex_owned(SD_MUTEX(un
)));
14252 pktp
= SD_GET_PKTP(bp
);
14253 ASSERT(pktp
!= NULL
);
14255 SD_TRACE(SD_LOG_IO_CORE
, un
,
14256 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp
);
14258 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14259 uscmd
= (struct uscsi_cmd
*)xp
->xb_pktinfo
;
14260 ASSERT(uscmd
!= NULL
);
14262 /* Save the status and the residual into the uscsi_cmd struct */
14263 uscmd
->uscsi_status
= ((*(pktp
)->pkt_scbp
) & STATUS_MASK
);
14264 uscmd
->uscsi_resid
= bp
->b_resid
;
14266 /* Transfer scsi_pkt information to uscsi */
14267 (void) scsi_uscsi_pktfini(pktp
, uscmd
);
14270 * If enabled, copy any saved sense data into the area specified
14271 * by the uscsi command.
14273 if (((uscmd
->uscsi_flags
& USCSI_RQENABLE
) != 0) &&
14274 (uscmd
->uscsi_rqlen
!= 0) && (uscmd
->uscsi_rqbuf
!= NULL
)) {
14276 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14277 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14279 uscmd
->uscsi_rqstatus
= xp
->xb_sense_status
;
14280 uscmd
->uscsi_rqresid
= xp
->xb_sense_resid
;
14281 if (uscmd
->uscsi_rqlen
> SENSE_LENGTH
) {
14282 bcopy(xp
->xb_sense_data
, uscmd
->uscsi_rqbuf
,
14285 bcopy(xp
->xb_sense_data
, uscmd
->uscsi_rqbuf
,
14290 * The following assignments are for SCSI FMA.
14292 ASSERT(xp
->xb_private
!= NULL
);
14293 suip
= (struct sd_uscsi_info
*)xp
->xb_private
;
14294 suip
->ui_pkt_reason
= pktp
->pkt_reason
;
14295 suip
->ui_pkt_state
= pktp
->pkt_state
;
14296 suip
->ui_pkt_statistics
= pktp
->pkt_statistics
;
14297 suip
->ui_lba
= (uint64_t)SD_GET_BLKNO(bp
);
14299 /* We are done with the scsi_pkt; free it now */
14300 ASSERT(SD_GET_PKTP(bp
) != NULL
);
14301 scsi_destroy_pkt(SD_GET_PKTP(bp
));
14303 SD_TRACE(SD_LOG_IO_CORE
, un
,
14304 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp
);
14309 * Function: sd_bioclone_alloc
14311 * Description: Allocate a buf(9S) and init it as per the given buf
14312 * and the various arguments. The associated sd_xbuf
14313 * struct is (nearly) duplicated. The struct buf *bp
14314 * argument is saved in new_xp->xb_private.
14316 * Arguments: bp - ptr the the buf(9S) to be "shadowed"
14317 * datalen - size of data area for the shadow bp
14318 * blkno - starting LBA
14319 * func - function pointer for b_iodone in the shadow buf. (May
14320 * be NULL if none.)
14322 * Return Code: Pointer to allocates buf(9S) struct
14324 * Context: Can sleep.
14327 static struct buf
*
14328 sd_bioclone_alloc(struct buf
*bp
, size_t datalen
,
14329 daddr_t blkno
, int (*func
)(struct buf
*))
14332 struct sd_xbuf
*xp
;
14333 struct sd_xbuf
*new_xp
;
14334 struct buf
*new_bp
;
14336 ASSERT(bp
!= NULL
);
14337 xp
= SD_GET_XBUF(bp
);
14338 ASSERT(xp
!= NULL
);
14339 un
= SD_GET_UN(bp
);
14340 ASSERT(un
!= NULL
);
14341 ASSERT(!mutex_owned(SD_MUTEX(un
)));
14343 new_bp
= bioclone(bp
, 0, datalen
, SD_GET_DEV(un
), blkno
, func
,
14346 new_bp
->b_lblkno
= blkno
;
14349 * Allocate an xbuf for the shadow bp and copy the contents of the
14350 * original xbuf into it.
14352 new_xp
= kmem_alloc(sizeof (struct sd_xbuf
), KM_SLEEP
);
14353 bcopy(xp
, new_xp
, sizeof (struct sd_xbuf
));
14356 * The given bp is automatically saved in the xb_private member
14357 * of the new xbuf. Callers are allowed to depend on this.
14359 new_xp
->xb_private
= bp
;
14361 new_bp
->b_private
= new_xp
;
14367 * Function: sd_shadow_buf_alloc
14369 * Description: Allocate a buf(9S) and init it as per the given buf
14370 * and the various arguments. The associated sd_xbuf
14371 * struct is (nearly) duplicated. The struct buf *bp
14372 * argument is saved in new_xp->xb_private.
14374 * Arguments: bp - ptr the the buf(9S) to be "shadowed"
14375 * datalen - size of data area for the shadow bp
14376 * bflags - B_READ or B_WRITE (pseudo flag)
14377 * blkno - starting LBA
14378 * func - function pointer for b_iodone in the shadow buf. (May
14379 * be NULL if none.)
14381 * Return Code: Pointer to allocates buf(9S) struct
14383 * Context: Can sleep.
14386 static struct buf
*
14387 sd_shadow_buf_alloc(struct buf
*bp
, size_t datalen
, uint_t bflags
,
14388 daddr_t blkno
, int (*func
)(struct buf
*))
14391 struct sd_xbuf
*xp
;
14392 struct sd_xbuf
*new_xp
;
14393 struct buf
*new_bp
;
14395 ASSERT(bp
!= NULL
);
14396 xp
= SD_GET_XBUF(bp
);
14397 ASSERT(xp
!= NULL
);
14398 un
= SD_GET_UN(bp
);
14399 ASSERT(un
!= NULL
);
14400 ASSERT(!mutex_owned(SD_MUTEX(un
)));
14402 if (bp
->b_flags
& (B_PAGEIO
| B_PHYS
)) {
14406 bflags
&= (B_READ
| B_WRITE
);
14407 #if defined(__i386) || defined(__amd64)
14408 new_bp
= getrbuf(KM_SLEEP
);
14409 new_bp
->b_un
.b_addr
= kmem_zalloc(datalen
, KM_SLEEP
);
14410 new_bp
->b_bcount
= datalen
;
14411 new_bp
->b_flags
= bflags
|
14412 (bp
->b_flags
& ~(B_PAGEIO
| B_PHYS
| B_REMAPPED
| B_SHADOW
));
14414 new_bp
= scsi_alloc_consistent_buf(SD_ADDRESS(un
), NULL
,
14415 datalen
, bflags
, SLEEP_FUNC
, NULL
);
14417 new_bp
->av_forw
= NULL
;
14418 new_bp
->av_back
= NULL
;
14419 new_bp
->b_dev
= bp
->b_dev
;
14420 new_bp
->b_blkno
= blkno
;
14421 new_bp
->b_iodone
= func
;
14422 new_bp
->b_edev
= bp
->b_edev
;
14423 new_bp
->b_resid
= 0;
14425 /* We need to preserve the B_FAILFAST flag */
14426 if (bp
->b_flags
& B_FAILFAST
) {
14427 new_bp
->b_flags
|= B_FAILFAST
;
14431 * Allocate an xbuf for the shadow bp and copy the contents of the
14432 * original xbuf into it.
14434 new_xp
= kmem_alloc(sizeof (struct sd_xbuf
), KM_SLEEP
);
14435 bcopy(xp
, new_xp
, sizeof (struct sd_xbuf
));
14437 /* Need later to copy data between the shadow buf & original buf! */
14438 new_xp
->xb_pkt_flags
|= PKT_CONSISTENT
;
14441 * The given bp is automatically saved in the xb_private member
14442 * of the new xbuf. Callers are allowed to depend on this.
14444 new_xp
->xb_private
= bp
;
14446 new_bp
->b_private
= new_xp
;
14452 * Function: sd_bioclone_free
14454 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14455 * in the larger than partition operation.
14457 * Context: May be called under interrupt context
14461 sd_bioclone_free(struct buf
*bp
)
14463 struct sd_xbuf
*xp
;
14465 ASSERT(bp
!= NULL
);
14466 xp
= SD_GET_XBUF(bp
);
14467 ASSERT(xp
!= NULL
);
14470 * Call bp_mapout() before freeing the buf, in case a lower
14471 * layer or HBA had done a bp_mapin(). we must do this here
14472 * as we are the "originator" of the shadow buf.
14477 * Null out b_iodone before freeing the bp, to ensure that the driver
14478 * never gets confused by a stale value in this field. (Just a little
14479 * extra defensiveness here.)
14481 bp
->b_iodone
= NULL
;
14485 kmem_free(xp
, sizeof (struct sd_xbuf
));
14489 * Function: sd_shadow_buf_free
14491 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14493 * Context: May be called under interrupt context
14497 sd_shadow_buf_free(struct buf
*bp
)
14499 struct sd_xbuf
*xp
;
14501 ASSERT(bp
!= NULL
);
14502 xp
= SD_GET_XBUF(bp
);
14503 ASSERT(xp
!= NULL
);
14505 #if defined(__sparc)
14507 * Call bp_mapout() before freeing the buf, in case a lower
14508 * layer or HBA had done a bp_mapin(). we must do this here
14509 * as we are the "originator" of the shadow buf.
14515 * Null out b_iodone before freeing the bp, to ensure that the driver
14516 * never gets confused by a stale value in this field. (Just a little
14517 * extra defensiveness here.)
14519 bp
->b_iodone
= NULL
;
14521 #if defined(__i386) || defined(__amd64)
14522 kmem_free(bp
->b_un
.b_addr
, bp
->b_bcount
);
14525 scsi_free_consistent_buf(bp
);
14528 kmem_free(xp
, sizeof (struct sd_xbuf
));
14533 * Function: sd_print_transport_rejected_message
14535 * Description: This implements the ludicrously complex rules for printing
14536 * a "transport rejected" message. This is to address the
14537 * specific problem of having a flood of this error message
14538 * produced when a failover occurs.
14544 sd_print_transport_rejected_message(struct sd_lun
*un
, struct sd_xbuf
*xp
,
14547 ASSERT(un
!= NULL
);
14548 ASSERT(mutex_owned(SD_MUTEX(un
)));
14549 ASSERT(xp
!= NULL
);
14552 * Print the "transport rejected" message under the following
14555 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14556 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14557 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14558 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
14559 * scsi_transport(9F) (which indicates that the target might have
14560 * gone off-line). This uses the un->un_tran_fatal_count
14561 * count, which is incremented whenever a TRAN_FATAL_ERROR is
14562 * received, and reset to zero whenver a TRAN_ACCEPT is returned
14563 * from scsi_transport().
14565 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14566 * the preceeding cases in order for the message to be printed.
14568 if (((xp
->xb_pktp
->pkt_flags
& FLAG_SILENT
) == 0) &&
14569 (SD_FM_LOG(un
) == SD_FM_LOG_NSUP
)) {
14570 if ((sd_level_mask
& SD_LOGMASK_DIAG
) ||
14571 (code
!= TRAN_FATAL_ERROR
) ||
14572 (un
->un_tran_fatal_count
== 1)) {
14575 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
14576 "transport rejected bad packet\n");
14578 case TRAN_FATAL_ERROR
:
14579 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
14580 "transport rejected fatal error\n");
14583 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
14584 "transport rejected (%d)\n", code
);
14593 * Function: sd_add_buf_to_waitq
14595 * Description: Add the given buf(9S) struct to the wait queue for the
14596 * instance. If sorting is enabled, then the buf is added
14597 * to the queue via an elevator sort algorithm (a la
14598 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
14599 * If sorting is not enabled, then the buf is just added
14600 * to the end of the wait queue.
14602 * Return Code: void
14604 * Context: Does not sleep/block, therefore technically can be called
14605 * from any context. However if sorting is enabled then the
14606 * execution time is indeterminate, and may take long if
14607 * the wait queue grows large.
14611 sd_add_buf_to_waitq(struct sd_lun
*un
, struct buf
*bp
)
14615 ASSERT(bp
!= NULL
);
14616 ASSERT(un
!= NULL
);
14617 ASSERT(mutex_owned(SD_MUTEX(un
)));
14619 /* If the queue is empty, add the buf as the only entry & return. */
14620 if (un
->un_waitq_headp
== NULL
) {
14621 ASSERT(un
->un_waitq_tailp
== NULL
);
14622 un
->un_waitq_headp
= un
->un_waitq_tailp
= bp
;
14623 bp
->av_forw
= NULL
;
14627 ASSERT(un
->un_waitq_tailp
!= NULL
);
14630 * If sorting is disabled, just add the buf to the tail end of
14631 * the wait queue and return.
14633 if (un
->un_f_disksort_disabled
|| un
->un_f_enable_rmw
) {
14634 un
->un_waitq_tailp
->av_forw
= bp
;
14635 un
->un_waitq_tailp
= bp
;
14636 bp
->av_forw
= NULL
;
14641 * Sort thru the list of requests currently on the wait queue
14642 * and add the new buf request at the appropriate position.
14644 * The un->un_waitq_headp is an activity chain pointer on which
14645 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14646 * first queue holds those requests which are positioned after
14647 * the current SD_GET_BLKNO() (in the first request); the second holds
14648 * requests which came in after their SD_GET_BLKNO() number was passed.
14649 * Thus we implement a one way scan, retracting after reaching
14650 * the end of the drive to the first request on the second
14651 * queue, at which time it becomes the first queue.
14652 * A one-way scan is natural because of the way UNIX read-ahead
14653 * blocks are allocated.
14655 * If we lie after the first request, then we must locate the
14656 * second request list and add ourselves to it.
14658 ap
= un
->un_waitq_headp
;
14659 if (SD_GET_BLKNO(bp
) < SD_GET_BLKNO(ap
)) {
14660 while (ap
->av_forw
!= NULL
) {
14662 * Look for an "inversion" in the (normally
14663 * ascending) block numbers. This indicates
14664 * the start of the second request list.
14666 if (SD_GET_BLKNO(ap
->av_forw
) < SD_GET_BLKNO(ap
)) {
14668 * Search the second request list for the
14669 * first request at a larger block number.
14670 * We go before that; however if there is
14671 * no such request, we go at the end.
14674 if (SD_GET_BLKNO(bp
) <
14675 SD_GET_BLKNO(ap
->av_forw
)) {
14679 } while (ap
->av_forw
!= NULL
);
14680 goto insert
; /* after last */
14686 * No inversions... we will go after the last, and
14687 * be the first request in the second request list.
14693 * Request is at/after the current request...
14694 * sort in the first request list.
14696 while (ap
->av_forw
!= NULL
) {
14698 * We want to go after the current request (1) if
14699 * there is an inversion after it (i.e. it is the end
14700 * of the first request list), or (2) if the next
14701 * request is a larger block no. than our request.
14703 if ((SD_GET_BLKNO(ap
->av_forw
) < SD_GET_BLKNO(ap
)) ||
14704 (SD_GET_BLKNO(bp
) < SD_GET_BLKNO(ap
->av_forw
))) {
14711 * Neither a second list nor a larger request, therefore
14712 * we go at the end of the first list (which is the same
14713 * as the end of the whole schebang).
14716 bp
->av_forw
= ap
->av_forw
;
14720 * If we inserted onto the tail end of the waitq, make sure the
14721 * tail pointer is updated.
14723 if (ap
== un
->un_waitq_tailp
) {
14724 un
->un_waitq_tailp
= bp
;
14730 * Function: sd_start_cmds
14732 * Description: Remove and transport cmds from the driver queues.
14734 * Arguments: un - pointer to the unit (soft state) struct for the target.
14736 * immed_bp - ptr to a buf to be transported immediately. Only
14737 * the immed_bp is transported; bufs on the waitq are not
14738 * processed and the un_retry_bp is not checked. If immed_bp is
14739 * NULL, then normal queue processing is performed.
14741 * Context: May be called from kernel thread context, interrupt context,
14742 * or runout callback context. This function may not block or
14743 * call routines that block.
14747 sd_start_cmds(struct sd_lun
*un
, struct buf
*immed_bp
)
14749 struct sd_xbuf
*xp
;
14751 void (*statp
)(kstat_io_t
*);
14752 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14753 void (*saved_statp
)(kstat_io_t
*);
14756 struct sd_fm_internal
*sfip
= NULL
;
14758 ASSERT(un
!= NULL
);
14759 ASSERT(mutex_owned(SD_MUTEX(un
)));
14760 ASSERT(un
->un_ncmds_in_transport
>= 0);
14761 ASSERT(un
->un_throttle
>= 0);
14763 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_start_cmds: entry\n");
14766 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14767 saved_statp
= NULL
;
14771 * If we are syncing or dumping, fail the command to
14772 * avoid recursively calling back into scsi_transport().
14773 * The dump I/O itself uses a separate code path so this
14774 * only prevents non-dump I/O from being sent while dumping.
14775 * File system sync takes place before dumping begins.
14776 * During panic, filesystem I/O is allowed provided
14777 * un_in_callback is <= 1. This is to prevent recursion
14778 * such as sd_start_cmds -> scsi_transport -> sdintr ->
14779 * sd_start_cmds and so on. See panic.c for more information
14780 * about the states the system can be in during panic.
14782 if ((un
->un_state
== SD_STATE_DUMPING
) ||
14783 (ddi_in_panic() && (un
->un_in_callback
> 1))) {
14784 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
14785 "sd_start_cmds: panicking\n");
14789 if ((bp
= immed_bp
) != NULL
) {
14791 * We have a bp that must be transported immediately.
14792 * It's OK to transport the immed_bp here without doing
14793 * the throttle limit check because the immed_bp is
14794 * always used in a retry/recovery case. This means
14795 * that we know we are not at the throttle limit by
14796 * virtue of the fact that to get here we must have
14797 * already gotten a command back via sdintr(). This also
14798 * relies on (1) the command on un_retry_bp preventing
14799 * further commands from the waitq from being issued;
14800 * and (2) the code in sd_retry_command checking the
14801 * throttle limit before issuing a delayed or immediate
14802 * retry. This holds even if the throttle limit is
14803 * currently ratcheted down from its maximum value.
14805 statp
= kstat_runq_enter
;
14806 if (bp
== un
->un_retry_bp
) {
14807 ASSERT((un
->un_retry_statp
== NULL
) ||
14808 (un
->un_retry_statp
== kstat_waitq_enter
) ||
14809 (un
->un_retry_statp
==
14810 kstat_runq_back_to_waitq
));
14812 * If the waitq kstat was incremented when
14813 * sd_set_retry_bp() queued this bp for a retry,
14814 * then we must set up statp so that the waitq
14815 * count will get decremented correctly below.
14816 * Also we must clear un->un_retry_statp to
14817 * ensure that we do not act on a stale value
14820 if ((un
->un_retry_statp
== kstat_waitq_enter
) ||
14821 (un
->un_retry_statp
==
14822 kstat_runq_back_to_waitq
)) {
14823 statp
= kstat_waitq_to_runq
;
14825 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14826 saved_statp
= un
->un_retry_statp
;
14828 un
->un_retry_statp
= NULL
;
14830 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
,
14831 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14832 "un_throttle:%d un_ncmds_in_transport:%d\n",
14833 un
, un
->un_retry_bp
, un
->un_throttle
,
14834 un
->un_ncmds_in_transport
);
14836 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_start_cmds: "
14837 "processing priority bp:0x%p\n", bp
);
14840 } else if ((bp
= un
->un_waitq_headp
) != NULL
) {
14842 * A command on the waitq is ready to go, but do not
14845 * (1) the throttle limit has been reached, or
14846 * (2) a retry is pending, or
14847 * (3) a START_STOP_UNIT callback pending, or
14848 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14849 * command is pending.
14851 * For all of these conditions, IO processing will
14852 * restart after the condition is cleared.
14854 if (un
->un_ncmds_in_transport
>= un
->un_throttle
) {
14855 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
14856 "sd_start_cmds: exiting, "
14857 "throttle limit reached!\n");
14860 if (un
->un_retry_bp
!= NULL
) {
14861 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
14862 "sd_start_cmds: exiting, retry pending!\n");
14865 if (un
->un_startstop_timeid
!= NULL
) {
14866 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
14867 "sd_start_cmds: exiting, "
14868 "START_STOP pending!\n");
14871 if (un
->un_direct_priority_timeid
!= NULL
) {
14872 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
14873 "sd_start_cmds: exiting, "
14874 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14878 /* Dequeue the command */
14879 un
->un_waitq_headp
= bp
->av_forw
;
14880 if (un
->un_waitq_headp
== NULL
) {
14881 un
->un_waitq_tailp
= NULL
;
14883 bp
->av_forw
= NULL
;
14884 statp
= kstat_waitq_to_runq
;
14885 SD_TRACE(SD_LOG_IO_CORE
, un
,
14886 "sd_start_cmds: processing waitq bp:0x%p\n", bp
);
14889 /* No work to do so bail out now */
14890 SD_TRACE(SD_LOG_IO_CORE
, un
,
14891 "sd_start_cmds: no more work, exiting!\n");
14896 * Reset the state to normal. This is the mechanism by which
14897 * the state transitions from either SD_STATE_RWAIT or
14898 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14899 * If state is SD_STATE_PM_CHANGING then this command is
14900 * part of the device power control and the state must
14901 * not be put back to normal. Doing so would would
14902 * allow new commands to proceed when they shouldn't,
14903 * the device may be going off.
14905 if ((un
->un_state
!= SD_STATE_SUSPENDED
) &&
14906 (un
->un_state
!= SD_STATE_PM_CHANGING
)) {
14907 New_state(un
, SD_STATE_NORMAL
);
14910 xp
= SD_GET_XBUF(bp
);
14911 ASSERT(xp
!= NULL
);
14913 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14915 * Allocate the scsi_pkt if we need one, or attach DMA
14916 * resources if we have a scsi_pkt that needs them. The
14917 * latter should only occur for commands that are being
14920 if ((xp
->xb_pktp
== NULL
) ||
14921 ((xp
->xb_pkt_flags
& SD_XB_DMA_FREED
) != 0)) {
14923 if (xp
->xb_pktp
== NULL
) {
14926 * There is no scsi_pkt allocated for this buf. Call
14927 * the initpkt function to allocate & init one.
14929 * The scsi_init_pkt runout callback functionality is
14930 * implemented as follows:
14932 * 1) The initpkt function always calls
14933 * scsi_init_pkt(9F) with sdrunout specified as the
14934 * callback routine.
14935 * 2) A successful packet allocation is initialized and
14936 * the I/O is transported.
14937 * 3) The I/O associated with an allocation resource
14938 * failure is left on its queue to be retried via
14939 * runout or the next I/O.
14940 * 4) The I/O associated with a DMA error is removed
14941 * from the queue and failed with EIO. Processing of
14942 * the transport queues is also halted to be
14943 * restarted via runout or the next I/O.
14944 * 5) The I/O associated with a CDB size or packet
14945 * size error is removed from the queue and failed
14946 * with EIO. Processing of the transport queues is
14949 * Note: there is no interface for canceling a runout
14950 * callback. To prevent the driver from detaching or
14951 * suspending while a runout is pending the driver
14952 * state is set to SD_STATE_RWAIT
14954 * Note: using the scsi_init_pkt callback facility can
14955 * result in an I/O request persisting at the head of
14956 * the list which cannot be satisfied even after
14957 * multiple retries. In the future the driver may
14958 * implement some kind of maximum runout count before
14961 * Note: the use of funcp below may seem superfluous,
14962 * but it helps warlock figure out the correct
14963 * initpkt function calls (see [s]sd.wlcmd).
14965 struct scsi_pkt
*pktp
;
14966 int (*funcp
)(struct buf
*bp
, struct scsi_pkt
**pktp
);
14968 ASSERT(bp
!= un
->un_rqs_bp
);
14970 funcp
= sd_initpkt_map
[xp
->xb_chain_iostart
];
14971 switch ((*funcp
)(bp
, &pktp
)) {
14972 case SD_PKT_ALLOC_SUCCESS
:
14973 xp
->xb_pktp
= pktp
;
14974 SD_TRACE(SD_LOG_IO_CORE
, un
,
14975 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
14979 case SD_PKT_ALLOC_FAILURE
:
14981 * Temporary (hopefully) resource depletion.
14982 * Since retries and RQS commands always have a
14983 * scsi_pkt allocated, these cases should never
14984 * get here. So the only cases this needs to
14985 * handle is a bp from the waitq (which we put
14986 * back onto the waitq for sdrunout), or a bp
14987 * sent as an immed_bp (which we just fail).
14989 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
14990 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
14992 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14994 if (bp
== immed_bp
) {
14996 * If SD_XB_DMA_FREED is clear, then
14997 * this is a failure to allocate a
14998 * scsi_pkt, and we must fail the
15001 if ((xp
->xb_pkt_flags
&
15002 SD_XB_DMA_FREED
) == 0) {
15007 * If this immediate command is NOT our
15008 * un_retry_bp, then we must fail it.
15010 if (bp
!= un
->un_retry_bp
) {
15015 * We get here if this cmd is our
15016 * un_retry_bp that was DMAFREED, but
15017 * scsi_init_pkt() failed to reallocate
15018 * DMA resources when we attempted to
15019 * retry it. This can happen when an
15020 * mpxio failover is in progress, but
15021 * we don't want to just fail the
15022 * command in this case.
15024 * Use timeout(9F) to restart it after
15025 * a 100ms delay. We don't want to
15026 * let sdrunout() restart it, because
15027 * sdrunout() is just supposed to start
15028 * commands that are sitting on the
15029 * wait queue. The un_retry_bp stays
15030 * set until the command completes, but
15031 * sdrunout can be called many times
15032 * before that happens. Since sdrunout
15033 * cannot tell if the un_retry_bp is
15034 * already in the transport, it could
15035 * end up calling scsi_transport() for
15036 * the un_retry_bp multiple times.
15038 * Also: don't schedule the callback
15039 * if some other callback is already
15042 if (un
->un_retry_statp
== NULL
) {
15044 * restore the kstat pointer to
15045 * keep kstat counts coherent
15046 * when we do retry the command.
15048 un
->un_retry_statp
=
15052 if ((un
->un_startstop_timeid
== NULL
) &&
15053 (un
->un_retry_timeid
== NULL
) &&
15054 (un
->un_direct_priority_timeid
==
15057 un
->un_retry_timeid
=
15059 sd_start_retry_command
,
15060 un
, SD_RESTART_TIMEOUT
);
15066 if (bp
== immed_bp
) {
15067 break; /* Just fail the command */
15071 /* Add the buf back to the head of the waitq */
15072 bp
->av_forw
= un
->un_waitq_headp
;
15073 un
->un_waitq_headp
= bp
;
15074 if (un
->un_waitq_tailp
== NULL
) {
15075 un
->un_waitq_tailp
= bp
;
15079 case SD_PKT_ALLOC_FAILURE_NO_DMA
:
15081 * HBA DMA resource failure. Fail the command
15082 * and continue processing of the queues.
15084 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15086 "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
15089 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL
:
15091 * Note:x86: Partial DMA mapping not supported
15092 * for USCSI commands, and all the needed DMA
15093 * resources were not allocated.
15095 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15097 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
15100 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
:
15102 * Note:x86: Request cannot fit into CDB based
15105 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15107 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
15111 /* Should NEVER get here! */
15112 panic("scsi_initpkt error");
15117 * Fatal error in allocating a scsi_pkt for this buf.
15118 * Update kstats & return the buf with an error code.
15119 * We must use sd_return_failed_command_no_restart() to
15120 * avoid a recursive call back into sd_start_cmds().
15121 * However this also means that we must keep processing
15122 * the waitq here in order to avoid stalling.
15124 if (statp
== kstat_waitq_to_runq
) {
15125 SD_UPDATE_KSTATS(un
, kstat_waitq_exit
, bp
);
15127 sd_return_failed_command_no_restart(un
, bp
, EIO
);
15128 if (bp
== immed_bp
) {
15129 /* immed_bp is gone by now, so clear this */
15135 if (bp
== immed_bp
) {
15136 /* goto the head of the class.... */
15137 xp
->xb_pktp
->pkt_flags
|= FLAG_HEAD
;
15140 un
->un_ncmds_in_transport
++;
15141 SD_UPDATE_KSTATS(un
, statp
, bp
);
15144 * Call scsi_transport() to send the command to the target.
15145 * According to SCSA architecture, we must drop the mutex here
15146 * before calling scsi_transport() in order to avoid deadlock.
15147 * Note that the scsi_pkt's completion routine can be executed
15148 * (from interrupt context) even before the call to
15149 * scsi_transport() returns.
15151 SD_TRACE(SD_LOG_IO_CORE
, un
,
15152 "sd_start_cmds: calling scsi_transport()\n");
15153 DTRACE_PROBE1(scsi__transport__dispatch
, struct buf
*, bp
);
15155 mutex_exit(SD_MUTEX(un
));
15156 rval
= scsi_transport(xp
->xb_pktp
);
15157 mutex_enter(SD_MUTEX(un
));
15159 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15160 "sd_start_cmds: scsi_transport() returned %d\n", rval
);
15164 /* Clear this with every pkt accepted by the HBA */
15165 un
->un_tran_fatal_count
= 0;
15166 break; /* Success; try the next cmd (if any) */
15169 un
->un_ncmds_in_transport
--;
15170 ASSERT(un
->un_ncmds_in_transport
>= 0);
15173 * Don't retry request sense, the sense data
15174 * is lost when another request is sent.
15175 * Free up the rqs buf and retry
15176 * the original failed cmd. Update kstat.
15178 if (bp
== un
->un_rqs_bp
) {
15179 SD_UPDATE_KSTATS(un
, kstat_runq_exit
, bp
);
15180 bp
= sd_mark_rqs_idle(un
, xp
);
15181 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
,
15182 NULL
, NULL
, EIO
, un
->un_busy_timeout
/ 500,
15183 kstat_waitq_enter
);
15187 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15189 * Free the DMA resources for the scsi_pkt. This will
15190 * allow mpxio to select another path the next time
15191 * we call scsi_transport() with this scsi_pkt.
15192 * See sdintr() for the rationalization behind this.
15194 if ((un
->un_f_is_fibre
== TRUE
) &&
15195 ((xp
->xb_pkt_flags
& SD_XB_USCSICMD
) == 0) &&
15196 ((xp
->xb_pktp
->pkt_flags
& FLAG_SENSING
) == 0)) {
15197 scsi_dmafree(xp
->xb_pktp
);
15198 xp
->xb_pkt_flags
|= SD_XB_DMA_FREED
;
15202 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp
))) {
15204 * Commands that are SD_PATH_DIRECT_PRIORITY
15205 * are for error recovery situations. These do
15206 * not use the normal command waitq, so if they
15207 * get a TRAN_BUSY we cannot put them back onto
15208 * the waitq for later retry. One possible
15209 * problem is that there could already be some
15210 * other command on un_retry_bp that is waiting
15211 * for this one to complete, so we would be
15212 * deadlocked if we put this command back onto
15213 * the waitq for later retry (since un_retry_bp
15214 * must complete before the driver gets back to
15215 * commands on the waitq).
15217 * To avoid deadlock we must schedule a callback
15218 * that will restart this command after a set
15219 * interval. This should keep retrying for as
15220 * long as the underlying transport keeps
15221 * returning TRAN_BUSY (just like for other
15222 * commands). Use the same timeout interval as
15223 * for the ordinary TRAN_BUSY retry.
15225 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15226 "sd_start_cmds: scsi_transport() returned "
15227 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");
15229 SD_UPDATE_KSTATS(un
, kstat_runq_exit
, bp
);
15230 un
->un_direct_priority_timeid
=
15231 timeout(sd_start_direct_priority_command
,
15232 bp
, un
->un_busy_timeout
/ 500);
15238 * For TRAN_BUSY, we want to reduce the throttle value,
15239 * unless we are retrying a command.
15241 if (bp
!= un
->un_retry_bp
) {
15242 sd_reduce_throttle(un
, SD_THROTTLE_TRAN_BUSY
);
15246 * Set up the bp to be tried again 10 ms later.
15247 * Note:x86: Is there a timeout value in the sd_lun
15248 * for this condition?
15250 sd_set_retry_bp(un
, bp
, un
->un_busy_timeout
/ 500,
15251 kstat_runq_back_to_waitq
);
15254 case TRAN_FATAL_ERROR
:
15255 un
->un_tran_fatal_count
++;
15260 un
->un_ncmds_in_transport
--;
15261 ASSERT(un
->un_ncmds_in_transport
>= 0);
15264 * If this is our REQUEST SENSE command with a
15265 * transport error, we must get back the pointers
15266 * to the original buf, and mark the REQUEST
15267 * SENSE command as "available".
15269 if (bp
== un
->un_rqs_bp
) {
15270 bp
= sd_mark_rqs_idle(un
, xp
);
15271 xp
= SD_GET_XBUF(bp
);
15274 * Legacy behavior: do not update transport
15275 * error count for request sense commands.
15277 SD_UPDATE_ERRSTATS(un
, sd_transerrs
);
15280 SD_UPDATE_KSTATS(un
, kstat_runq_exit
, bp
);
15281 sd_print_transport_rejected_message(un
, xp
, rval
);
15284 * This command will be terminated by SD driver due
15285 * to a fatal transport error. We should post
15286 * ereport.io.scsi.cmd.disk.tran with driver-assessment
15287 * of "fail" for any command to indicate this
15290 if (xp
->xb_ena
> 0) {
15291 ASSERT(un
->un_fm_private
!= NULL
);
15292 sfip
= un
->un_fm_private
;
15293 sfip
->fm_ssc
.ssc_flags
|= SSC_FLAGS_TRAN_ABORT
;
15294 sd_ssc_extract_info(&sfip
->fm_ssc
, un
,
15295 xp
->xb_pktp
, bp
, xp
);
15296 sd_ssc_post(&sfip
->fm_ssc
, SD_FM_DRV_FATAL
);
15300 * We must use sd_return_failed_command_no_restart() to
15301 * avoid a recursive call back into sd_start_cmds().
15302 * However this also means that we must keep processing
15303 * the waitq here in order to avoid stalling.
15305 sd_return_failed_command_no_restart(un
, bp
, EIO
);
15308 * Notify any threads waiting in sd_ddi_suspend() that
15309 * a command completion has occurred.
15311 if (un
->un_state
== SD_STATE_SUSPENDED
) {
15312 cv_broadcast(&un
->un_disk_busy_cv
);
15315 if (bp
== immed_bp
) {
15316 /* immed_bp is gone by now, so clear this */
15322 } while (immed_bp
== NULL
);
15325 ASSERT(mutex_owned(SD_MUTEX(un
)));
15326 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_start_cmds: exit\n");
15331 * Function: sd_return_command
15333 * Description: Returns a command to its originator (with or without an
15334 * error). Also starts commands waiting to be transported
15337 * Context: May be called from interrupt, kernel, or timeout context
15341 sd_return_command(struct sd_lun
*un
, struct buf
*bp
)
15343 struct sd_xbuf
*xp
;
15344 struct scsi_pkt
*pktp
;
15345 struct sd_fm_internal
*sfip
;
15347 ASSERT(bp
!= NULL
);
15348 ASSERT(un
!= NULL
);
15349 ASSERT(mutex_owned(SD_MUTEX(un
)));
15350 ASSERT(bp
!= un
->un_rqs_bp
);
15351 xp
= SD_GET_XBUF(bp
);
15352 ASSERT(xp
!= NULL
);
15354 pktp
= SD_GET_PKTP(bp
);
15355 sfip
= (struct sd_fm_internal
*)un
->un_fm_private
;
15356 ASSERT(sfip
!= NULL
);
15358 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_return_command: entry\n");
15361 * Note: check for the "sdrestart failed" case.
15363 if ((un
->un_partial_dma_supported
== 1) &&
15364 ((xp
->xb_pkt_flags
& SD_XB_USCSICMD
) != SD_XB_USCSICMD
) &&
15365 (geterror(bp
) == 0) && (xp
->xb_dma_resid
!= 0) &&
15366 (xp
->xb_pktp
->pkt_resid
== 0)) {
15368 if (sd_setup_next_xfer(un
, bp
, pktp
, xp
) != 0) {
15370 * Successfully set up next portion of cmd
15371 * transfer, try sending it
15373 sd_retry_command(un
, bp
, SD_RETRIES_NOCHECK
,
15374 NULL
, NULL
, 0, (clock_t)0, NULL
);
15375 sd_start_cmds(un
, NULL
);
15376 return; /* Note:x86: need a return here? */
15381 * If this is the failfast bp, clear it from un_failfast_bp. This
15382 * can happen if upon being re-tried the failfast bp either
15383 * succeeded or encountered another error (possibly even a different
15384 * error than the one that precipitated the failfast state, but in
15385 * that case it would have had to exhaust retries as well). Regardless,
15386 * this should not occur whenever the instance is in the active
15389 if (bp
== un
->un_failfast_bp
) {
15390 ASSERT(un
->un_failfast_state
== SD_FAILFAST_INACTIVE
);
15391 un
->un_failfast_bp
= NULL
;
15395 * Clear the failfast state upon successful completion of ANY cmd.
15397 if (bp
->b_error
== 0) {
15398 un
->un_failfast_state
= SD_FAILFAST_INACTIVE
;
15400 * If this is a successful command, but used to be retried,
15401 * we will take it as a recovered command and post an
15402 * ereport with driver-assessment of "recovered".
15404 if (xp
->xb_ena
> 0) {
15405 sd_ssc_extract_info(&sfip
->fm_ssc
, un
, pktp
, bp
, xp
);
15406 sd_ssc_post(&sfip
->fm_ssc
, SD_FM_DRV_RECOVERY
);
15410 * If this is a failed non-USCSI command we will post an
15411 * ereport with driver-assessment set accordingly("fail" or
15414 if (!(xp
->xb_pkt_flags
& SD_XB_USCSICMD
)) {
15415 sd_ssc_extract_info(&sfip
->fm_ssc
, un
, pktp
, bp
, xp
);
15416 sd_ssc_post(&sfip
->fm_ssc
, SD_FM_DRV_FATAL
);
15421 * This is used if the command was retried one or more times. Show that
15422 * we are done with it, and allow processing of the waitq to resume.
15424 if (bp
== un
->un_retry_bp
) {
15425 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15426 "sd_return_command: un:0x%p: "
15427 "RETURNING retry_bp:0x%p\n", un
, un
->un_retry_bp
);
15428 un
->un_retry_bp
= NULL
;
15429 un
->un_retry_statp
= NULL
;
15432 SD_UPDATE_RDWR_STATS(un
, bp
);
15433 SD_UPDATE_PARTITION_STATS(un
, bp
);
15435 switch (un
->un_state
) {
15436 case SD_STATE_SUSPENDED
:
15438 * Notify any threads waiting in sd_ddi_suspend() that
15439 * a command completion has occurred.
15441 cv_broadcast(&un
->un_disk_busy_cv
);
15444 sd_start_cmds(un
, NULL
);
15448 /* Return this command up the iodone chain to its originator. */
15449 mutex_exit(SD_MUTEX(un
));
15451 (*(sd_destroypkt_map
[xp
->xb_chain_iodone
]))(bp
);
15452 xp
->xb_pktp
= NULL
;
15454 SD_BEGIN_IODONE(xp
->xb_chain_iodone
, un
, bp
);
15456 ASSERT(!mutex_owned(SD_MUTEX(un
)));
15457 mutex_enter(SD_MUTEX(un
));
15459 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_return_command: exit\n");
15464 * Function: sd_return_failed_command
15466 * Description: Command completion when an error occurred.
15468 * Context: May be called from interrupt context
15472 sd_return_failed_command(struct sd_lun
*un
, struct buf
*bp
, int errcode
)
15474 ASSERT(bp
!= NULL
);
15475 ASSERT(un
!= NULL
);
15476 ASSERT(mutex_owned(SD_MUTEX(un
)));
15478 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15479 "sd_return_failed_command: entry\n");
15482 * b_resid could already be nonzero due to a partial data
15483 * transfer, so do not change it here.
15485 SD_BIOERROR(bp
, errcode
);
15487 sd_return_command(un
, bp
);
15488 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15489 "sd_return_failed_command: exit\n");
15494 * Function: sd_return_failed_command_no_restart
15496 * Description: Same as sd_return_failed_command, but ensures that no
15497 * call back into sd_start_cmds will be issued.
15499 * Context: May be called from interrupt context
15503 sd_return_failed_command_no_restart(struct sd_lun
*un
, struct buf
*bp
,
15506 struct sd_xbuf
*xp
;
15508 ASSERT(bp
!= NULL
);
15509 ASSERT(un
!= NULL
);
15510 ASSERT(mutex_owned(SD_MUTEX(un
)));
15511 xp
= SD_GET_XBUF(bp
);
15512 ASSERT(xp
!= NULL
);
15513 ASSERT(errcode
!= 0);
15515 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15516 "sd_return_failed_command_no_restart: entry\n");
15519 * b_resid could already be nonzero due to a partial data
15520 * transfer, so do not change it here.
15522 SD_BIOERROR(bp
, errcode
);
15525 * If this is the failfast bp, clear it. This can happen if the
15526 * failfast bp encounterd a fatal error when we attempted to
15527 * re-try it (such as a scsi_transport(9F) failure). However
15528 * we should NOT be in an active failfast state if the failfast
15531 if (bp
== un
->un_failfast_bp
) {
15532 ASSERT(un
->un_failfast_state
== SD_FAILFAST_INACTIVE
);
15533 un
->un_failfast_bp
= NULL
;
15536 if (bp
== un
->un_retry_bp
) {
15538 * This command was retried one or more times. Show that we are
15539 * done with it, and allow processing of the waitq to resume.
15541 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15542 "sd_return_failed_command_no_restart: "
15543 " un:0x%p: RETURNING retry_bp:0x%p\n", un
, un
->un_retry_bp
);
15544 un
->un_retry_bp
= NULL
;
15545 un
->un_retry_statp
= NULL
;
15548 SD_UPDATE_RDWR_STATS(un
, bp
);
15549 SD_UPDATE_PARTITION_STATS(un
, bp
);
15551 mutex_exit(SD_MUTEX(un
));
15553 if (xp
->xb_pktp
!= NULL
) {
15554 (*(sd_destroypkt_map
[xp
->xb_chain_iodone
]))(bp
);
15555 xp
->xb_pktp
= NULL
;
15558 SD_BEGIN_IODONE(xp
->xb_chain_iodone
, un
, bp
);
15560 mutex_enter(SD_MUTEX(un
));
15562 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15563 "sd_return_failed_command_no_restart: exit\n");
15568 * Function: sd_retry_command
15570 * Description: queue up a command for retry, or (optionally) fail it
15571 * if retry counts are exhausted.
15573 * Arguments: un - Pointer to the sd_lun struct for the target.
15575 * bp - Pointer to the buf for the command to be retried.
15577 * retry_check_flag - Flag to see which (if any) of the retry
15578 * counts should be decremented/checked. If the indicated
15579 * retry count is exhausted, then the command will not be
15580 * retried; it will be failed instead. This should use a
15581 * value equal to one of the following:
15583 * SD_RETRIES_NOCHECK
15584 * SD_RESD_RETRIES_STANDARD
15585 * SD_RETRIES_VICTIM
15587 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15588 * if the check should be made to see of FLAG_ISOLATE is set
15589 * in the pkt. If FLAG_ISOLATE is set, then the command is
15590 * not retried, it is simply failed.
15592 * user_funcp - Ptr to function to call before dispatching the
15593 * command. May be NULL if no action needs to be performed.
15594 * (Primarily intended for printing messages.)
15596 * user_arg - Optional argument to be passed along to
15597 * the user_funcp call.
15599 * failure_code - errno return code to set in the bp if the
15600 * command is going to be failed.
15602 * retry_delay - Retry delay interval in (clock_t) units. May
15603 * be zero which indicates that the retry should be retried
15604 * immediately (ie, without an intervening delay).
15606 * statp - Ptr to kstat function to be updated if the command
15607 * is queued for a delayed retry. May be NULL if no kstat
15608 * update is desired.
15610 * Context: May be called from interrupt context.
15614 sd_retry_command(struct sd_lun
*un
, struct buf
*bp
, int retry_check_flag
,
15615 void (*user_funcp
)(struct sd_lun
*un
, struct buf
*bp
, void *argp
, int
15616 code
), void *user_arg
, int failure_code
, clock_t retry_delay
,
15617 void (*statp
)(kstat_io_t
*))
15619 struct sd_xbuf
*xp
;
15620 struct scsi_pkt
*pktp
;
15621 struct sd_fm_internal
*sfip
;
15623 ASSERT(un
!= NULL
);
15624 ASSERT(mutex_owned(SD_MUTEX(un
)));
15625 ASSERT(bp
!= NULL
);
15626 xp
= SD_GET_XBUF(bp
);
15627 ASSERT(xp
!= NULL
);
15628 pktp
= SD_GET_PKTP(bp
);
15629 ASSERT(pktp
!= NULL
);
15631 sfip
= (struct sd_fm_internal
*)un
->un_fm_private
;
15632 ASSERT(sfip
!= NULL
);
15634 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
,
15635 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp
, xp
);
15638 * If we are syncing or dumping, fail the command to avoid
15639 * recursively calling back into scsi_transport().
15641 if (ddi_in_panic()) {
15642 goto fail_command_no_log
;
15646 * We should never be be retrying a command with FLAG_DIAGNOSE set, so
15647 * log an error and fail the command.
15649 if ((pktp
->pkt_flags
& FLAG_DIAGNOSE
) != 0) {
15650 scsi_log(SD_DEVINFO(un
), sd_label
, CE_NOTE
,
15651 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15652 sd_dump_memory(un
, SD_LOG_IO
, "CDB",
15653 (uchar_t
*)pktp
->pkt_cdbp
, CDB_SIZE
, SD_LOG_HEX
);
15654 sd_dump_memory(un
, SD_LOG_IO
, "Sense Data",
15655 (uchar_t
*)xp
->xb_sense_data
, SENSE_LENGTH
, SD_LOG_HEX
);
15660 * If we are suspended, then put the command onto head of the
15661 * wait queue since we don't want to start more commands, and
15662 * clear the un_retry_bp. Next time when we are resumed, will
15663 * handle the command in the wait queue.
15665 switch (un
->un_state
) {
15666 case SD_STATE_SUSPENDED
:
15667 case SD_STATE_DUMPING
:
15668 bp
->av_forw
= un
->un_waitq_headp
;
15669 un
->un_waitq_headp
= bp
;
15670 if (un
->un_waitq_tailp
== NULL
) {
15671 un
->un_waitq_tailp
= bp
;
15673 if (bp
== un
->un_retry_bp
) {
15674 un
->un_retry_bp
= NULL
;
15675 un
->un_retry_statp
= NULL
;
15677 SD_UPDATE_KSTATS(un
, kstat_waitq_enter
, bp
);
15678 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_retry_command: "
15679 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp
);
15686 * If the caller wants us to check FLAG_ISOLATE, then see if that
15687 * is set; if it is then we do not want to retry the command.
15688 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15690 if ((retry_check_flag
& SD_RETRIES_ISOLATE
) != 0) {
15691 if ((pktp
->pkt_flags
& FLAG_ISOLATE
) != 0) {
15698 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15699 * command timeout or a selection timeout has occurred. This means
15700 * that we were unable to establish an kind of communication with
15701 * the target, and subsequent retries and/or commands are likely
15702 * to encounter similar results and take a long time to complete.
15704 * If this is a failfast error condition, we need to update the
15705 * failfast state, even if this bp does not have B_FAILFAST set.
15707 if (retry_check_flag
& SD_RETRIES_FAILFAST
) {
15708 if (un
->un_failfast_state
== SD_FAILFAST_ACTIVE
) {
15709 ASSERT(un
->un_failfast_bp
== NULL
);
15711 * If we are already in the active failfast state, and
15712 * another failfast error condition has been detected,
15713 * then fail this command if it has B_FAILFAST set.
15714 * If B_FAILFAST is clear, then maintain the legacy
15715 * behavior of retrying heroically, even tho this will
15716 * take a lot more time to fail the command.
15718 if (bp
->b_flags
& B_FAILFAST
) {
15723 * We're not in the active failfast state, but we
15724 * have a failfast error condition, so we must begin
15725 * transition to the next state. We do this regardless
15726 * of whether or not this bp has B_FAILFAST set.
15728 if (un
->un_failfast_bp
== NULL
) {
15730 * This is the first bp to meet a failfast
15731 * condition so save it on un_failfast_bp &
15732 * do normal retry processing. Do not enter
15733 * active failfast state yet. This marks
15734 * entry into the "failfast pending" state.
15736 un
->un_failfast_bp
= bp
;
15738 } else if (un
->un_failfast_bp
== bp
) {
15740 * This is the second time *this* bp has
15741 * encountered a failfast error condition,
15742 * so enter active failfast state & flush
15743 * queues as appropriate.
15745 un
->un_failfast_state
= SD_FAILFAST_ACTIVE
;
15746 un
->un_failfast_bp
= NULL
;
15747 sd_failfast_flushq(un
);
15750 * Fail this bp now if B_FAILFAST set;
15751 * otherwise continue with retries. (It would
15752 * be pretty ironic if this bp succeeded on a
15753 * subsequent retry after we just flushed all
15756 if (bp
->b_flags
& B_FAILFAST
) {
15760 #if !defined(lint) && !defined(__lint)
15763 * If neither of the preceeding conditionals
15764 * was true, it means that there is some
15765 * *other* bp that has met an inital failfast
15766 * condition and is currently either being
15767 * retried or is waiting to be retried. In
15768 * that case we should perform normal retry
15769 * processing on *this* bp, since there is a
15770 * chance that the current failfast condition
15771 * is transient and recoverable. If that does
15772 * not turn out to be the case, then retries
15773 * will be cleared when the wait queue is
15781 * SD_RETRIES_FAILFAST is clear, which indicates that we
15782 * likely were able to at least establish some level of
15783 * communication with the target and subsequent commands
15784 * and/or retries are likely to get through to the target,
15785 * In this case we want to be aggressive about clearing
15786 * the failfast state. Note that this does not affect
15787 * the "failfast pending" condition.
15789 un
->un_failfast_state
= SD_FAILFAST_INACTIVE
;
15794 * Check the specified retry count to see if we can still do
15795 * any retries with this pkt before we should fail it.
15797 switch (retry_check_flag
& SD_RETRIES_MASK
) {
15798 case SD_RETRIES_VICTIM
:
15800 * Check the victim retry count. If exhausted, then fall
15801 * thru & check against the standard retry count.
15803 if (xp
->xb_victim_retry_count
< un
->un_victim_retry_count
) {
15804 /* Increment count & proceed with the retry */
15805 xp
->xb_victim_retry_count
++;
15808 /* Victim retries exhausted, fall back to std. retries... */
15811 case SD_RETRIES_STANDARD
:
15812 if (xp
->xb_retry_count
>= un
->un_retry_count
) {
15813 /* Retries exhausted, fail the command */
15814 SD_TRACE(SD_LOG_IO_CORE
, un
,
15815 "sd_retry_command: retries exhausted!\n");
15817 * update b_resid for failed SCMD_READ & SCMD_WRITE
15818 * commands with nonzero pkt_resid.
15820 if ((pktp
->pkt_reason
== CMD_CMPLT
) &&
15821 (SD_GET_PKT_STATUS(pktp
) == STATUS_GOOD
) &&
15822 (pktp
->pkt_resid
!= 0)) {
15823 uchar_t op
= SD_GET_PKT_OPCODE(pktp
) & 0x1F;
15824 if ((op
== SCMD_READ
) || (op
== SCMD_WRITE
)) {
15825 SD_UPDATE_B_RESID(bp
, pktp
);
15830 xp
->xb_retry_count
++;
15831 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15832 "sd_retry_command: retry count:%d\n", xp
->xb_retry_count
);
15835 case SD_RETRIES_UA
:
15836 if (xp
->xb_ua_retry_count
>= sd_ua_retry_count
) {
15837 /* Retries exhausted, fail the command */
15838 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
15839 "Unit Attention retries exhausted. "
15840 "Check the target.\n");
15843 xp
->xb_ua_retry_count
++;
15844 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15845 "sd_retry_command: retry count:%d\n",
15846 xp
->xb_ua_retry_count
);
15849 case SD_RETRIES_BUSY
:
15850 if (xp
->xb_retry_count
>= un
->un_busy_retry_count
) {
15851 /* Retries exhausted, fail the command */
15852 SD_TRACE(SD_LOG_IO_CORE
, un
,
15853 "sd_retry_command: retries exhausted!\n");
15856 xp
->xb_retry_count
++;
15857 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15858 "sd_retry_command: retry count:%d\n", xp
->xb_retry_count
);
15861 case SD_RETRIES_NOCHECK
:
15863 /* No retry count to check. Just proceed with the retry */
15867 xp
->xb_pktp
->pkt_flags
|= FLAG_HEAD
;
15870 * If this is a non-USCSI command being retried
15871 * during execution last time, we should post an ereport with
15872 * driver-assessment of the value "retry".
15873 * For partial DMA, request sense and STATUS_QFULL, there are no
15874 * hardware errors, we bypass ereport posting.
15876 if (failure_code
!= 0) {
15877 if (!(xp
->xb_pkt_flags
& SD_XB_USCSICMD
)) {
15878 sd_ssc_extract_info(&sfip
->fm_ssc
, un
, pktp
, bp
, xp
);
15879 sd_ssc_post(&sfip
->fm_ssc
, SD_FM_DRV_RETRY
);
15884 * If we were given a zero timeout, we must attempt to retry the
15885 * command immediately (ie, without a delay).
15887 if (retry_delay
== 0) {
15889 * Check some limiting conditions to see if we can actually
15890 * do the immediate retry. If we cannot, then we must
15891 * fall back to queueing up a delayed retry.
15893 if (un
->un_ncmds_in_transport
>= un
->un_throttle
) {
15895 * We are at the throttle limit for the target,
15896 * fall back to delayed retry.
15898 retry_delay
= un
->un_busy_timeout
;
15899 statp
= kstat_waitq_enter
;
15900 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15901 "sd_retry_command: immed. retry hit "
15905 * We're clear to proceed with the immediate retry.
15906 * First call the user-provided function (if any)
15908 if (user_funcp
!= NULL
) {
15909 (*user_funcp
)(un
, bp
, user_arg
,
15910 SD_IMMEDIATE_RETRY_ISSUED
);
15912 sd_print_incomplete_msg(un
, bp
, user_arg
,
15913 SD_IMMEDIATE_RETRY_ISSUED
);
15914 sd_print_cmd_incomplete_msg(un
, bp
, user_arg
,
15915 SD_IMMEDIATE_RETRY_ISSUED
);
15916 sd_print_sense_failed_msg(un
, bp
, user_arg
,
15917 SD_IMMEDIATE_RETRY_ISSUED
);
15921 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15922 "sd_retry_command: issuing immediate retry\n");
15925 * Call sd_start_cmds() to transport the command to
15928 sd_start_cmds(un
, bp
);
15930 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15931 "sd_retry_command exit\n");
15937 * Set up to retry the command after a delay.
15938 * First call the user-provided function (if any)
15940 if (user_funcp
!= NULL
) {
15941 (*user_funcp
)(un
, bp
, user_arg
, SD_DELAYED_RETRY_ISSUED
);
15944 sd_set_retry_bp(un
, bp
, retry_delay
, statp
);
15946 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_retry_command: exit\n");
15951 if (user_funcp
!= NULL
) {
15952 (*user_funcp
)(un
, bp
, user_arg
, SD_NO_RETRY_ISSUED
);
15955 fail_command_no_log
:
15957 SD_INFO(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
15958 "sd_retry_command: returning failed command\n");
15960 sd_return_failed_command(un
, bp
, failure_code
);
15962 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_retry_command: exit\n");
15967 * Function: sd_set_retry_bp
15969 * Description: Set up the given bp for retry.
15971 * Arguments: un - ptr to associated softstate
15972 * bp - ptr to buf(9S) for the command
15973 * retry_delay - time interval before issuing retry (may be 0)
15974 * statp - optional pointer to kstat function
15976 * Context: May be called under interrupt context
15980 sd_set_retry_bp(struct sd_lun
*un
, struct buf
*bp
, clock_t retry_delay
,
15981 void (*statp
)(kstat_io_t
*))
15983 ASSERT(un
!= NULL
);
15984 ASSERT(mutex_owned(SD_MUTEX(un
)));
15985 ASSERT(bp
!= NULL
);
15987 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
,
15988 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un
, bp
);
15991 * Indicate that the command is being retried. This will not allow any
15992 * other commands on the wait queue to be transported to the target
15993 * until this command has been completed (success or failure). The
15994 * "retry command" is not transported to the target until the given
15995 * time delay expires, unless the user specified a 0 retry_delay.
15997 * Note: the timeout(9F) callback routine is what actually calls
15998 * sd_start_cmds() to transport the command, with the exception of a
15999 * zero retry_delay. The only current implementor of a zero retry delay
16000 * is the case where a START_STOP_UNIT is sent to spin-up a device.
16002 if (un
->un_retry_bp
== NULL
) {
16003 ASSERT(un
->un_retry_statp
== NULL
);
16004 un
->un_retry_bp
= bp
;
16007 * If the user has not specified a delay the command should
16008 * be queued and no timeout should be scheduled.
16010 if (retry_delay
== 0) {
16012 * Save the kstat pointer that will be used in the
16013 * call to SD_UPDATE_KSTATS() below, so that
16014 * sd_start_cmds() can correctly decrement the waitq
16015 * count when it is time to transport this command.
16017 un
->un_retry_statp
= statp
;
16022 if (un
->un_retry_bp
== bp
) {
16024 * Save the kstat pointer that will be used in the call to
16025 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
16026 * correctly decrement the waitq count when it is time to
16027 * transport this command.
16029 un
->un_retry_statp
= statp
;
16032 * Schedule a timeout if:
16033 * 1) The user has specified a delay.
16034 * 2) There is not a START_STOP_UNIT callback pending.
16036 * If no delay has been specified, then it is up to the caller
16037 * to ensure that IO processing continues without stalling.
16038 * Effectively, this means that the caller will issue the
16039 * required call to sd_start_cmds(). The START_STOP_UNIT
16040 * callback does this after the START STOP UNIT command has
16041 * completed. In either of these cases we should not schedule
16042 * a timeout callback here. Also don't schedule the timeout if
16043 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
16045 if ((retry_delay
!= 0) && (un
->un_startstop_timeid
== NULL
) &&
16046 (un
->un_direct_priority_timeid
== NULL
)) {
16047 un
->un_retry_timeid
=
16048 timeout(sd_start_retry_command
, un
, retry_delay
);
16049 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16050 "sd_set_retry_bp: setting timeout: un: 0x%p"
16051 " bp:0x%p un_retry_timeid:0x%p\n",
16052 un
, bp
, un
->un_retry_timeid
);
16056 * We only get in here if there is already another command
16057 * waiting to be retried. In this case, we just put the
16058 * given command onto the wait queue, so it can be transported
16059 * after the current retry command has completed.
16061 * Also we have to make sure that if the command at the head
16062 * of the wait queue is the un_failfast_bp, that we do not
16063 * put ahead of it any other commands that are to be retried.
16065 if ((un
->un_failfast_bp
!= NULL
) &&
16066 (un
->un_failfast_bp
== un
->un_waitq_headp
)) {
16068 * Enqueue this command AFTER the first command on
16069 * the wait queue (which is also un_failfast_bp).
16071 bp
->av_forw
= un
->un_waitq_headp
->av_forw
;
16072 un
->un_waitq_headp
->av_forw
= bp
;
16073 if (un
->un_waitq_headp
== un
->un_waitq_tailp
) {
16074 un
->un_waitq_tailp
= bp
;
16077 /* Enqueue this command at the head of the waitq. */
16078 bp
->av_forw
= un
->un_waitq_headp
;
16079 un
->un_waitq_headp
= bp
;
16080 if (un
->un_waitq_tailp
== NULL
) {
16081 un
->un_waitq_tailp
= bp
;
16085 if (statp
== NULL
) {
16086 statp
= kstat_waitq_enter
;
16088 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16089 "sd_set_retry_bp: un:0x%p already delayed retry\n", un
);
16093 if (statp
!= NULL
) {
16094 SD_UPDATE_KSTATS(un
, statp
, bp
);
16097 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16098 "sd_set_retry_bp: exit un:0x%p\n", un
);
16103 * Function: sd_start_retry_command
16105 * Description: Start the command that has been waiting on the target's
16106 * retry queue. Called from timeout(9F) context after the
16107 * retry delay interval has expired.
16109 * Arguments: arg - pointer to associated softstate for the device.
16111 * Context: timeout(9F) thread context. May not sleep.
16115 sd_start_retry_command(void *arg
)
16117 struct sd_lun
*un
= arg
;
16119 ASSERT(un
!= NULL
);
16120 ASSERT(!mutex_owned(SD_MUTEX(un
)));
16122 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16123 "sd_start_retry_command: entry\n");
16125 mutex_enter(SD_MUTEX(un
));
16127 un
->un_retry_timeid
= NULL
;
16129 if (un
->un_retry_bp
!= NULL
) {
16130 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16131 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
16132 un
, un
->un_retry_bp
);
16133 sd_start_cmds(un
, un
->un_retry_bp
);
16136 mutex_exit(SD_MUTEX(un
));
16138 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16139 "sd_start_retry_command: exit\n");
16143 * Function: sd_rmw_msg_print_handler
16145 * Description: If RMW mode is enabled and warning message is triggered
16146 * print I/O count during a fixed interval.
16148 * Arguments: arg - pointer to associated softstate for the device.
16150 * Context: timeout(9F) thread context. May not sleep.
16153 sd_rmw_msg_print_handler(void *arg
)
16155 struct sd_lun
*un
= arg
;
16157 ASSERT(un
!= NULL
);
16158 ASSERT(!mutex_owned(SD_MUTEX(un
)));
16160 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16161 "sd_rmw_msg_print_handler: entry\n");
16163 mutex_enter(SD_MUTEX(un
));
16165 if (un
->un_rmw_incre_count
> 0) {
16166 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
16167 "%"PRIu64
" I/O requests are not aligned with %d disk "
16168 "sector size in %ld seconds. They are handled through "
16169 "Read Modify Write but the performance is very low!\n",
16170 un
->un_rmw_incre_count
, un
->un_tgt_blocksize
,
16171 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT
) / 1000000);
16172 un
->un_rmw_incre_count
= 0;
16173 un
->un_rmw_msg_timeid
= timeout(sd_rmw_msg_print_handler
,
16174 un
, SD_RMW_MSG_PRINT_TIMEOUT
);
16176 un
->un_rmw_msg_timeid
= NULL
;
16179 mutex_exit(SD_MUTEX(un
));
16181 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16182 "sd_rmw_msg_print_handler: exit\n");
16186 * Function: sd_start_direct_priority_command
16188 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
16189 * received TRAN_BUSY when we called scsi_transport() to send it
16190 * to the underlying HBA. This function is called from timeout(9F)
16191 * context after the delay interval has expired.
16193 * Arguments: arg - pointer to associated buf(9S) to be restarted.
16195 * Context: timeout(9F) thread context. May not sleep.
16199 sd_start_direct_priority_command(void *arg
)
16201 struct buf
*priority_bp
= arg
;
16204 ASSERT(priority_bp
!= NULL
);
16205 un
= SD_GET_UN(priority_bp
);
16206 ASSERT(un
!= NULL
);
16207 ASSERT(!mutex_owned(SD_MUTEX(un
)));
16209 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16210 "sd_start_direct_priority_command: entry\n");
16212 mutex_enter(SD_MUTEX(un
));
16213 un
->un_direct_priority_timeid
= NULL
;
16214 sd_start_cmds(un
, priority_bp
);
16215 mutex_exit(SD_MUTEX(un
));
16217 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16218 "sd_start_direct_priority_command: exit\n");
16223 * Function: sd_send_request_sense_command
16225 * Description: Sends a REQUEST SENSE command to the target
16227 * Context: May be called from interrupt context.
16231 sd_send_request_sense_command(struct sd_lun
*un
, struct buf
*bp
,
16232 struct scsi_pkt
*pktp
)
16234 ASSERT(bp
!= NULL
);
16235 ASSERT(un
!= NULL
);
16236 ASSERT(mutex_owned(SD_MUTEX(un
)));
16238 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
, "sd_send_request_sense_command: "
16239 "entry: buf:0x%p\n", bp
);
16242 * If we are syncing or dumping, then fail the command to avoid a
16243 * recursive callback into scsi_transport(). Also fail the command
16244 * if we are suspended (legacy behavior).
16246 if (ddi_in_panic() || (un
->un_state
== SD_STATE_SUSPENDED
) ||
16247 (un
->un_state
== SD_STATE_DUMPING
)) {
16248 sd_return_failed_command(un
, bp
, EIO
);
16249 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16250 "sd_send_request_sense_command: syncing/dumping, exit\n");
16255 * Retry the failed command and don't issue the request sense if:
16256 * 1) the sense buf is busy
16257 * 2) we have 1 or more outstanding commands on the target
16258 * (the sense data will be cleared or invalidated any way)
16260 * Note: There could be an issue with not checking a retry limit here,
16261 * the problem is determining which retry limit to check.
16263 if ((un
->un_sense_isbusy
!= 0) || (un
->un_ncmds_in_transport
> 0)) {
16264 /* Don't retry if the command is flagged as non-retryable */
16265 if ((pktp
->pkt_flags
& FLAG_DIAGNOSE
) == 0) {
16266 sd_retry_command(un
, bp
, SD_RETRIES_NOCHECK
,
16267 NULL
, NULL
, 0, un
->un_busy_timeout
,
16268 kstat_waitq_enter
);
16269 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16270 "sd_send_request_sense_command: "
16271 "at full throttle, retrying exit\n");
16273 sd_return_failed_command(un
, bp
, EIO
);
16274 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16275 "sd_send_request_sense_command: "
16276 "at full throttle, non-retryable exit\n");
16281 sd_mark_rqs_busy(un
, bp
);
16282 sd_start_cmds(un
, un
->un_rqs_bp
);
16284 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16285 "sd_send_request_sense_command: exit\n");
16290 * Function: sd_mark_rqs_busy
16292 * Description: Indicate that the request sense bp for this instance is
16295 * Context: May be called under interrupt context
16299 sd_mark_rqs_busy(struct sd_lun
*un
, struct buf
*bp
)
16301 struct sd_xbuf
*sense_xp
;
16303 ASSERT(un
!= NULL
);
16304 ASSERT(bp
!= NULL
);
16305 ASSERT(mutex_owned(SD_MUTEX(un
)));
16306 ASSERT(un
->un_sense_isbusy
== 0);
16308 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_mark_rqs_busy: entry: "
16309 "buf:0x%p xp:0x%p un:0x%p\n", bp
, SD_GET_XBUF(bp
), un
);
16311 sense_xp
= SD_GET_XBUF(un
->un_rqs_bp
);
16312 ASSERT(sense_xp
!= NULL
);
16314 SD_INFO(SD_LOG_IO
, un
,
16315 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp
);
16317 ASSERT(sense_xp
->xb_pktp
!= NULL
);
16318 ASSERT((sense_xp
->xb_pktp
->pkt_flags
& (FLAG_SENSING
| FLAG_HEAD
))
16319 == (FLAG_SENSING
| FLAG_HEAD
));
16321 un
->un_sense_isbusy
= 1;
16322 un
->un_rqs_bp
->b_resid
= 0;
16323 sense_xp
->xb_pktp
->pkt_resid
= 0;
16324 sense_xp
->xb_pktp
->pkt_reason
= 0;
16326 /* So we can get back the bp at interrupt time! */
16327 sense_xp
->xb_sense_bp
= bp
;
16329 bzero(un
->un_rqs_bp
->b_un
.b_addr
, SENSE_LENGTH
);
16332 * Mark this buf as awaiting sense data. (This is already set in
16333 * the pkt_flags for the RQS packet.)
16335 ((SD_GET_XBUF(bp
))->xb_pktp
)->pkt_flags
|= FLAG_SENSING
;
16337 /* Request sense down same path */
16338 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp
))->xb_pktp
) &&
16339 ((SD_GET_XBUF(bp
))->xb_pktp
)->pkt_path_instance
)
16340 sense_xp
->xb_pktp
->pkt_path_instance
=
16341 ((SD_GET_XBUF(bp
))->xb_pktp
)->pkt_path_instance
;
16343 sense_xp
->xb_retry_count
= 0;
16344 sense_xp
->xb_victim_retry_count
= 0;
16345 sense_xp
->xb_ua_retry_count
= 0;
16346 sense_xp
->xb_nr_retry_count
= 0;
16347 sense_xp
->xb_dma_resid
= 0;
16349 /* Clean up the fields for auto-request sense */
16350 sense_xp
->xb_sense_status
= 0;
16351 sense_xp
->xb_sense_state
= 0;
16352 sense_xp
->xb_sense_resid
= 0;
16353 bzero(sense_xp
->xb_sense_data
, sizeof (sense_xp
->xb_sense_data
));
16355 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_mark_rqs_busy: exit\n");
16360 * Function: sd_mark_rqs_idle
16362 * Description: SD_MUTEX must be held continuously through this routine
16363 * to prevent reuse of the rqs struct before the caller can
16364 * complete it's processing.
16366 * Return Code: Pointer to the RQS buf
16368 * Context: May be called under interrupt context
16371 static struct buf
*
16372 sd_mark_rqs_idle(struct sd_lun
*un
, struct sd_xbuf
*sense_xp
)
16375 ASSERT(un
!= NULL
);
16376 ASSERT(sense_xp
!= NULL
);
16377 ASSERT(mutex_owned(SD_MUTEX(un
)));
16378 ASSERT(un
->un_sense_isbusy
!= 0);
16380 un
->un_sense_isbusy
= 0;
16381 bp
= sense_xp
->xb_sense_bp
;
16382 sense_xp
->xb_sense_bp
= NULL
;
16384 /* This pkt is no longer interested in getting sense data */
16385 ((SD_GET_XBUF(bp
))->xb_pktp
)->pkt_flags
&= ~FLAG_SENSING
;
16393 * Function: sd_alloc_rqs
16395 * Description: Set up the unit to receive auto request sense data
16397 * Return Code: DDI_SUCCESS or DDI_FAILURE
16399 * Context: Called under attach(9E) context
16403 sd_alloc_rqs(struct scsi_device
*devp
, struct sd_lun
*un
)
16405 struct sd_xbuf
*xp
;
16407 ASSERT(un
!= NULL
);
16408 ASSERT(!mutex_owned(SD_MUTEX(un
)));
16409 ASSERT(un
->un_rqs_bp
== NULL
);
16410 ASSERT(un
->un_rqs_pktp
== NULL
);
16413 * First allocate the required buf and scsi_pkt structs, then set up
16414 * the CDB in the scsi_pkt for a REQUEST SENSE command.
16416 un
->un_rqs_bp
= scsi_alloc_consistent_buf(&devp
->sd_address
, NULL
,
16417 MAX_SENSE_LENGTH
, B_READ
, SLEEP_FUNC
, NULL
);
16418 if (un
->un_rqs_bp
== NULL
) {
16419 return (DDI_FAILURE
);
16422 un
->un_rqs_pktp
= scsi_init_pkt(&devp
->sd_address
, NULL
, un
->un_rqs_bp
,
16423 CDB_GROUP0
, 1, 0, PKT_CONSISTENT
, SLEEP_FUNC
, NULL
);
16425 if (un
->un_rqs_pktp
== NULL
) {
16427 return (DDI_FAILURE
);
16430 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16431 (void) scsi_setup_cdb((union scsi_cdb
*)un
->un_rqs_pktp
->pkt_cdbp
,
16432 SCMD_REQUEST_SENSE
, 0, MAX_SENSE_LENGTH
, 0);
16434 SD_FILL_SCSI1_LUN(un
, un
->un_rqs_pktp
);
16436 /* Set up the other needed members in the ARQ scsi_pkt. */
16437 un
->un_rqs_pktp
->pkt_comp
= sdintr
;
16438 un
->un_rqs_pktp
->pkt_time
= sd_io_time
;
16439 un
->un_rqs_pktp
->pkt_flags
|=
16440 (FLAG_SENSING
| FLAG_HEAD
); /* (1222170) */
16443 * Allocate & init the sd_xbuf struct for the RQS command. Do not
16444 * provide any intpkt, destroypkt routines as we take care of
16445 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16447 xp
= kmem_alloc(sizeof (struct sd_xbuf
), KM_SLEEP
);
16448 sd_xbuf_init(un
, un
->un_rqs_bp
, xp
, SD_CHAIN_NULL
, NULL
);
16449 xp
->xb_pktp
= un
->un_rqs_pktp
;
16450 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
16451 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16452 un
, xp
, un
->un_rqs_pktp
, un
->un_rqs_bp
);
16455 * Save the pointer to the request sense private bp so it can
16456 * be retrieved in sdintr.
16458 un
->un_rqs_pktp
->pkt_private
= un
->un_rqs_bp
;
16459 ASSERT(un
->un_rqs_bp
->b_private
== xp
);
16462 * See if the HBA supports auto-request sense for the specified
16463 * target/lun. If it does, then try to enable it (if not already
16466 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16467 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16468 * return success. However, in both of these cases ARQ is always
16469 * enabled and scsi_ifgetcap will always return true. The best approach
16470 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16472 * The 3rd case is the HBA (adp) always return enabled on
16473 * scsi_ifgetgetcap even when it's not enable, the best approach
16474 * is issue a scsi_ifsetcap then a scsi_ifgetcap
16475 * Note: this case is to circumvent the Adaptec bug. (x86 only)
16478 if (un
->un_f_is_fibre
== TRUE
) {
16479 un
->un_f_arq_enabled
= TRUE
;
16481 #if defined(__i386) || defined(__amd64)
16483 * Circumvent the Adaptec bug, remove this code when
16486 (void) scsi_ifsetcap(SD_ADDRESS(un
), "auto-rqsense", 1, 1);
16488 switch (scsi_ifgetcap(SD_ADDRESS(un
), "auto-rqsense", 1)) {
16490 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
16491 "sd_alloc_rqs: HBA supports ARQ\n");
16493 * ARQ is supported by this HBA but currently is not
16494 * enabled. Attempt to enable it and if successful then
16495 * mark this instance as ARQ enabled.
16497 if (scsi_ifsetcap(SD_ADDRESS(un
), "auto-rqsense", 1, 1)
16499 /* Successfully enabled ARQ in the HBA */
16500 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
16501 "sd_alloc_rqs: ARQ enabled\n");
16502 un
->un_f_arq_enabled
= TRUE
;
16504 /* Could not enable ARQ in the HBA */
16505 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
16506 "sd_alloc_rqs: failed ARQ enable\n");
16507 un
->un_f_arq_enabled
= FALSE
;
16512 * ARQ is supported by this HBA and is already enabled.
16513 * Just mark ARQ as enabled for this instance.
16515 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
16516 "sd_alloc_rqs: ARQ already enabled\n");
16517 un
->un_f_arq_enabled
= TRUE
;
16521 * ARQ is not supported by this HBA; disable it for this
16524 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
16525 "sd_alloc_rqs: HBA does not support ARQ\n");
16526 un
->un_f_arq_enabled
= FALSE
;
16531 return (DDI_SUCCESS
);
16536 * Function: sd_free_rqs
16538 * Description: Cleanup for the pre-instance RQS command.
16540 * Context: Kernel thread context
16544 sd_free_rqs(struct sd_lun
*un
)
16546 ASSERT(un
!= NULL
);
16548 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_free_rqs: entry\n");
16551 * If consistent memory is bound to a scsi_pkt, the pkt
16552 * has to be destroyed *before* freeing the consistent memory.
16553 * Don't change the sequence of this operations.
16554 * scsi_destroy_pkt() might access memory, which isn't allowed,
16555 * after it was freed in scsi_free_consistent_buf().
16557 if (un
->un_rqs_pktp
!= NULL
) {
16558 scsi_destroy_pkt(un
->un_rqs_pktp
);
16559 un
->un_rqs_pktp
= NULL
;
16562 if (un
->un_rqs_bp
!= NULL
) {
16563 struct sd_xbuf
*xp
= SD_GET_XBUF(un
->un_rqs_bp
);
16565 kmem_free(xp
, sizeof (struct sd_xbuf
));
16567 scsi_free_consistent_buf(un
->un_rqs_bp
);
16568 un
->un_rqs_bp
= NULL
;
16570 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_free_rqs: exit\n");
16576 * Function: sd_reduce_throttle
16578 * Description: Reduces the maximum # of outstanding commands on a
16579 * target to the current number of outstanding commands.
16580 * Queues a tiemout(9F) callback to restore the limit
16581 * after a specified interval has elapsed.
16582 * Typically used when we get a TRAN_BUSY return code
16583 * back from scsi_transport().
16585 * Arguments: un - ptr to the sd_lun softstate struct
16586 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16588 * Context: May be called from interrupt context
16592 sd_reduce_throttle(struct sd_lun
*un
, int throttle_type
)
16594 ASSERT(un
!= NULL
);
16595 ASSERT(mutex_owned(SD_MUTEX(un
)));
16596 ASSERT(un
->un_ncmds_in_transport
>= 0);
16598 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_reduce_throttle: "
16599 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16600 un
, un
->un_throttle
, un
->un_ncmds_in_transport
);
16602 if (un
->un_throttle
> 1) {
16603 if (un
->un_f_use_adaptive_throttle
== TRUE
) {
16604 switch (throttle_type
) {
16605 case SD_THROTTLE_TRAN_BUSY
:
16606 if (un
->un_busy_throttle
== 0) {
16607 un
->un_busy_throttle
= un
->un_throttle
;
16610 case SD_THROTTLE_QFULL
:
16611 un
->un_busy_throttle
= 0;
16617 if (un
->un_ncmds_in_transport
> 0) {
16618 un
->un_throttle
= un
->un_ncmds_in_transport
;
16622 if (un
->un_ncmds_in_transport
== 0) {
16623 un
->un_throttle
= 1;
16625 un
->un_throttle
= un
->un_ncmds_in_transport
;
16630 /* Reschedule the timeout if none is currently active */
16631 if (un
->un_reset_throttle_timeid
== NULL
) {
16632 un
->un_reset_throttle_timeid
= timeout(sd_restore_throttle
,
16633 un
, SD_THROTTLE_RESET_INTERVAL
);
16634 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16635 "sd_reduce_throttle: timeout scheduled!\n");
16638 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_reduce_throttle: "
16639 "exit: un:0x%p un_throttle:%d\n", un
, un
->un_throttle
);
16645 * Function: sd_restore_throttle
16647 * Description: Callback function for timeout(9F). Resets the current
16648 * value of un->un_throttle to its default.
16650 * Arguments: arg - pointer to associated softstate for the device.
16652 * Context: May be called from interrupt context
16656 sd_restore_throttle(void *arg
)
16658 struct sd_lun
*un
= arg
;
16660 ASSERT(un
!= NULL
);
16661 ASSERT(!mutex_owned(SD_MUTEX(un
)));
16663 mutex_enter(SD_MUTEX(un
));
16665 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
, "sd_restore_throttle: "
16666 "entry: un:0x%p un_throttle:%d\n", un
, un
->un_throttle
);
16668 un
->un_reset_throttle_timeid
= NULL
;
16670 if (un
->un_f_use_adaptive_throttle
== TRUE
) {
16672 * If un_busy_throttle is nonzero, then it contains the
16673 * value that un_throttle was when we got a TRAN_BUSY back
16674 * from scsi_transport(). We want to revert back to this
16677 * In the QFULL case, the throttle limit will incrementally
16678 * increase until it reaches max throttle.
16680 if (un
->un_busy_throttle
> 0) {
16681 un
->un_throttle
= un
->un_busy_throttle
;
16682 un
->un_busy_throttle
= 0;
16685 * increase throttle by 10% open gate slowly, schedule
16686 * another restore if saved throttle has not been
16690 if (sd_qfull_throttle_enable
) {
16691 throttle
= un
->un_throttle
+
16692 max((un
->un_throttle
/ 10), 1);
16694 (throttle
< un
->un_saved_throttle
) ?
16695 throttle
: un
->un_saved_throttle
;
16696 if (un
->un_throttle
< un
->un_saved_throttle
) {
16697 un
->un_reset_throttle_timeid
=
16698 timeout(sd_restore_throttle
,
16700 SD_QFULL_THROTTLE_RESET_INTERVAL
);
16706 * If un_throttle has fallen below the low-water mark, we
16707 * restore the maximum value here (and allow it to ratchet
16708 * down again if necessary).
16710 if (un
->un_throttle
< un
->un_min_throttle
) {
16711 un
->un_throttle
= un
->un_saved_throttle
;
16714 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
, "sd_restore_throttle: "
16715 "restoring limit from 0x%x to 0x%x\n",
16716 un
->un_throttle
, un
->un_saved_throttle
);
16717 un
->un_throttle
= un
->un_saved_throttle
;
16720 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
,
16721 "sd_restore_throttle: calling sd_start_cmds!\n");
16723 sd_start_cmds(un
, NULL
);
16725 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
,
16726 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16727 un
, un
->un_throttle
);
16729 mutex_exit(SD_MUTEX(un
));
16731 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
, "sd_restore_throttle: exit\n");
16735 * Function: sdrunout
16737 * Description: Callback routine for scsi_init_pkt when a resource allocation
16740 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16741 * soft state instance.
16743 * Return Code: The scsi_init_pkt routine allows for the callback function to
16744 * return a 0 indicating the callback should be rescheduled or a 1
16745 * indicating not to reschedule. This routine always returns 1
16746 * because the driver always provides a callback function to
16747 * scsi_init_pkt. This results in a callback always being scheduled
16748 * (via the scsi_init_pkt callback implementation) if a resource
16751 * Context: This callback function may not block or call routines that block
16753 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16754 * request persisting at the head of the list which cannot be
16755 * satisfied even after multiple retries. In the future the driver
16756 * may implement some time of maximum runout count before failing
16761 sdrunout(caddr_t arg
)
16763 struct sd_lun
*un
= (struct sd_lun
*)arg
;
16765 ASSERT(un
!= NULL
);
16766 ASSERT(!mutex_owned(SD_MUTEX(un
)));
16768 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sdrunout: entry\n");
16770 mutex_enter(SD_MUTEX(un
));
16771 sd_start_cmds(un
, NULL
);
16772 mutex_exit(SD_MUTEX(un
));
16774 * This callback routine always returns 1 (i.e. do not reschedule)
16775 * because we always specify sdrunout as the callback handler for
16776 * scsi_init_pkt inside the call to sd_start_cmds.
16778 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sdrunout: exit\n");
16786 * Description: Completion callback routine for scsi_pkt(9S) structs
16787 * sent to the HBA driver via scsi_transport(9F).
16789 * Context: Interrupt context
16793 sdintr(struct scsi_pkt
*pktp
)
16796 struct sd_xbuf
*xp
;
16801 ASSERT(pktp
!= NULL
);
16802 bp
= (struct buf
*)pktp
->pkt_private
;
16803 ASSERT(bp
!= NULL
);
16804 xp
= SD_GET_XBUF(bp
);
16805 ASSERT(xp
!= NULL
);
16806 ASSERT(xp
->xb_pktp
!= NULL
);
16807 un
= SD_GET_UN(bp
);
16808 ASSERT(un
!= NULL
);
16809 ASSERT(!mutex_owned(SD_MUTEX(un
)));
16811 #ifdef SD_FAULT_INJECTION
16813 SD_INFO(SD_LOG_IOERR
, un
, "sdintr: sdintr calling Fault injection\n");
16814 /* SD FaultInjection */
16815 sd_faultinjection(pktp
);
16817 #endif /* SD_FAULT_INJECTION */
16819 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sdintr: entry: buf:0x%p,"
16820 " xp:0x%p, un:0x%p\n", bp
, xp
, un
);
16822 mutex_enter(SD_MUTEX(un
));
16824 ASSERT(un
->un_fm_private
!= NULL
);
16825 sscp
= &((struct sd_fm_internal
*)(un
->un_fm_private
))->fm_ssc
;
16826 ASSERT(sscp
!= NULL
);
16828 /* Reduce the count of the #commands currently in transport */
16829 un
->un_ncmds_in_transport
--;
16830 ASSERT(un
->un_ncmds_in_transport
>= 0);
16832 /* Increment counter to indicate that the callback routine is active */
16833 un
->un_in_callback
++;
16835 SD_UPDATE_KSTATS(un
, kstat_runq_exit
, bp
);
16838 if (bp
== un
->un_retry_bp
) {
16839 SD_TRACE(SD_LOG_IO
| SD_LOG_ERROR
, un
, "sdintr: "
16840 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16841 un
, un
->un_retry_bp
, un
->un_ncmds_in_transport
);
16846 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
16849 if (pktp
->pkt_reason
== CMD_DEV_GONE
) {
16850 /* Prevent multiple console messages for the same failure. */
16851 if (un
->un_last_pkt_reason
!= CMD_DEV_GONE
) {
16852 un
->un_last_pkt_reason
= CMD_DEV_GONE
;
16853 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
16854 "Command failed to complete...Device is gone\n");
16856 if (un
->un_mediastate
!= DKIO_DEV_GONE
) {
16857 un
->un_mediastate
= DKIO_DEV_GONE
;
16858 cv_broadcast(&un
->un_state_cv
);
16861 * If the command happens to be the REQUEST SENSE command,
16862 * free up the rqs buf and fail the original command.
16864 if (bp
== un
->un_rqs_bp
) {
16865 bp
= sd_mark_rqs_idle(un
, xp
);
16867 sd_return_failed_command(un
, bp
, EIO
);
16871 if (pktp
->pkt_state
& STATE_XARQ_DONE
) {
16872 SD_TRACE(SD_LOG_COMMON
, un
,
16873 "sdintr: extra sense data received. pkt=%p\n", pktp
);
16877 * First see if the pkt has auto-request sense data with it....
16878 * Look at the packet state first so we don't take a performance
16879 * hit looking at the arq enabled flag unless absolutely necessary.
16881 if ((pktp
->pkt_state
& STATE_ARQ_DONE
) &&
16882 (un
->un_f_arq_enabled
== TRUE
)) {
16884 * The HBA did an auto request sense for this command so check
16885 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
16886 * driver command that should not be retried.
16888 if ((pktp
->pkt_flags
& FLAG_DIAGNOSE
) != 0) {
16890 * Save the relevant sense info into the xp for the
16893 struct scsi_arq_status
*asp
;
16894 asp
= (struct scsi_arq_status
*)(pktp
->pkt_scbp
);
16895 xp
->xb_sense_status
=
16896 *((uchar_t
*)(&(asp
->sts_rqpkt_status
)));
16897 xp
->xb_sense_state
= asp
->sts_rqpkt_state
;
16898 xp
->xb_sense_resid
= asp
->sts_rqpkt_resid
;
16899 if (pktp
->pkt_state
& STATE_XARQ_DONE
) {
16900 actual_len
= MAX_SENSE_LENGTH
-
16901 xp
->xb_sense_resid
;
16902 bcopy(&asp
->sts_sensedata
, xp
->xb_sense_data
,
16905 if (xp
->xb_sense_resid
> SENSE_LENGTH
) {
16906 actual_len
= MAX_SENSE_LENGTH
-
16907 xp
->xb_sense_resid
;
16909 actual_len
= SENSE_LENGTH
-
16910 xp
->xb_sense_resid
;
16912 if (xp
->xb_pkt_flags
& SD_XB_USCSICMD
) {
16913 if ((((struct uscsi_cmd
*)
16914 (xp
->xb_pktinfo
))->uscsi_rqlen
) >
16916 xp
->xb_sense_resid
=
16917 (((struct uscsi_cmd
*)
16918 (xp
->xb_pktinfo
))->
16919 uscsi_rqlen
) - actual_len
;
16921 xp
->xb_sense_resid
= 0;
16924 bcopy(&asp
->sts_sensedata
, xp
->xb_sense_data
,
16928 /* fail the command */
16929 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16930 "sdintr: arq done and FLAG_DIAGNOSE set\n");
16931 sd_return_failed_command(un
, bp
, EIO
);
16935 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
16937 * We want to either retry or fail this command, so free
16938 * the DMA resources here. If we retry the command then
16939 * the DMA resources will be reallocated in sd_start_cmds().
16940 * Note that when PKT_DMA_PARTIAL is used, this reallocation
16941 * causes the *entire* transfer to start over again from the
16942 * beginning of the request, even for PARTIAL chunks that
16943 * have already transferred successfully.
16945 if ((un
->un_f_is_fibre
== TRUE
) &&
16946 ((xp
->xb_pkt_flags
& SD_XB_USCSICMD
) == 0) &&
16947 ((pktp
->pkt_flags
& FLAG_SENSING
) == 0)) {
16948 scsi_dmafree(pktp
);
16949 xp
->xb_pkt_flags
|= SD_XB_DMA_FREED
;
16953 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16954 "sdintr: arq done, sd_handle_auto_request_sense\n");
16956 sd_handle_auto_request_sense(un
, bp
, xp
, pktp
);
16960 /* Next see if this is the REQUEST SENSE pkt for the instance */
16961 if (pktp
->pkt_flags
& FLAG_SENSING
) {
16962 /* This pktp is from the unit's REQUEST_SENSE command */
16963 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
16964 "sdintr: sd_handle_request_sense\n");
16965 sd_handle_request_sense(un
, bp
, xp
, pktp
);
16970 * Check to see if the command successfully completed as requested;
16971 * this is the most common case (and also the hot performance path).
16973 * Requirements for successful completion are:
16974 * pkt_reason is CMD_CMPLT and packet status is status good.
16976 * - A residual of zero indicates successful completion no matter what
16978 * - If the residual is not zero and the command is not a read or
16979 * write, then it's still defined as successful completion. In other
16980 * words, if the command is a read or write the residual must be
16981 * zero for successful completion.
16982 * - If the residual is not zero and the command is a read or
16983 * write, and it's a USCSICMD, then it's still defined as
16984 * successful completion.
16986 if ((pktp
->pkt_reason
== CMD_CMPLT
) &&
16987 (SD_GET_PKT_STATUS(pktp
) == STATUS_GOOD
)) {
16990 * Since this command is returned with a good status, we
16991 * can reset the count for Sonoma failover.
16993 un
->un_sonoma_failure_count
= 0;
16996 * Return all USCSI commands on good status
16998 if (pktp
->pkt_resid
== 0) {
16999 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17000 "sdintr: returning command for resid == 0\n");
17001 } else if (((SD_GET_PKT_OPCODE(pktp
) & 0x1F) != SCMD_READ
) &&
17002 ((SD_GET_PKT_OPCODE(pktp
) & 0x1F) != SCMD_WRITE
)) {
17003 SD_UPDATE_B_RESID(bp
, pktp
);
17004 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17005 "sdintr: returning command for resid != 0\n");
17006 } else if (xp
->xb_pkt_flags
& SD_XB_USCSICMD
) {
17007 SD_UPDATE_B_RESID(bp
, pktp
);
17008 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17009 "sdintr: returning uscsi command\n");
17011 goto not_successful
;
17013 sd_return_command(un
, bp
);
17016 * Decrement counter to indicate that the callback routine
17019 un
->un_in_callback
--;
17020 ASSERT(un
->un_in_callback
>= 0);
17021 mutex_exit(SD_MUTEX(un
));
17028 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
17030 * The following is based upon knowledge of the underlying transport
17031 * and its use of DMA resources. This code should be removed when
17032 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
17033 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
17034 * and sd_start_cmds().
17036 * Free any DMA resources associated with this command if there
17037 * is a chance it could be retried or enqueued for later retry.
17038 * If we keep the DMA binding then mpxio cannot reissue the
17039 * command on another path whenever a path failure occurs.
17041 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
17042 * causes the *entire* transfer to start over again from the
17043 * beginning of the request, even for PARTIAL chunks that
17044 * have already transferred successfully.
17046 * This is only done for non-uscsi commands (and also skipped for the
17047 * driver's internal RQS command). Also just do this for Fibre Channel
17048 * devices as these are the only ones that support mpxio.
17050 if ((un
->un_f_is_fibre
== TRUE
) &&
17051 ((xp
->xb_pkt_flags
& SD_XB_USCSICMD
) == 0) &&
17052 ((pktp
->pkt_flags
& FLAG_SENSING
) == 0)) {
17053 scsi_dmafree(pktp
);
17054 xp
->xb_pkt_flags
|= SD_XB_DMA_FREED
;
17059 * The command did not successfully complete as requested so check
17060 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
17061 * driver command that should not be retried so just return. If
17062 * FLAG_DIAGNOSE is not set the error will be processed below.
17064 if ((pktp
->pkt_flags
& FLAG_DIAGNOSE
) != 0) {
17065 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17066 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
17068 * Issue a request sense if a check condition caused the error
17069 * (we handle the auto request sense case above), otherwise
17070 * just fail the command.
17072 if ((pktp
->pkt_reason
== CMD_CMPLT
) &&
17073 (SD_GET_PKT_STATUS(pktp
) == STATUS_CHECK
)) {
17074 sd_send_request_sense_command(un
, bp
, pktp
);
17076 sd_return_failed_command(un
, bp
, EIO
);
17082 * The command did not successfully complete as requested so process
17083 * the error, retry, and/or attempt recovery.
17085 switch (pktp
->pkt_reason
) {
17087 switch (SD_GET_PKT_STATUS(pktp
)) {
17090 * The command completed successfully with a non-zero
17093 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17094 "sdintr: STATUS_GOOD \n");
17095 sd_pkt_status_good(un
, bp
, xp
, pktp
);
17099 case STATUS_TERMINATED
:
17100 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17101 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
17102 sd_pkt_status_check_condition(un
, bp
, xp
, pktp
);
17106 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17107 "sdintr: STATUS_BUSY\n");
17108 sd_pkt_status_busy(un
, bp
, xp
, pktp
);
17111 case STATUS_RESERVATION_CONFLICT
:
17112 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17113 "sdintr: STATUS_RESERVATION_CONFLICT\n");
17114 sd_pkt_status_reservation_conflict(un
, bp
, xp
, pktp
);
17118 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17119 "sdintr: STATUS_QFULL\n");
17120 sd_pkt_status_qfull(un
, bp
, xp
, pktp
);
17124 case STATUS_INTERMEDIATE
:
17126 case STATUS_INTERMEDIATE_MET
:
17127 case STATUS_ACA_ACTIVE
:
17128 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
17129 "Unexpected SCSI status received: 0x%x\n",
17130 SD_GET_PKT_STATUS(pktp
));
17132 * Mark the ssc_flags when detected invalid status
17133 * code for non-USCSI command.
17135 if (!(xp
->xb_pkt_flags
& SD_XB_USCSICMD
)) {
17136 sd_ssc_set_info(sscp
, SSC_FLAGS_INVALID_STATUS
,
17139 sd_return_failed_command(un
, bp
, EIO
);
17143 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
17144 "Invalid SCSI status received: 0x%x\n",
17145 SD_GET_PKT_STATUS(pktp
));
17146 if (!(xp
->xb_pkt_flags
& SD_XB_USCSICMD
)) {
17147 sd_ssc_set_info(sscp
, SSC_FLAGS_INVALID_STATUS
,
17150 sd_return_failed_command(un
, bp
, EIO
);
17156 case CMD_INCOMPLETE
:
17157 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17158 "sdintr: CMD_INCOMPLETE\n");
17159 sd_pkt_reason_cmd_incomplete(un
, bp
, xp
, pktp
);
17162 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17163 "sdintr: CMD_TRAN_ERR\n");
17164 sd_pkt_reason_cmd_tran_err(un
, bp
, xp
, pktp
);
17167 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17168 "sdintr: CMD_RESET \n");
17169 sd_pkt_reason_cmd_reset(un
, bp
, xp
, pktp
);
17172 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17173 "sdintr: CMD_ABORTED \n");
17174 sd_pkt_reason_cmd_aborted(un
, bp
, xp
, pktp
);
17177 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17178 "sdintr: CMD_TIMEOUT\n");
17179 sd_pkt_reason_cmd_timeout(un
, bp
, xp
, pktp
);
17181 case CMD_UNX_BUS_FREE
:
17182 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17183 "sdintr: CMD_UNX_BUS_FREE \n");
17184 sd_pkt_reason_cmd_unx_bus_free(un
, bp
, xp
, pktp
);
17186 case CMD_TAG_REJECT
:
17187 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17188 "sdintr: CMD_TAG_REJECT\n");
17189 sd_pkt_reason_cmd_tag_reject(un
, bp
, xp
, pktp
);
17192 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
17193 "sdintr: default\n");
17195 * Mark the ssc_flags for detecting invliad pkt_reason.
17197 if (!(xp
->xb_pkt_flags
& SD_XB_USCSICMD
)) {
17198 sd_ssc_set_info(sscp
, SSC_FLAGS_INVALID_PKT_REASON
,
17201 sd_pkt_reason_default(un
, bp
, xp
, pktp
);
17206 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sdintr: exit\n");
17208 /* Decrement counter to indicate that the callback routine is done. */
17209 un
->un_in_callback
--;
17210 ASSERT(un
->un_in_callback
>= 0);
17213 * At this point, the pkt has been dispatched, ie, it is either
17214 * being re-tried or has been returned to its caller and should
17215 * not be referenced.
17218 mutex_exit(SD_MUTEX(un
));
17223 * Function: sd_print_incomplete_msg
17225 * Description: Prints the error message for a CMD_INCOMPLETE error.
17227 * Arguments: un - ptr to associated softstate for the device.
17228 * bp - ptr to the buf(9S) for the command.
17229 * arg - message string ptr
17230 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17231 * or SD_NO_RETRY_ISSUED.
17233 * Context: May be called under interrupt context
17237 sd_print_incomplete_msg(struct sd_lun
*un
, struct buf
*bp
, void *arg
, int code
)
17239 struct scsi_pkt
*pktp
;
17243 ASSERT(un
!= NULL
);
17244 ASSERT(mutex_owned(SD_MUTEX(un
)));
17245 ASSERT(bp
!= NULL
);
17246 ASSERT(arg
!= NULL
);
17247 pktp
= SD_GET_PKTP(bp
);
17248 ASSERT(pktp
!= NULL
);
17251 case SD_DELAYED_RETRY_ISSUED
:
17252 case SD_IMMEDIATE_RETRY_ISSUED
:
17255 case SD_NO_RETRY_ISSUED
:
17257 msgp
= "giving up";
17261 if ((pktp
->pkt_flags
& FLAG_SILENT
) == 0) {
17262 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
17263 "incomplete %s- %s\n", cmdp
, msgp
);
17270 * Function: sd_pkt_status_good
17272 * Description: Processing for a STATUS_GOOD code in pkt_status.
17274 * Context: May be called under interrupt context
17278 sd_pkt_status_good(struct sd_lun
*un
, struct buf
*bp
,
17279 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
17283 ASSERT(un
!= NULL
);
17284 ASSERT(mutex_owned(SD_MUTEX(un
)));
17285 ASSERT(bp
!= NULL
);
17286 ASSERT(xp
!= NULL
);
17287 ASSERT(pktp
!= NULL
);
17288 ASSERT(pktp
->pkt_reason
== CMD_CMPLT
);
17289 ASSERT(SD_GET_PKT_STATUS(pktp
) == STATUS_GOOD
);
17290 ASSERT(pktp
->pkt_resid
!= 0);
17292 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_pkt_status_good: entry\n");
17294 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
17295 switch (SD_GET_PKT_OPCODE(pktp
) & 0x1F) {
17303 SD_UPDATE_B_RESID(bp
, pktp
);
17304 sd_return_command(un
, bp
);
17305 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_pkt_status_good: exit\n");
17310 * See if we can retry the read/write, preferrably immediately.
17311 * If retries are exhaused, then sd_retry_command() will update
17312 * the b_resid count.
17314 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, sd_print_incomplete_msg
,
17315 cmdp
, EIO
, (clock_t)0, NULL
);
17317 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_pkt_status_good: exit\n");
17325 * Function: sd_handle_request_sense
17327 * Description: Processing for non-auto Request Sense command.
17329 * Arguments: un - ptr to associated softstate
17330 * sense_bp - ptr to buf(9S) for the RQS command
17331 * sense_xp - ptr to the sd_xbuf for the RQS command
17332 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
17334 * Context: May be called under interrupt context
17338 sd_handle_request_sense(struct sd_lun
*un
, struct buf
*sense_bp
,
17339 struct sd_xbuf
*sense_xp
, struct scsi_pkt
*sense_pktp
)
17341 struct buf
*cmd_bp
; /* buf for the original command */
17342 struct sd_xbuf
*cmd_xp
; /* sd_xbuf for the original command */
17343 struct scsi_pkt
*cmd_pktp
; /* pkt for the original command */
17344 size_t actual_len
; /* actual sense data length */
17346 ASSERT(un
!= NULL
);
17347 ASSERT(mutex_owned(SD_MUTEX(un
)));
17348 ASSERT(sense_bp
!= NULL
);
17349 ASSERT(sense_xp
!= NULL
);
17350 ASSERT(sense_pktp
!= NULL
);
17353 * Note the sense_bp, sense_xp, and sense_pktp here are for the
17354 * RQS command and not the original command.
17356 ASSERT(sense_pktp
== un
->un_rqs_pktp
);
17357 ASSERT(sense_bp
== un
->un_rqs_bp
);
17358 ASSERT((sense_pktp
->pkt_flags
& (FLAG_SENSING
| FLAG_HEAD
)) ==
17359 (FLAG_SENSING
| FLAG_HEAD
));
17360 ASSERT((((SD_GET_XBUF(sense_xp
->xb_sense_bp
))->xb_pktp
->pkt_flags
) &
17361 FLAG_SENSING
) == FLAG_SENSING
);
17363 /* These are the bp, xp, and pktp for the original command */
17364 cmd_bp
= sense_xp
->xb_sense_bp
;
17365 cmd_xp
= SD_GET_XBUF(cmd_bp
);
17366 cmd_pktp
= SD_GET_PKTP(cmd_bp
);
17368 if (sense_pktp
->pkt_reason
!= CMD_CMPLT
) {
17370 * The REQUEST SENSE command failed. Release the REQUEST
17371 * SENSE command for re-use, get back the bp for the original
17372 * command, and attempt to re-try the original command if
17373 * FLAG_DIAGNOSE is not set in the original packet.
17375 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
17376 if ((cmd_pktp
->pkt_flags
& FLAG_DIAGNOSE
) == 0) {
17377 cmd_bp
= sd_mark_rqs_idle(un
, sense_xp
);
17378 sd_retry_command(un
, cmd_bp
, SD_RETRIES_STANDARD
,
17379 NULL
, NULL
, EIO
, (clock_t)0, NULL
);
17385 * Save the relevant sense info into the xp for the original cmd.
17387 * Note: if the request sense failed the state info will be zero
17388 * as set in sd_mark_rqs_busy()
17390 cmd_xp
->xb_sense_status
= *(sense_pktp
->pkt_scbp
);
17391 cmd_xp
->xb_sense_state
= sense_pktp
->pkt_state
;
17392 actual_len
= MAX_SENSE_LENGTH
- sense_pktp
->pkt_resid
;
17393 if ((cmd_xp
->xb_pkt_flags
& SD_XB_USCSICMD
) &&
17394 (((struct uscsi_cmd
*)cmd_xp
->xb_pktinfo
)->uscsi_rqlen
>
17396 bcopy(sense_bp
->b_un
.b_addr
, cmd_xp
->xb_sense_data
,
17398 cmd_xp
->xb_sense_resid
= sense_pktp
->pkt_resid
;
17400 bcopy(sense_bp
->b_un
.b_addr
, cmd_xp
->xb_sense_data
,
17402 if (actual_len
< SENSE_LENGTH
) {
17403 cmd_xp
->xb_sense_resid
= SENSE_LENGTH
- actual_len
;
17405 cmd_xp
->xb_sense_resid
= 0;
17410 * Free up the RQS command....
17412 * Must do this BEFORE calling sd_validate_sense_data!
17413 * sd_validate_sense_data may return the original command in
17414 * which case the pkt will be freed and the flags can no
17415 * longer be touched.
17416 * SD_MUTEX is held through this process until the command
17417 * is dispatched based upon the sense data, so there are
17418 * no race conditions.
17420 (void) sd_mark_rqs_idle(un
, sense_xp
);
17423 * For a retryable command see if we have valid sense data, if so then
17424 * turn it over to sd_decode_sense() to figure out the right course of
17425 * action. Just fail a non-retryable command.
17427 if ((cmd_pktp
->pkt_flags
& FLAG_DIAGNOSE
) == 0) {
17428 if (sd_validate_sense_data(un
, cmd_bp
, cmd_xp
, actual_len
) ==
17429 SD_SENSE_DATA_IS_VALID
) {
17430 sd_decode_sense(un
, cmd_bp
, cmd_xp
, cmd_pktp
);
17433 SD_DUMP_MEMORY(un
, SD_LOG_IO_CORE
, "Failed CDB",
17434 (uchar_t
*)cmd_pktp
->pkt_cdbp
, CDB_SIZE
, SD_LOG_HEX
);
17435 SD_DUMP_MEMORY(un
, SD_LOG_IO_CORE
, "Sense Data",
17436 (uchar_t
*)cmd_xp
->xb_sense_data
, SENSE_LENGTH
, SD_LOG_HEX
);
17437 sd_return_failed_command(un
, cmd_bp
, EIO
);
17445 * Function: sd_handle_auto_request_sense
17447 * Description: Processing for auto-request sense information.
17449 * Arguments: un - ptr to associated softstate
17450 * bp - ptr to buf(9S) for the command
17451 * xp - ptr to the sd_xbuf for the command
17452 * pktp - ptr to the scsi_pkt(9S) for the command
17454 * Context: May be called under interrupt context
17458 sd_handle_auto_request_sense(struct sd_lun
*un
, struct buf
*bp
,
17459 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
17461 struct scsi_arq_status
*asp
;
17464 ASSERT(un
!= NULL
);
17465 ASSERT(mutex_owned(SD_MUTEX(un
)));
17466 ASSERT(bp
!= NULL
);
17467 ASSERT(xp
!= NULL
);
17468 ASSERT(pktp
!= NULL
);
17469 ASSERT(pktp
!= un
->un_rqs_pktp
);
17470 ASSERT(bp
!= un
->un_rqs_bp
);
17473 * For auto-request sense, we get a scsi_arq_status back from
17474 * the HBA, with the sense data in the sts_sensedata member.
17475 * The pkt_scbp of the packet points to this scsi_arq_status.
17477 asp
= (struct scsi_arq_status
*)(pktp
->pkt_scbp
);
17479 if (asp
->sts_rqpkt_reason
!= CMD_CMPLT
) {
17481 * The auto REQUEST SENSE failed; see if we can re-try
17482 * the original command.
17484 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
17485 "auto request sense failed (reason=%s)\n",
17486 scsi_rname(asp
->sts_rqpkt_reason
));
17488 sd_reset_target(un
, pktp
);
17490 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
,
17491 NULL
, NULL
, EIO
, (clock_t)0, NULL
);
17495 /* Save the relevant sense info into the xp for the original cmd. */
17496 xp
->xb_sense_status
= *((uchar_t
*)(&(asp
->sts_rqpkt_status
)));
17497 xp
->xb_sense_state
= asp
->sts_rqpkt_state
;
17498 xp
->xb_sense_resid
= asp
->sts_rqpkt_resid
;
17499 if (xp
->xb_sense_state
& STATE_XARQ_DONE
) {
17500 actual_len
= MAX_SENSE_LENGTH
- xp
->xb_sense_resid
;
17501 bcopy(&asp
->sts_sensedata
, xp
->xb_sense_data
,
17504 if (xp
->xb_sense_resid
> SENSE_LENGTH
) {
17505 actual_len
= MAX_SENSE_LENGTH
- xp
->xb_sense_resid
;
17507 actual_len
= SENSE_LENGTH
- xp
->xb_sense_resid
;
17509 if (xp
->xb_pkt_flags
& SD_XB_USCSICMD
) {
17510 if ((((struct uscsi_cmd
*)
17511 (xp
->xb_pktinfo
))->uscsi_rqlen
) > actual_len
) {
17512 xp
->xb_sense_resid
= (((struct uscsi_cmd
*)
17513 (xp
->xb_pktinfo
))->uscsi_rqlen
) -
17516 xp
->xb_sense_resid
= 0;
17519 bcopy(&asp
->sts_sensedata
, xp
->xb_sense_data
, SENSE_LENGTH
);
17523 * See if we have valid sense data, if so then turn it over to
17524 * sd_decode_sense() to figure out the right course of action.
17526 if (sd_validate_sense_data(un
, bp
, xp
, actual_len
) ==
17527 SD_SENSE_DATA_IS_VALID
) {
17528 sd_decode_sense(un
, bp
, xp
, pktp
);
17534 * Function: sd_print_sense_failed_msg
17536 * Description: Print log message when RQS has failed.
17538 * Arguments: un - ptr to associated softstate
17539 * bp - ptr to buf(9S) for the command
17540 * arg - generic message string ptr
17541 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17542 * or SD_NO_RETRY_ISSUED
17544 * Context: May be called from interrupt context
17548 sd_print_sense_failed_msg(struct sd_lun
*un
, struct buf
*bp
, void *arg
,
17553 ASSERT(un
!= NULL
);
17554 ASSERT(mutex_owned(SD_MUTEX(un
)));
17555 ASSERT(bp
!= NULL
);
17557 if ((code
== SD_NO_RETRY_ISSUED
) && (msgp
!= NULL
)) {
17558 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
, msgp
);
17564 * Function: sd_validate_sense_data
17566 * Description: Check the given sense data for validity.
17567 * If the sense data is not valid, the command will
17568 * be either failed or retried!
17570 * Return Code: SD_SENSE_DATA_IS_INVALID
17571 * SD_SENSE_DATA_IS_VALID
17573 * Context: May be called from interrupt context
17577 sd_validate_sense_data(struct sd_lun
*un
, struct buf
*bp
, struct sd_xbuf
*xp
,
17580 struct scsi_extended_sense
*esp
;
17581 struct scsi_pkt
*pktp
;
17585 ASSERT(un
!= NULL
);
17586 ASSERT(mutex_owned(SD_MUTEX(un
)));
17587 ASSERT(bp
!= NULL
);
17588 ASSERT(bp
!= un
->un_rqs_bp
);
17589 ASSERT(xp
!= NULL
);
17590 ASSERT(un
->un_fm_private
!= NULL
);
17592 pktp
= SD_GET_PKTP(bp
);
17593 ASSERT(pktp
!= NULL
);
17595 sscp
= &((struct sd_fm_internal
*)(un
->un_fm_private
))->fm_ssc
;
17596 ASSERT(sscp
!= NULL
);
17599 * Check the status of the RQS command (auto or manual).
17601 switch (xp
->xb_sense_status
& STATUS_MASK
) {
17605 case STATUS_RESERVATION_CONFLICT
:
17606 sd_pkt_status_reservation_conflict(un
, bp
, xp
, pktp
);
17607 return (SD_SENSE_DATA_IS_INVALID
);
17610 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
17611 "Busy Status on REQUEST SENSE\n");
17612 sd_retry_command(un
, bp
, SD_RETRIES_BUSY
, NULL
,
17613 NULL
, EIO
, un
->un_busy_timeout
/ 500, kstat_waitq_enter
);
17614 return (SD_SENSE_DATA_IS_INVALID
);
17617 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
17618 "QFULL Status on REQUEST SENSE\n");
17619 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, NULL
,
17620 NULL
, EIO
, un
->un_busy_timeout
/ 500, kstat_waitq_enter
);
17621 return (SD_SENSE_DATA_IS_INVALID
);
17624 case STATUS_TERMINATED
:
17625 msgp
= "Check Condition on REQUEST SENSE\n";
17629 msgp
= "Not STATUS_GOOD on REQUEST_SENSE\n";
17634 * See if we got the minimum required amount of sense data.
17635 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
17638 if (((xp
->xb_sense_state
& STATE_XFERRED_DATA
) == 0) ||
17639 (actual_len
== 0)) {
17640 msgp
= "Request Sense couldn't get sense data\n";
17644 if (actual_len
< SUN_MIN_SENSE_LENGTH
) {
17645 msgp
= "Not enough sense information\n";
17646 /* Mark the ssc_flags for detecting invalid sense data */
17647 if (!(xp
->xb_pkt_flags
& SD_XB_USCSICMD
)) {
17648 sd_ssc_set_info(sscp
, SSC_FLAGS_INVALID_SENSE
, 0,
17655 * We require the extended sense data
17657 esp
= (struct scsi_extended_sense
*)xp
->xb_sense_data
;
17658 if (esp
->es_class
!= CLASS_EXTENDED_SENSE
) {
17659 if ((pktp
->pkt_flags
& FLAG_SILENT
) == 0) {
17660 static char tmp
[8];
17661 static char buf
[148];
17662 char *p
= (char *)(xp
->xb_sense_data
);
17665 mutex_enter(&sd_sense_mutex
);
17666 (void) strcpy(buf
, "undecodable sense information:");
17667 for (i
= 0; i
< actual_len
; i
++) {
17668 (void) sprintf(tmp
, " 0x%x", *(p
++)&0xff);
17669 (void) strcpy(&buf
[strlen(buf
)], tmp
);
17672 (void) strcpy(&buf
[i
], "-(assumed fatal)\n");
17674 if (SD_FM_LOG(un
) == SD_FM_LOG_NSUP
) {
17675 scsi_log(SD_DEVINFO(un
), sd_label
,
17678 mutex_exit(&sd_sense_mutex
);
17681 /* Mark the ssc_flags for detecting invalid sense data */
17682 if (!(xp
->xb_pkt_flags
& SD_XB_USCSICMD
)) {
17683 sd_ssc_set_info(sscp
, SSC_FLAGS_INVALID_SENSE
, 0,
17687 /* Note: Legacy behavior, fail the command with no retry */
17688 sd_return_failed_command(un
, bp
, EIO
);
17689 return (SD_SENSE_DATA_IS_INVALID
);
17693 * Check that es_code is valid (es_class concatenated with es_code
17694 * make up the "response code" field. es_class will always be 7, so
17695 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the
17698 if ((esp
->es_code
!= CODE_FMT_FIXED_CURRENT
) &&
17699 (esp
->es_code
!= CODE_FMT_FIXED_DEFERRED
) &&
17700 (esp
->es_code
!= CODE_FMT_DESCR_CURRENT
) &&
17701 (esp
->es_code
!= CODE_FMT_DESCR_DEFERRED
) &&
17702 (esp
->es_code
!= CODE_FMT_VENDOR_SPECIFIC
)) {
17703 /* Mark the ssc_flags for detecting invalid sense data */
17704 if (!(xp
->xb_pkt_flags
& SD_XB_USCSICMD
)) {
17705 sd_ssc_set_info(sscp
, SSC_FLAGS_INVALID_SENSE
, 0,
17711 return (SD_SENSE_DATA_IS_VALID
);
17715 * If the request sense failed (for whatever reason), attempt
17716 * to retry the original command.
17718 #if defined(__i386) || defined(__amd64)
17720 * SD_RETRY_DELAY is conditionally compile (#if fibre) in
17721 * sddef.h for Sparc platform, and x86 uses 1 binary
17722 * for both SCSI/FC.
17723 * The SD_RETRY_DELAY value need to be adjusted here
17724 * when SD_RETRY_DELAY change in sddef.h
17726 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
,
17727 sd_print_sense_failed_msg
, msgp
, EIO
,
17728 un
->un_f_is_fibre
?drv_usectohz(100000):(clock_t)0, NULL
);
17730 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
,
17731 sd_print_sense_failed_msg
, msgp
, EIO
, SD_RETRY_DELAY
, NULL
);
17734 return (SD_SENSE_DATA_IS_INVALID
);
17738 * Function: sd_decode_sense
17740 * Description: Take recovery action(s) when SCSI Sense Data is received.
17742 * Context: Interrupt context.
17746 sd_decode_sense(struct sd_lun
*un
, struct buf
*bp
, struct sd_xbuf
*xp
,
17747 struct scsi_pkt
*pktp
)
17751 ASSERT(un
!= NULL
);
17752 ASSERT(mutex_owned(SD_MUTEX(un
)));
17753 ASSERT(bp
!= NULL
);
17754 ASSERT(bp
!= un
->un_rqs_bp
);
17755 ASSERT(xp
!= NULL
);
17756 ASSERT(pktp
!= NULL
);
17758 sense_key
= scsi_sense_key(xp
->xb_sense_data
);
17760 switch (sense_key
) {
17762 sd_sense_key_no_sense(un
, bp
, xp
, pktp
);
17764 case KEY_RECOVERABLE_ERROR
:
17765 sd_sense_key_recoverable_error(un
, xp
->xb_sense_data
,
17768 case KEY_NOT_READY
:
17769 sd_sense_key_not_ready(un
, xp
->xb_sense_data
,
17772 case KEY_MEDIUM_ERROR
:
17773 case KEY_HARDWARE_ERROR
:
17774 sd_sense_key_medium_or_hardware_error(un
,
17775 xp
->xb_sense_data
, bp
, xp
, pktp
);
17777 case KEY_ILLEGAL_REQUEST
:
17778 sd_sense_key_illegal_request(un
, bp
, xp
, pktp
);
17780 case KEY_UNIT_ATTENTION
:
17781 sd_sense_key_unit_attention(un
, xp
->xb_sense_data
,
17784 case KEY_WRITE_PROTECT
:
17785 case KEY_VOLUME_OVERFLOW
:
17786 case KEY_MISCOMPARE
:
17787 sd_sense_key_fail_command(un
, bp
, xp
, pktp
);
17789 case KEY_BLANK_CHECK
:
17790 sd_sense_key_blank_check(un
, bp
, xp
, pktp
);
17792 case KEY_ABORTED_COMMAND
:
17793 sd_sense_key_aborted_command(un
, bp
, xp
, pktp
);
17795 case KEY_VENDOR_UNIQUE
:
17796 case KEY_COPY_ABORTED
:
17800 sd_sense_key_default(un
, xp
->xb_sense_data
,
17808 * Function: sd_dump_memory
17810 * Description: Debug logging routine to print the contents of a user provided
17811 * buffer. The output of the buffer is broken up into 256 byte
17812 * segments due to a size constraint of the scsi_log.
17815 * Arguments: un - ptr to softstate
17816 * comp - component mask
17817 * title - "title" string to preceed data when printed
17818 * data - ptr to data block to be printed
17819 * len - size of data block to be printed
17820 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17822 * Context: May be called from interrupt context
17825 #define SD_DUMP_MEMORY_BUF_SIZE 256
17827 static char *sd_dump_format_string
[] = {
17833 sd_dump_memory(struct sd_lun
*un
, uint_t comp
, char *title
, uchar_t
*data
,
17843 char *format_string
;
17845 ASSERT((fmt
== SD_LOG_HEX
) || (fmt
== SD_LOG_CHAR
));
17848 * In the debug version of the driver, this function is called from a
17849 * number of places which are NOPs in the release driver.
17850 * The debug driver therefore has additional methods of filtering
17855 * In the debug version of the driver we can reduce the amount of debug
17856 * messages by setting sd_error_level to something other than
17857 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17858 * sd_component_mask.
17860 if (((sd_level_mask
& (SD_LOGMASK_DUMP_MEM
| SD_LOGMASK_DIAG
)) == 0) ||
17861 (sd_error_level
!= SCSI_ERR_ALL
)) {
17864 if (((sd_component_mask
& comp
) == 0) ||
17865 (sd_error_level
!= SCSI_ERR_ALL
)) {
17869 if (sd_error_level
!= SCSI_ERR_ALL
) {
17874 local_buf
= kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE
, KM_SLEEP
);
17877 * Available length is the length of local_buf[], minus the
17878 * length of the title string, minus one for the ":", minus
17879 * one for the newline, minus one for the NULL terminator.
17880 * This gives the #bytes available for holding the printed
17881 * values from the given data buffer.
17883 if (fmt
== SD_LOG_HEX
) {
17884 format_string
= sd_dump_format_string
[0];
17885 } else /* SD_LOG_CHAR */ {
17886 format_string
= sd_dump_format_string
[1];
17889 * Available count is the number of elements from the given
17890 * data buffer that we can fit into the available length.
17891 * This is based upon the size of the format string used.
17892 * Make one entry and find it's size.
17894 (void) sprintf(bufp
, format_string
, data
[0]);
17895 entry_len
= strlen(bufp
);
17896 avail_count
= (SD_DUMP_MEMORY_BUF_SIZE
- strlen(title
) - 3) / entry_len
;
17901 bzero(bufp
, SD_DUMP_MEMORY_BUF_SIZE
);
17904 end_offset
= start_offset
+ avail_count
;
17906 (void) sprintf(bufp
, "%s:", title
);
17907 bufp
+= strlen(bufp
);
17908 for (i
= start_offset
; ((i
< end_offset
) && (j
< len
));
17910 (void) sprintf(bufp
, format_string
, data
[i
]);
17913 (void) sprintf(bufp
, "\n");
17915 scsi_log(SD_DEVINFO(un
), sd_label
, CE_NOTE
, "%s", local_buf
);
17917 kmem_free(local_buf
, SD_DUMP_MEMORY_BUF_SIZE
);
17921 * Function: sd_print_sense_msg
17923 * Description: Log a message based upon the given sense data.
17925 * Arguments: un - ptr to associated softstate
17926 * bp - ptr to buf(9S) for the command
17927 * arg - ptr to associate sd_sense_info struct
17928 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17929 * or SD_NO_RETRY_ISSUED
17931 * Context: May be called from interrupt context
17935 sd_print_sense_msg(struct sd_lun
*un
, struct buf
*bp
, void *arg
, int code
)
17937 struct sd_xbuf
*xp
;
17938 struct scsi_pkt
*pktp
;
17940 daddr_t request_blkno
;
17941 diskaddr_t err_blkno
;
17944 extern struct scsi_key_strings scsi_cmds
[];
17946 ASSERT(un
!= NULL
);
17947 ASSERT(mutex_owned(SD_MUTEX(un
)));
17948 ASSERT(bp
!= NULL
);
17949 xp
= SD_GET_XBUF(bp
);
17950 ASSERT(xp
!= NULL
);
17951 pktp
= SD_GET_PKTP(bp
);
17952 ASSERT(pktp
!= NULL
);
17953 ASSERT(arg
!= NULL
);
17955 severity
= ((struct sd_sense_info
*)(arg
))->ssi_severity
;
17956 pfa_flag
= ((struct sd_sense_info
*)(arg
))->ssi_pfa_flag
;
17958 if ((code
== SD_DELAYED_RETRY_ISSUED
) ||
17959 (code
== SD_IMMEDIATE_RETRY_ISSUED
)) {
17960 severity
= SCSI_ERR_RETRYABLE
;
17963 /* Use absolute block number for the request block number */
17964 request_blkno
= xp
->xb_blkno
;
17967 * Now try to get the error block number from the sense data
17969 sensep
= xp
->xb_sense_data
;
17971 if (scsi_sense_info_uint64(sensep
, SENSE_LENGTH
,
17972 (uint64_t *)&err_blkno
)) {
17974 * We retrieved the error block number from the information
17975 * portion of the sense data.
17977 * For USCSI commands we are better off using the error
17978 * block no. as the requested block no. (This is the best
17979 * we can estimate.)
17981 if ((SD_IS_BUFIO(xp
) == FALSE
) &&
17982 ((pktp
->pkt_flags
& FLAG_SILENT
) == 0)) {
17983 request_blkno
= err_blkno
;
17987 * Without the es_valid bit set (for fixed format) or an
17988 * information descriptor (for descriptor format) we cannot
17989 * be certain of the error blkno, so just use the
17992 err_blkno
= (diskaddr_t
)request_blkno
;
17996 * The following will log the buffer contents for the release driver
17997 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
17998 * level is set to verbose.
18000 sd_dump_memory(un
, SD_LOG_IO
, "Failed CDB",
18001 (uchar_t
*)pktp
->pkt_cdbp
, CDB_SIZE
, SD_LOG_HEX
);
18002 sd_dump_memory(un
, SD_LOG_IO
, "Sense Data",
18003 (uchar_t
*)sensep
, SENSE_LENGTH
, SD_LOG_HEX
);
18005 if (pfa_flag
== FALSE
) {
18006 /* This is normally only set for USCSI */
18007 if ((pktp
->pkt_flags
& FLAG_SILENT
) != 0) {
18011 if ((SD_IS_BUFIO(xp
) == TRUE
) &&
18012 (((sd_level_mask
& SD_LOGMASK_DIAG
) == 0) &&
18013 (severity
< sd_error_level
))) {
18018 * Check for Sonoma Failover and keep a count of how many failed I/O's
18020 if ((SD_IS_LSI(un
)) &&
18021 (scsi_sense_key(sensep
) == KEY_ILLEGAL_REQUEST
) &&
18022 (scsi_sense_asc(sensep
) == 0x94) &&
18023 (scsi_sense_ascq(sensep
) == 0x01)) {
18024 un
->un_sonoma_failure_count
++;
18025 if (un
->un_sonoma_failure_count
> 1) {
18030 if (SD_FM_LOG(un
) == SD_FM_LOG_NSUP
||
18031 ((scsi_sense_key(sensep
) == KEY_RECOVERABLE_ERROR
) &&
18032 (pktp
->pkt_resid
== 0))) {
18033 scsi_vu_errmsg(SD_SCSI_DEVP(un
), pktp
, sd_label
, severity
,
18034 request_blkno
, err_blkno
, scsi_cmds
,
18035 (struct scsi_extended_sense
*)sensep
,
18036 un
->un_additional_codes
, NULL
);
18041 * Function: sd_sense_key_no_sense
18043 * Description: Recovery action when sense data was not received.
18045 * Context: May be called from interrupt context
18049 sd_sense_key_no_sense(struct sd_lun
*un
, struct buf
*bp
,
18050 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18052 struct sd_sense_info si
;
18054 ASSERT(un
!= NULL
);
18055 ASSERT(mutex_owned(SD_MUTEX(un
)));
18056 ASSERT(bp
!= NULL
);
18057 ASSERT(xp
!= NULL
);
18058 ASSERT(pktp
!= NULL
);
18060 si
.ssi_severity
= SCSI_ERR_FATAL
;
18061 si
.ssi_pfa_flag
= FALSE
;
18063 SD_UPDATE_ERRSTATS(un
, sd_softerrs
);
18065 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, sd_print_sense_msg
,
18066 &si
, EIO
, (clock_t)0, NULL
);
18071 * Function: sd_sense_key_recoverable_error
18073 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
18075 * Context: May be called from interrupt context
18079 sd_sense_key_recoverable_error(struct sd_lun
*un
,
18080 uint8_t *sense_datap
,
18081 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18083 struct sd_sense_info si
;
18084 uint8_t asc
= scsi_sense_asc(sense_datap
);
18086 ASSERT(un
!= NULL
);
18087 ASSERT(mutex_owned(SD_MUTEX(un
)));
18088 ASSERT(bp
!= NULL
);
18089 ASSERT(xp
!= NULL
);
18090 ASSERT(pktp
!= NULL
);
18093 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
18095 if ((asc
== 0x5D) && (sd_report_pfa
!= 0)) {
18096 SD_UPDATE_ERRSTATS(un
, sd_rq_pfa_err
);
18097 si
.ssi_severity
= SCSI_ERR_INFO
;
18098 si
.ssi_pfa_flag
= TRUE
;
18100 SD_UPDATE_ERRSTATS(un
, sd_softerrs
);
18101 SD_UPDATE_ERRSTATS(un
, sd_rq_recov_err
);
18102 si
.ssi_severity
= SCSI_ERR_RECOVERED
;
18103 si
.ssi_pfa_flag
= FALSE
;
18106 if (pktp
->pkt_resid
== 0) {
18107 sd_print_sense_msg(un
, bp
, &si
, SD_NO_RETRY_ISSUED
);
18108 sd_return_command(un
, bp
);
18112 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, sd_print_sense_msg
,
18113 &si
, EIO
, (clock_t)0, NULL
);
18120 * Function: sd_sense_key_not_ready
18122 * Description: Recovery actions for a SCSI "Not Ready" sense key.
18124 * Context: May be called from interrupt context
18128 sd_sense_key_not_ready(struct sd_lun
*un
,
18129 uint8_t *sense_datap
,
18130 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18132 struct sd_sense_info si
;
18133 uint8_t asc
= scsi_sense_asc(sense_datap
);
18134 uint8_t ascq
= scsi_sense_ascq(sense_datap
);
18136 ASSERT(un
!= NULL
);
18137 ASSERT(mutex_owned(SD_MUTEX(un
)));
18138 ASSERT(bp
!= NULL
);
18139 ASSERT(xp
!= NULL
);
18140 ASSERT(pktp
!= NULL
);
18142 si
.ssi_severity
= SCSI_ERR_FATAL
;
18143 si
.ssi_pfa_flag
= FALSE
;
18146 * Update error stats after first NOT READY error. Disks may have
18147 * been powered down and may need to be restarted. For CDROMs,
18148 * report NOT READY errors only if media is present.
18150 if ((ISCD(un
) && (asc
== 0x3A)) ||
18151 (xp
->xb_nr_retry_count
> 0)) {
18152 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
18153 SD_UPDATE_ERRSTATS(un
, sd_rq_ntrdy_err
);
18157 * Just fail if the "not ready" retry limit has been reached.
18159 if (xp
->xb_nr_retry_count
>= un
->un_notready_retry_count
) {
18160 /* Special check for error message printing for removables. */
18161 if (un
->un_f_has_removable_media
&& (asc
== 0x04) &&
18163 si
.ssi_severity
= SCSI_ERR_ALL
;
18169 * Check the ASC and ASCQ in the sense data as needed, to determine
18173 case 0x04: /* LOGICAL UNIT NOT READY */
18175 * disk drives that don't spin up result in a very long delay
18176 * in format without warning messages. We will log a message
18177 * if the error level is set to verbose.
18179 if (sd_error_level
< SCSI_ERR_RETRYABLE
) {
18180 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
18181 "logical unit not ready, resetting disk\n");
18185 * There are different requirements for CDROMs and disks for
18186 * the number of retries. If a CD-ROM is giving this, it is
18187 * probably reading TOC and is in the process of getting
18188 * ready, so we should keep on trying for a long time to make
18189 * sure that all types of media are taken in account (for
18190 * some media the drive takes a long time to read TOC). For
18191 * disks we do not want to retry this too many times as this
18192 * can cause a long hang in format when the drive refuses to
18193 * spin up (a very common failure).
18196 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */
18198 * Disk drives frequently refuse to spin up which
18199 * results in a very long hang in format without
18200 * warning messages.
18202 * Note: This code preserves the legacy behavior of
18203 * comparing xb_nr_retry_count against zero for fibre
18204 * channel targets instead of comparing against the
18205 * un_reset_retry_count value. The reason for this
18206 * discrepancy has been so utterly lost beneath the
18207 * Sands of Time that even Indiana Jones could not
18210 if (un
->un_f_is_fibre
== TRUE
) {
18211 if (((sd_level_mask
& SD_LOGMASK_DIAG
) ||
18212 (xp
->xb_nr_retry_count
> 0)) &&
18213 (un
->un_startstop_timeid
== NULL
)) {
18214 scsi_log(SD_DEVINFO(un
), sd_label
,
18215 CE_WARN
, "logical unit not ready, "
18216 "resetting disk\n");
18217 sd_reset_target(un
, pktp
);
18220 if (((sd_level_mask
& SD_LOGMASK_DIAG
) ||
18221 (xp
->xb_nr_retry_count
>
18222 un
->un_reset_retry_count
)) &&
18223 (un
->un_startstop_timeid
== NULL
)) {
18224 scsi_log(SD_DEVINFO(un
), sd_label
,
18225 CE_WARN
, "logical unit not ready, "
18226 "resetting disk\n");
18227 sd_reset_target(un
, pktp
);
18232 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */
18234 * If the target is in the process of becoming
18235 * ready, just proceed with the retry. This can
18236 * happen with CD-ROMs that take a long time to
18237 * read TOC after a power cycle or reset.
18241 case 0x02: /* LUN NOT READY, INITITIALIZING CMD REQUIRED */
18244 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
18246 * Retries cannot help here so just fail right away.
18252 * Vendor-unique code for T3/T4: it indicates a
18253 * path problem in a mutipathed config, but as far as
18254 * the target driver is concerned it equates to a fatal
18255 * error, so we should just fail the command right away
18256 * (without printing anything to the console). If this
18257 * is not a T3/T4, fall thru to the default recovery
18259 * T3/T4 is FC only, don't need to check is_fibre
18261 if (SD_IS_T3(un
) || SD_IS_T4(un
)) {
18262 sd_return_failed_command(un
, bp
, EIO
);
18267 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */
18268 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */
18269 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */
18270 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */
18271 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */
18272 default: /* Possible future codes in SCSI spec? */
18274 * For removable-media devices, do not retry if
18275 * ASCQ > 2 as these result mostly from USCSI commands
18276 * on MMC devices issued to check status of an
18277 * operation initiated in immediate mode. Also for
18278 * ASCQ >= 4 do not print console messages as these
18279 * mainly represent a user-initiated operation
18280 * instead of a system failure.
18282 if (un
->un_f_has_removable_media
) {
18283 si
.ssi_severity
= SCSI_ERR_ALL
;
18290 * As part of our recovery attempt for the NOT READY
18291 * condition, we issue a START STOP UNIT command. However
18292 * we want to wait for a short delay before attempting this
18293 * as there may still be more commands coming back from the
18294 * target with the check condition. To do this we use
18295 * timeout(9F) to call sd_start_stop_unit_callback() after
18296 * the delay interval expires. (sd_start_stop_unit_callback()
18297 * dispatches sd_start_stop_unit_task(), which will issue
18298 * the actual START STOP UNIT command. The delay interval
18299 * is one-half of the delay that we will use to retry the
18300 * command that generated the NOT READY condition.
18302 * Note that we could just dispatch sd_start_stop_unit_task()
18303 * from here and allow it to sleep for the delay interval,
18304 * but then we would be tying up the taskq thread
18305 * uncesessarily for the duration of the delay.
18307 * Do not issue the START STOP UNIT if the current command
18308 * is already a START STOP UNIT.
18310 if (pktp
->pkt_cdbp
[0] == SCMD_START_STOP
) {
18315 * Do not schedule the timeout if one is already pending.
18317 if (un
->un_startstop_timeid
!= NULL
) {
18318 SD_INFO(SD_LOG_ERROR
, un
,
18319 "sd_sense_key_not_ready: restart already issued to"
18320 " %s%d\n", ddi_driver_name(SD_DEVINFO(un
)),
18321 ddi_get_instance(SD_DEVINFO(un
)));
18326 * Schedule the START STOP UNIT command, then queue the command
18329 * Note: A timeout is not scheduled for this retry because we
18330 * want the retry to be serial with the START_STOP_UNIT. The
18331 * retry will be started when the START_STOP_UNIT is completed
18332 * in sd_start_stop_unit_task.
18334 un
->un_startstop_timeid
= timeout(sd_start_stop_unit_callback
,
18335 un
, un
->un_busy_timeout
/ 2);
18336 xp
->xb_nr_retry_count
++;
18337 sd_set_retry_bp(un
, bp
, 0, kstat_waitq_enter
);
18340 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
18341 if (sd_error_level
< SCSI_ERR_RETRYABLE
) {
18342 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
18343 "unit does not respond to selection\n");
18347 case 0x3A: /* MEDIUM NOT PRESENT */
18348 if (sd_error_level
>= SCSI_ERR_FATAL
) {
18349 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
18350 "Caddy not inserted in drive\n");
18354 un
->un_mediastate
= DKIO_EJECTED
;
18355 /* The state has changed, inform the media watch routines */
18356 cv_broadcast(&un
->un_state_cv
);
18357 /* Just fail if no media is present in the drive. */
18361 if (sd_error_level
< SCSI_ERR_RETRYABLE
) {
18362 scsi_log(SD_DEVINFO(un
), sd_label
, CE_NOTE
,
18363 "Unit not Ready. Additional sense code 0x%x\n",
18372 * Retry the command, as some targets may report NOT READY for
18373 * several seconds after being reset.
18375 xp
->xb_nr_retry_count
++;
18376 si
.ssi_severity
= SCSI_ERR_RETRYABLE
;
18377 sd_retry_command(un
, bp
, SD_RETRIES_NOCHECK
, sd_print_sense_msg
,
18378 &si
, EIO
, un
->un_busy_timeout
, NULL
);
18383 sd_print_sense_msg(un
, bp
, &si
, SD_NO_RETRY_ISSUED
);
18384 sd_return_failed_command(un
, bp
, EIO
);
18390 * Function: sd_sense_key_medium_or_hardware_error
18392 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
18395 * Context: May be called from interrupt context
18399 sd_sense_key_medium_or_hardware_error(struct sd_lun
*un
,
18400 uint8_t *sense_datap
,
18401 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18403 struct sd_sense_info si
;
18404 uint8_t sense_key
= scsi_sense_key(sense_datap
);
18405 uint8_t asc
= scsi_sense_asc(sense_datap
);
18407 ASSERT(un
!= NULL
);
18408 ASSERT(mutex_owned(SD_MUTEX(un
)));
18409 ASSERT(bp
!= NULL
);
18410 ASSERT(xp
!= NULL
);
18411 ASSERT(pktp
!= NULL
);
18413 si
.ssi_severity
= SCSI_ERR_FATAL
;
18414 si
.ssi_pfa_flag
= FALSE
;
18416 if (sense_key
== KEY_MEDIUM_ERROR
) {
18417 SD_UPDATE_ERRSTATS(un
, sd_rq_media_err
);
18420 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
18422 if ((un
->un_reset_retry_count
!= 0) &&
18423 (xp
->xb_retry_count
== un
->un_reset_retry_count
)) {
18424 mutex_exit(SD_MUTEX(un
));
18425 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
18426 if (un
->un_f_allow_bus_device_reset
== TRUE
) {
18428 boolean_t try_resetting_target
= B_TRUE
;
18431 * We need to be able to handle specific ASC when we are
18432 * handling a KEY_HARDWARE_ERROR. In particular
18433 * taking the default action of resetting the target may
18434 * not be the appropriate way to attempt recovery.
18435 * Resetting a target because of a single LUN failure
18436 * victimizes all LUNs on that target.
18438 * This is true for the LSI arrays, if an LSI
18439 * array controller returns an ASC of 0x84 (LUN Dead) we
18443 if (sense_key
== KEY_HARDWARE_ERROR
) {
18446 if (SD_IS_LSI(un
)) {
18447 try_resetting_target
= B_FALSE
;
18455 if (try_resetting_target
== B_TRUE
) {
18456 int reset_retval
= 0;
18457 if (un
->un_f_lun_reset_enabled
== TRUE
) {
18458 SD_TRACE(SD_LOG_IO_CORE
, un
,
18459 "sd_sense_key_medium_or_hardware_"
18460 "error: issuing RESET_LUN\n");
18462 scsi_reset(SD_ADDRESS(un
),
18465 if (reset_retval
== 0) {
18466 SD_TRACE(SD_LOG_IO_CORE
, un
,
18467 "sd_sense_key_medium_or_hardware_"
18468 "error: issuing RESET_TARGET\n");
18469 (void) scsi_reset(SD_ADDRESS(un
),
18474 mutex_enter(SD_MUTEX(un
));
18478 * This really ought to be a fatal error, but we will retry anyway
18479 * as some drives report this as a spurious error.
18481 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, sd_print_sense_msg
,
18482 &si
, EIO
, (clock_t)0, NULL
);
18488 * Function: sd_sense_key_illegal_request
18490 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18492 * Context: May be called from interrupt context
18496 sd_sense_key_illegal_request(struct sd_lun
*un
, struct buf
*bp
,
18497 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18499 struct sd_sense_info si
;
18501 ASSERT(un
!= NULL
);
18502 ASSERT(mutex_owned(SD_MUTEX(un
)));
18503 ASSERT(bp
!= NULL
);
18504 ASSERT(xp
!= NULL
);
18505 ASSERT(pktp
!= NULL
);
18507 SD_UPDATE_ERRSTATS(un
, sd_rq_illrq_err
);
18509 si
.ssi_severity
= SCSI_ERR_INFO
;
18510 si
.ssi_pfa_flag
= FALSE
;
18512 /* Pointless to retry if the target thinks it's an illegal request */
18513 sd_print_sense_msg(un
, bp
, &si
, SD_NO_RETRY_ISSUED
);
18514 sd_return_failed_command(un
, bp
, EIO
);
18521 * Function: sd_sense_key_unit_attention
18523 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
18525 * Context: May be called from interrupt context
18529 sd_sense_key_unit_attention(struct sd_lun
*un
,
18530 uint8_t *sense_datap
,
18531 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18534 * For UNIT ATTENTION we allow retries for one minute. Devices
18535 * like Sonoma can return UNIT ATTENTION close to a minute
18536 * under certain conditions.
18538 int retry_check_flag
= SD_RETRIES_UA
;
18539 boolean_t kstat_updated
= B_FALSE
;
18540 struct sd_sense_info si
;
18541 uint8_t asc
= scsi_sense_asc(sense_datap
);
18542 uint8_t ascq
= scsi_sense_ascq(sense_datap
);
18544 ASSERT(un
!= NULL
);
18545 ASSERT(mutex_owned(SD_MUTEX(un
)));
18546 ASSERT(bp
!= NULL
);
18547 ASSERT(xp
!= NULL
);
18548 ASSERT(pktp
!= NULL
);
18550 si
.ssi_severity
= SCSI_ERR_INFO
;
18551 si
.ssi_pfa_flag
= FALSE
;
18555 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */
18556 if (sd_report_pfa
!= 0) {
18557 SD_UPDATE_ERRSTATS(un
, sd_rq_pfa_err
);
18558 si
.ssi_pfa_flag
= TRUE
;
18559 retry_check_flag
= SD_RETRIES_STANDARD
;
18565 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
18566 if ((un
->un_resvd_status
& SD_RESERVE
) == SD_RESERVE
) {
18567 un
->un_resvd_status
|=
18568 (SD_LOST_RESERVE
| SD_WANT_RESERVE
);
18571 if (un
->un_blockcount
+ 1 > SD_GROUP1_MAX_ADDRESS
) {
18572 if (taskq_dispatch(sd_tq
, sd_reenable_dsense_task
,
18573 un
, KM_NOSLEEP
) == 0) {
18575 * If we can't dispatch the task we'll just
18576 * live without descriptor sense. We can
18577 * try again on the next "unit attention"
18579 SD_ERROR(SD_LOG_ERROR
, un
,
18580 "sd_sense_key_unit_attention: "
18581 "Could not dispatch "
18582 "sd_reenable_dsense_task\n");
18588 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
18589 if (!un
->un_f_has_removable_media
) {
18594 * When we get a unit attention from a removable-media device,
18595 * it may be in a state that will take a long time to recover
18596 * (e.g., from a reset). Since we are executing in interrupt
18597 * context here, we cannot wait around for the device to come
18598 * back. So hand this command off to sd_media_change_task()
18599 * for deferred processing under taskq thread context. (Note
18600 * that the command still may be failed if a problem is
18601 * encountered at a later time.)
18603 if (taskq_dispatch(sd_tq
, sd_media_change_task
, pktp
,
18604 KM_NOSLEEP
) == 0) {
18606 * Cannot dispatch the request so fail the command.
18608 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
18609 SD_UPDATE_ERRSTATS(un
, sd_rq_nodev_err
);
18610 si
.ssi_severity
= SCSI_ERR_FATAL
;
18611 sd_print_sense_msg(un
, bp
, &si
, SD_NO_RETRY_ISSUED
);
18612 sd_return_failed_command(un
, bp
, EIO
);
18616 * If failed to dispatch sd_media_change_task(), we already
18617 * updated kstat. If succeed to dispatch sd_media_change_task(),
18618 * we should update kstat later if it encounters an error. So,
18619 * we update kstat_updated flag here.
18621 kstat_updated
= B_TRUE
;
18624 * Either the command has been successfully dispatched to a
18625 * task Q for retrying, or the dispatch failed. In either case
18626 * do NOT retry again by calling sd_retry_command. This sets up
18627 * two retries of the same command and when one completes and
18628 * frees the resources the other will access freed memory,
18639 * 2A 09 Capacity data has changed
18640 * 2A 01 Mode parameters changed
18641 * 3F 0E Reported luns data has changed
18642 * Arrays that support logical unit expansion should report
18643 * capacity changes(2Ah/09). Mode parameters changed and
18644 * reported luns data has changed are the approximation.
18646 if (((asc
== 0x2a) && (ascq
== 0x09)) ||
18647 ((asc
== 0x2a) && (ascq
== 0x01)) ||
18648 ((asc
== 0x3f) && (ascq
== 0x0e))) {
18649 if (taskq_dispatch(sd_tq
, sd_target_change_task
, un
,
18650 KM_NOSLEEP
) == 0) {
18651 SD_ERROR(SD_LOG_ERROR
, un
,
18652 "sd_sense_key_unit_attention: "
18653 "Could not dispatch sd_target_change_task\n");
18658 * Update kstat if we haven't done that.
18660 if (!kstat_updated
) {
18661 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
18662 SD_UPDATE_ERRSTATS(un
, sd_rq_nodev_err
);
18666 sd_retry_command(un
, bp
, retry_check_flag
, sd_print_sense_msg
, &si
,
18667 EIO
, SD_UA_RETRY_DELAY
, NULL
);
18673 * Function: sd_sense_key_fail_command
18675 * Description: Use to fail a command when we don't like the sense key that
18678 * Context: May be called from interrupt context
18682 sd_sense_key_fail_command(struct sd_lun
*un
, struct buf
*bp
,
18683 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18685 struct sd_sense_info si
;
18687 ASSERT(un
!= NULL
);
18688 ASSERT(mutex_owned(SD_MUTEX(un
)));
18689 ASSERT(bp
!= NULL
);
18690 ASSERT(xp
!= NULL
);
18691 ASSERT(pktp
!= NULL
);
18693 si
.ssi_severity
= SCSI_ERR_FATAL
;
18694 si
.ssi_pfa_flag
= FALSE
;
18696 sd_print_sense_msg(un
, bp
, &si
, SD_NO_RETRY_ISSUED
);
18697 sd_return_failed_command(un
, bp
, EIO
);
18703 * Function: sd_sense_key_blank_check
18705 * Description: Recovery actions for a SCSI "Blank Check" sense key.
18706 * Has no monetary connotation.
18708 * Context: May be called from interrupt context
18712 sd_sense_key_blank_check(struct sd_lun
*un
, struct buf
*bp
,
18713 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18715 struct sd_sense_info si
;
18717 ASSERT(un
!= NULL
);
18718 ASSERT(mutex_owned(SD_MUTEX(un
)));
18719 ASSERT(bp
!= NULL
);
18720 ASSERT(xp
!= NULL
);
18721 ASSERT(pktp
!= NULL
);
18724 * Blank check is not fatal for removable devices, therefore
18725 * it does not require a console message.
18727 si
.ssi_severity
= (un
->un_f_has_removable_media
) ? SCSI_ERR_ALL
:
18729 si
.ssi_pfa_flag
= FALSE
;
18731 sd_print_sense_msg(un
, bp
, &si
, SD_NO_RETRY_ISSUED
);
18732 sd_return_failed_command(un
, bp
, EIO
);
18739 * Function: sd_sense_key_aborted_command
18741 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
18743 * Context: May be called from interrupt context
18747 sd_sense_key_aborted_command(struct sd_lun
*un
, struct buf
*bp
,
18748 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18750 struct sd_sense_info si
;
18752 ASSERT(un
!= NULL
);
18753 ASSERT(mutex_owned(SD_MUTEX(un
)));
18754 ASSERT(bp
!= NULL
);
18755 ASSERT(xp
!= NULL
);
18756 ASSERT(pktp
!= NULL
);
18758 si
.ssi_severity
= SCSI_ERR_FATAL
;
18759 si
.ssi_pfa_flag
= FALSE
;
18761 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
18764 * This really ought to be a fatal error, but we will retry anyway
18765 * as some drives report this as a spurious error.
18767 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, sd_print_sense_msg
,
18768 &si
, EIO
, drv_usectohz(100000), NULL
);
18774 * Function: sd_sense_key_default
18776 * Description: Default recovery action for several SCSI sense keys (basically
18777 * attempts a retry).
18779 * Context: May be called from interrupt context
18783 sd_sense_key_default(struct sd_lun
*un
,
18784 uint8_t *sense_datap
,
18785 struct buf
*bp
, struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18787 struct sd_sense_info si
;
18788 uint8_t sense_key
= scsi_sense_key(sense_datap
);
18790 ASSERT(un
!= NULL
);
18791 ASSERT(mutex_owned(SD_MUTEX(un
)));
18792 ASSERT(bp
!= NULL
);
18793 ASSERT(xp
!= NULL
);
18794 ASSERT(pktp
!= NULL
);
18796 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
18799 * Undecoded sense key. Attempt retries and hope that will fix
18800 * the problem. Otherwise, we're dead.
18802 if ((pktp
->pkt_flags
& FLAG_SILENT
) == 0) {
18803 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
18804 "Unhandled Sense Key '%s'\n", sense_keys
[sense_key
]);
18807 si
.ssi_severity
= SCSI_ERR_FATAL
;
18808 si
.ssi_pfa_flag
= FALSE
;
18810 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, sd_print_sense_msg
,
18811 &si
, EIO
, (clock_t)0, NULL
);
18817 * Function: sd_print_retry_msg
18819 * Description: Print a message indicating the retry action being taken.
18821 * Arguments: un - ptr to associated softstate
18822 * bp - ptr to buf(9S) for the command
18824 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18825 * or SD_NO_RETRY_ISSUED
18827 * Context: May be called from interrupt context
18831 sd_print_retry_msg(struct sd_lun
*un
, struct buf
*bp
, void *arg
, int flag
)
18833 struct sd_xbuf
*xp
;
18834 struct scsi_pkt
*pktp
;
18838 ASSERT(un
!= NULL
);
18839 ASSERT(mutex_owned(SD_MUTEX(un
)));
18840 ASSERT(bp
!= NULL
);
18841 pktp
= SD_GET_PKTP(bp
);
18842 ASSERT(pktp
!= NULL
);
18843 xp
= SD_GET_XBUF(bp
);
18844 ASSERT(xp
!= NULL
);
18846 ASSERT(!mutex_owned(&un
->un_pm_mutex
));
18847 mutex_enter(&un
->un_pm_mutex
);
18848 if ((un
->un_state
== SD_STATE_SUSPENDED
) ||
18849 (SD_DEVICE_IS_IN_LOW_POWER(un
)) ||
18850 (pktp
->pkt_flags
& FLAG_SILENT
)) {
18851 mutex_exit(&un
->un_pm_mutex
);
18852 goto update_pkt_reason
;
18854 mutex_exit(&un
->un_pm_mutex
);
18857 * Suppress messages if they are all the same pkt_reason; with
18858 * TQ, many (up to 256) are returned with the same pkt_reason.
18859 * If we are in panic, then suppress the retry messages.
18862 case SD_NO_RETRY_ISSUED
:
18863 msgp
= "giving up";
18865 case SD_IMMEDIATE_RETRY_ISSUED
:
18866 case SD_DELAYED_RETRY_ISSUED
:
18867 if (ddi_in_panic() || (un
->un_state
== SD_STATE_OFFLINE
) ||
18868 ((pktp
->pkt_reason
== un
->un_last_pkt_reason
) &&
18869 (sd_error_level
!= SCSI_ERR_ALL
))) {
18872 msgp
= "retrying command";
18875 goto update_pkt_reason
;
18878 reasonp
= (((pktp
->pkt_statistics
& STAT_PERR
) != 0) ? "parity error" :
18879 scsi_rname(pktp
->pkt_reason
));
18881 if (SD_FM_LOG(un
) == SD_FM_LOG_NSUP
) {
18882 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
18883 "SCSI transport failed: reason '%s': %s\n", reasonp
, msgp
);
18888 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
18889 * This is to prevent multiple console messages for the same failure
18890 * condition. Note that un->un_last_pkt_reason is NOT restored if &
18891 * when the command is retried successfully because there still may be
18892 * more commands coming back with the same value of pktp->pkt_reason.
18894 if ((pktp
->pkt_reason
!= CMD_CMPLT
) || (xp
->xb_retry_count
== 0)) {
18895 un
->un_last_pkt_reason
= pktp
->pkt_reason
;
18901 * Function: sd_print_cmd_incomplete_msg
18903 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
18905 * Arguments: un - ptr to associated softstate
18906 * bp - ptr to buf(9S) for the command
18907 * arg - passed to sd_print_retry_msg()
18908 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18909 * or SD_NO_RETRY_ISSUED
18911 * Context: May be called from interrupt context
18915 sd_print_cmd_incomplete_msg(struct sd_lun
*un
, struct buf
*bp
, void *arg
,
18920 ASSERT(un
!= NULL
);
18921 ASSERT(mutex_owned(SD_MUTEX(un
)));
18922 ASSERT(bp
!= NULL
);
18925 case SD_NO_RETRY_ISSUED
:
18926 /* Command was failed. Someone turned off this target? */
18927 if (un
->un_state
!= SD_STATE_OFFLINE
) {
18929 * Suppress message if we are detaching and
18930 * device has been disconnected
18931 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation
18932 * private interface and not part of the DDI
18934 dip
= un
->un_sd
->sd_dev
;
18935 if (!(DEVI_IS_DETACHING(dip
) &&
18936 DEVI_IS_DEVICE_REMOVED(dip
))) {
18937 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
18938 "disk not responding to selection\n");
18940 New_state(un
, SD_STATE_OFFLINE
);
18944 case SD_DELAYED_RETRY_ISSUED
:
18945 case SD_IMMEDIATE_RETRY_ISSUED
:
18947 /* Command was successfully queued for retry */
18948 sd_print_retry_msg(un
, bp
, arg
, code
);
18955 * Function: sd_pkt_reason_cmd_incomplete
18957 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
18959 * Context: May be called from interrupt context
/*
 * sd_pkt_reason_cmd_incomplete: recovery for pkt_reason CMD_INCOMPLETE.
 * Resets the target when selection did not complete, adds
 * SD_RETRIES_FAILFAST when the target was never selected, then retries the
 * command with sd_print_cmd_incomplete_msg as the message callback.
 * Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
18963 sd_pkt_reason_cmd_incomplete(struct sd_lun
*un
, struct buf
*bp
,
18964 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
18966 int flag
= SD_RETRIES_STANDARD
| SD_RETRIES_ISOLATE
;
18968 ASSERT(un
!= NULL
);
18969 ASSERT(mutex_owned(SD_MUTEX(un
)));
18970 ASSERT(bp
!= NULL
);
18971 ASSERT(xp
!= NULL
);
18972 ASSERT(pktp
!= NULL
);
18974 /* Do not do a reset if selection did not complete */
18975 /* Note: Should this not just check the bit? */
18976 if (pktp
->pkt_state
!= STATE_GOT_BUS
) {
18977 SD_UPDATE_ERRSTATS(un
, sd_transerrs
);
18978 sd_reset_target(un
, pktp
);
18982 * If the target was not successfully selected, then set
18983 * SD_RETRIES_FAILFAST to indicate that we lost communication
18984 * with the target, and further retries and/or commands are
18985 * likely to take a long time.
18987 if ((pktp
->pkt_state
& STATE_GOT_TARGET
) == 0) {
18988 flag
|= SD_RETRIES_FAILFAST
;
18991 SD_UPDATE_RESERVATION_STATUS(un
, pktp
);
/* Queue the retry with EIO as the eventual failure code. */
18993 sd_retry_command(un
, bp
, flag
,
18994 sd_print_cmd_incomplete_msg
, NULL
, EIO
, SD_RESTART_TIMEOUT
, NULL
);
19000 * Function: sd_pkt_reason_cmd_tran_err
19002 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
19004 * Context: May be called from interrupt context
/*
 * sd_pkt_reason_cmd_tran_err: recovery for pkt_reason CMD_TRAN_ERR.
 * Counts a hard error, resets the target only when the failure was not a
 * parity error and selection did not complete, then retries with standard
 * isolate retries. Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19008 sd_pkt_reason_cmd_tran_err(struct sd_lun
*un
, struct buf
*bp
,
19009 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19011 ASSERT(un
!= NULL
);
19012 ASSERT(mutex_owned(SD_MUTEX(un
)));
19013 ASSERT(bp
!= NULL
);
19014 ASSERT(xp
!= NULL
);
19015 ASSERT(pktp
!= NULL
);
19018 * Do not reset if we got a parity error, or if
19019 * selection did not complete.
19021 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
19022 /* Note: Should this not just check the bit for pkt_state? */
19023 if (((pktp
->pkt_statistics
& STAT_PERR
) == 0) &&
19024 (pktp
->pkt_state
!= STATE_GOT_BUS
)) {
19025 SD_UPDATE_ERRSTATS(un
, sd_transerrs
);
19026 sd_reset_target(un
, pktp
);
19029 SD_UPDATE_RESERVATION_STATUS(un
, pktp
);
/* Standard retry; failure code is EIO if retries are exhausted. */
19031 sd_retry_command(un
, bp
, (SD_RETRIES_STANDARD
| SD_RETRIES_ISOLATE
),
19032 sd_print_retry_msg
, NULL
, EIO
, SD_RESTART_TIMEOUT
, NULL
);
19038 * Function: sd_pkt_reason_cmd_reset
19040 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
19042 * Context: May be called from interrupt context
/*
 * sd_pkt_reason_cmd_reset: recovery for pkt_reason CMD_RESET.
 * The target may still be running the command, so unconditionally reset it,
 * then retry as a "victim" (SD_RETRIES_VICTIM) since another target on the
 * bus most likely caused the reset. Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19046 sd_pkt_reason_cmd_reset(struct sd_lun
*un
, struct buf
*bp
,
19047 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19049 ASSERT(un
!= NULL
);
19050 ASSERT(mutex_owned(SD_MUTEX(un
)));
19051 ASSERT(bp
!= NULL
);
19052 ASSERT(xp
!= NULL
);
19053 ASSERT(pktp
!= NULL
);
19055 /* The target may still be running the command, so try to reset. */
19056 SD_UPDATE_ERRSTATS(un
, sd_transerrs
);
19057 sd_reset_target(un
, pktp
);
19059 SD_UPDATE_RESERVATION_STATUS(un
, pktp
);
19062 * If pkt_reason is CMD_RESET chances are that this pkt got
19063 * reset because another target on this bus caused it. The target
19064 * that caused it should get CMD_TIMEOUT with pkt_statistics
19065 * of STAT_TIMEOUT/STAT_DEV_RESET.
19068 sd_retry_command(un
, bp
, (SD_RETRIES_VICTIM
| SD_RETRIES_ISOLATE
),
19069 sd_print_retry_msg
, NULL
, EIO
, SD_RESTART_TIMEOUT
, NULL
);
19076 * Function: sd_pkt_reason_cmd_aborted
19078 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
19080 * Context: May be called from interrupt context
/*
 * sd_pkt_reason_cmd_aborted: recovery for pkt_reason CMD_ABORTED.
 * Mirrors sd_pkt_reason_cmd_reset: reset the target (it may still be
 * running the command) and retry as a victim of another target's problem.
 * Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19084 sd_pkt_reason_cmd_aborted(struct sd_lun
*un
, struct buf
*bp
,
19085 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19087 ASSERT(un
!= NULL
);
19088 ASSERT(mutex_owned(SD_MUTEX(un
)));
19089 ASSERT(bp
!= NULL
);
19090 ASSERT(xp
!= NULL
);
19091 ASSERT(pktp
!= NULL
);
19093 /* The target may still be running the command, so try to reset. */
19094 SD_UPDATE_ERRSTATS(un
, sd_transerrs
);
19095 sd_reset_target(un
, pktp
);
19097 SD_UPDATE_RESERVATION_STATUS(un
, pktp
);
19100 * If pkt_reason is CMD_ABORTED chances are that this pkt got
19101 * aborted because another target on this bus caused it. The target
19102 * that caused it should get CMD_TIMEOUT with pkt_statistics
19103 * of STAT_TIMEOUT/STAT_DEV_RESET.
19106 sd_retry_command(un
, bp
, (SD_RETRIES_VICTIM
| SD_RETRIES_ISOLATE
),
19107 sd_print_retry_msg
, NULL
, EIO
, SD_RESTART_TIMEOUT
, NULL
);
19113 * Function: sd_pkt_reason_cmd_timeout
19115 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
19117 * Context: May be called from interrupt context
/*
 * sd_pkt_reason_cmd_timeout: recovery for pkt_reason CMD_TIMEOUT.
 * Resets the target and retries with SD_RETRIES_FAILFAST added, since a
 * timeout means we could not communicate with the target and further
 * commands are likely to be slow. Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19121 sd_pkt_reason_cmd_timeout(struct sd_lun
*un
, struct buf
*bp
,
19122 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19124 ASSERT(un
!= NULL
);
19125 ASSERT(mutex_owned(SD_MUTEX(un
)));
19126 ASSERT(bp
!= NULL
);
19127 ASSERT(xp
!= NULL
);
19128 ASSERT(pktp
!= NULL
);
19131 SD_UPDATE_ERRSTATS(un
, sd_transerrs
);
19132 sd_reset_target(un
, pktp
);
19134 SD_UPDATE_RESERVATION_STATUS(un
, pktp
);
19137 * A command timeout indicates that we could not establish
19138 * communication with the target, so set SD_RETRIES_FAILFAST
19139 * as further retries/commands are likely to take a long time.
19141 sd_retry_command(un
, bp
,
19142 (SD_RETRIES_STANDARD
| SD_RETRIES_ISOLATE
| SD_RETRIES_FAILFAST
),
19143 sd_print_retry_msg
, NULL
, EIO
, SD_RESTART_TIMEOUT
, NULL
);
19149 * Function: sd_pkt_reason_cmd_unx_bus_free
19151 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
19153 * Context: May be called from interrupt context
/*
 * sd_pkt_reason_cmd_unx_bus_free: recovery for pkt_reason CMD_UNX_BUS_FREE.
 * No reset is attempted; the command is simply retried. The retry-message
 * callback is suppressed (NULL) when the unexpected bus free was due to a
 * parity error (STAT_PERR set). Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19157 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun
*un
, struct buf
*bp
,
19158 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
/* Local: message callback selected below based on STAT_PERR. */
19160 void (*funcp
)(struct sd_lun
*un
, struct buf
*bp
, void *arg
, int code
);
19162 ASSERT(un
!= NULL
);
19163 ASSERT(mutex_owned(SD_MUTEX(un
)));
19164 ASSERT(bp
!= NULL
);
19165 ASSERT(xp
!= NULL
);
19166 ASSERT(pktp
!= NULL
);
19168 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
19169 SD_UPDATE_RESERVATION_STATUS(un
, pktp
);
/* Print a retry message only when this was not a parity error. */
19171 funcp
= ((pktp
->pkt_statistics
& STAT_PERR
) == 0) ?
19172 sd_print_retry_msg
: NULL
;
19174 sd_retry_command(un
, bp
, (SD_RETRIES_STANDARD
| SD_RETRIES_ISOLATE
),
19175 funcp
, NULL
, EIO
, SD_RESTART_TIMEOUT
, NULL
);
19180 * Function: sd_pkt_reason_cmd_tag_reject
19182 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
19184 * Context: May be called from interrupt context
/*
 * sd_pkt_reason_cmd_tag_reject: recovery for pkt_reason CMD_TAG_REJECT.
 * The target rejected a tagged command: disable tagged queueing for the
 * unit (pkt_flags/un_tagflags cleared, throttle lowered, "tagged-qing"
 * capability turned off via scsi_ifsetcap), then retry the command without
 * checking retry counts (legacy behavior). Called with SD_MUTEX(un) held;
 * the mutex is dropped around the scsi_ifsetcap() call.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19188 sd_pkt_reason_cmd_tag_reject(struct sd_lun
*un
, struct buf
*bp
,
19189 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19191 ASSERT(un
!= NULL
);
19192 ASSERT(mutex_owned(SD_MUTEX(un
)));
19193 ASSERT(bp
!= NULL
);
19194 ASSERT(xp
!= NULL
);
19195 ASSERT(pktp
!= NULL
);
19197 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
/* Stop using tags on this unit. */
19198 pktp
->pkt_flags
= 0;
19199 un
->un_tagflags
= 0;
/* With queueing support keep up to 3 outstanding; otherwise serialize. */
19200 if (un
->un_f_opt_queueing
== TRUE
) {
19201 un
->un_throttle
= min(un
->un_throttle
, 3);
19203 un
->un_throttle
= 1;
/* scsi_ifsetcap may block: drop the softstate mutex around it. */
19205 mutex_exit(SD_MUTEX(un
));
19206 (void) scsi_ifsetcap(SD_ADDRESS(un
), "tagged-qing", 0, 1);
19207 mutex_enter(SD_MUTEX(un
));
19209 SD_UPDATE_RESERVATION_STATUS(un
, pktp
);
19211 /* Legacy behavior not to check retry counts here. */
19212 sd_retry_command(un
, bp
, (SD_RETRIES_NOCHECK
| SD_RETRIES_ISOLATE
),
19213 sd_print_retry_msg
, NULL
, EIO
, SD_RESTART_TIMEOUT
, NULL
);
19218 * Function: sd_pkt_reason_default
19220 * Description: Default recovery actions for SCSA pkt_reason values that
19221 * do not have more explicit recovery actions.
19223 * Context: May be called from interrupt context
/*
 * sd_pkt_reason_default: catch-all recovery for pkt_reason values without a
 * dedicated handler. Counts a transport error, resets the target, and
 * retries with standard isolate retries. Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19227 sd_pkt_reason_default(struct sd_lun
*un
, struct buf
*bp
,
19228 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19230 ASSERT(un
!= NULL
);
19231 ASSERT(mutex_owned(SD_MUTEX(un
)));
19232 ASSERT(bp
!= NULL
);
19233 ASSERT(xp
!= NULL
);
19234 ASSERT(pktp
!= NULL
);
19236 SD_UPDATE_ERRSTATS(un
, sd_transerrs
);
19237 sd_reset_target(un
, pktp
);
19239 SD_UPDATE_RESERVATION_STATUS(un
, pktp
);
19241 sd_retry_command(un
, bp
, (SD_RETRIES_STANDARD
| SD_RETRIES_ISOLATE
),
19242 sd_print_retry_msg
, NULL
, EIO
, SD_RESTART_TIMEOUT
, NULL
);
19248 * Function: sd_pkt_status_check_condition
19250 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
19252 * Context: May be called from interrupt context
/*
 * sd_pkt_status_check_condition: recovery for SCSI STATUS_CHECK.
 * Without auto-request-sense (ARQ) enabled, issues an explicit REQUEST
 * SENSE (the failed command is retried after the sense completes); with ARQ
 * the command is simply retried. x86/amd64 builds use a fibre-dependent
 * retry delay. Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19256 sd_pkt_status_check_condition(struct sd_lun
*un
, struct buf
*bp
,
19257 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19259 ASSERT(un
!= NULL
);
19260 ASSERT(mutex_owned(SD_MUTEX(un
)));
19261 ASSERT(bp
!= NULL
);
19262 ASSERT(xp
!= NULL
);
19263 ASSERT(pktp
!= NULL
);
19265 SD_TRACE(SD_LOG_IO
, un
, "sd_pkt_status_check_condition: "
19266 "entry: buf:0x%p xp:0x%p\n", bp
, xp
);
19269 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
19270 * command will be retried after the request sense). Otherwise, retry
19271 * the command. Note: we are issuing the request sense even though the
19272 * retry limit may have been reached for the failed command.
19274 if (un
->un_f_arq_enabled
== FALSE
) {
19275 SD_INFO(SD_LOG_IO_CORE
, un
, "sd_pkt_status_check_condition: "
19276 "no ARQ, sending request sense command\n");
19277 sd_send_request_sense_command(un
, bp
, pktp
);
19279 SD_INFO(SD_LOG_IO_CORE
, un
, "sd_pkt_status_check_condition: "
19280 "ARQ,retrying request sense command\n");
19281 #if defined(__i386) || defined(__amd64)
19283 * The SD_RETRY_DELAY value need to be adjusted here
19284 * when SD_RETRY_DELAY change in sddef.h
19286 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, NULL
, NULL
, EIO
,
/* x86: 100ms delay on fibre, no delay otherwise. */
19287 un
->un_f_is_fibre
?drv_usectohz(100000):(clock_t)0,
19290 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, NULL
, NULL
,
19291 EIO
, SD_RETRY_DELAY
, NULL
);
19295 SD_TRACE(SD_LOG_IO_CORE
, un
, "sd_pkt_status_check_condition: exit\n");
19300 * Function: sd_pkt_status_busy
19302 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
19304 * Context: May be called from interrupt context
/*
 * sd_pkt_status_busy: recovery for SCSI STATUS_BUSY.
 * Fails the command once busy retries (un_busy_retry_count) are exhausted.
 * Otherwise bumps the retry count; exactly once, when the count reaches the
 * reset threshold (at least 2), attempts LUN, then target, then bus-wide
 * reset (mutex dropped across scsi_reset), failing the command if all
 * resets fail. Surviving commands are retried with SD_RETRIES_NOCHECK
 * after un_busy_timeout. Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19308 sd_pkt_status_busy(struct sd_lun
*un
, struct buf
*bp
, struct sd_xbuf
*xp
,
19309 struct scsi_pkt
*pktp
)
19311 ASSERT(un
!= NULL
);
19312 ASSERT(mutex_owned(SD_MUTEX(un
)));
19313 ASSERT(bp
!= NULL
);
19314 ASSERT(xp
!= NULL
);
19315 ASSERT(pktp
!= NULL
);
19317 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19318 "sd_pkt_status_busy: entry\n");
19320 /* If retries are exhausted, just fail the command. */
19321 if (xp
->xb_retry_count
>= un
->un_busy_retry_count
) {
19322 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
19323 "device busy too long\n");
19324 sd_return_failed_command(un
, bp
, EIO
);
19325 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19326 "sd_pkt_status_busy: exit\n");
19329 xp
->xb_retry_count
++;
19332 * Try to reset the target. However, we do not want to perform
19333 * more than one reset if the device continues to fail. The reset
19334 * will be performed when the retry count reaches the reset
19335 * threshold. This threshold should be set such that at least
19336 * one retry is issued before the reset is performed.
19338 if (xp
->xb_retry_count
==
19339 ((un
->un_reset_retry_count
< 2) ? 2 : un
->un_reset_retry_count
)) {
19341 mutex_exit(SD_MUTEX(un
));
19342 if (un
->un_f_allow_bus_device_reset
== TRUE
) {
19344 * First try to reset the LUN; if we cannot then
19345 * try to reset the target.
19347 if (un
->un_f_lun_reset_enabled
== TRUE
) {
19348 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19349 "sd_pkt_status_busy: RESET_LUN\n");
19350 rval
= scsi_reset(SD_ADDRESS(un
), RESET_LUN
);
19353 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19354 "sd_pkt_status_busy: RESET_TARGET\n");
19355 rval
= scsi_reset(SD_ADDRESS(un
), RESET_TARGET
);
19360 * If the RESET_LUN and/or RESET_TARGET failed,
19363 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19364 "sd_pkt_status_busy: RESET_ALL\n");
19365 rval
= scsi_reset(SD_ADDRESS(un
), RESET_ALL
);
19367 mutex_enter(SD_MUTEX(un
));
19370 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
19371 * At this point we give up & fail the command.
19373 sd_return_failed_command(un
, bp
, EIO
);
19374 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19375 "sd_pkt_status_busy: exit (failed cmd)\n");
19381 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
19382 * we have already checked the retry counts above.
19384 sd_retry_command(un
, bp
, SD_RETRIES_NOCHECK
, NULL
, NULL
,
19385 EIO
, un
->un_busy_timeout
, NULL
);
19387 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19388 "sd_pkt_status_busy: exit\n");
19393 * Function: sd_pkt_status_reservation_conflict
19395 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
19398 * Context: May be called from interrupt context
/*
 * sd_pkt_status_reservation_conflict: recovery for
 * STATUS_RESERVATION_CONFLICT. PERSISTENT RESERVE IN/OUT commands under
 * SCSI-3 reservations fail immediately with EACCES. Otherwise the conflict
 * is recorded in un_resvd_status; with SD_FAILFAST set the system either
 * panics (sd_failfast_enable) or the command fails with EACCES. When
 * sd_retry_on_reservation_conflict is 0 the command fails with EIO;
 * otherwise it is retried. Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19402 sd_pkt_status_reservation_conflict(struct sd_lun
*un
, struct buf
*bp
,
19403 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19405 ASSERT(un
!= NULL
);
19406 ASSERT(mutex_owned(SD_MUTEX(un
)));
19407 ASSERT(bp
!= NULL
);
19408 ASSERT(xp
!= NULL
);
19409 ASSERT(pktp
!= NULL
);
19412 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation
19413 * conflict could be due to various reasons like incorrect keys, not
19414 * registered or not reserved etc. So, we return EACCES to the caller.
19416 if (un
->un_reservation_type
== SD_SCSI3_RESERVATION
) {
19417 int cmd
= SD_GET_PKT_OPCODE(pktp
);
19418 if ((cmd
== SCMD_PERSISTENT_RESERVE_IN
) ||
19419 (cmd
== SCMD_PERSISTENT_RESERVE_OUT
)) {
19420 sd_return_failed_command(un
, bp
, EACCES
);
/* Remember that this unit has hit a reservation conflict. */
19425 un
->un_resvd_status
|= SD_RESERVATION_CONFLICT
;
19427 if ((un
->un_resvd_status
& SD_FAILFAST
) != 0) {
19428 if (sd_failfast_enable
!= 0) {
19429 /* By definition, we must panic here.... */
19430 sd_panic_for_res_conflict(un
);
19433 SD_ERROR(SD_LOG_IO
, un
,
19434 "sd_handle_resv_conflict: Disk Reserved\n");
19435 sd_return_failed_command(un
, bp
, EACCES
);
19440 * 1147670: retry only if sd_retry_on_reservation_conflict
19441 * property is set (default is 1). Retries will not succeed
19442 * on a disk reserved by another initiator. HA systems
19443 * may reset this via sd.conf to avoid these retries.
19445 * Note: The legacy return code for this failure is EIO, however EACCES
19446 * seems more appropriate for a reservation conflict.
19448 if (sd_retry_on_reservation_conflict
== 0) {
19449 SD_ERROR(SD_LOG_IO
, un
,
19450 "sd_handle_resv_conflict: Device Reserved\n");
19451 sd_return_failed_command(un
, bp
, EIO
);
19456 * Retry the command if we can.
19458 * Note: The legacy return code for this failure is EIO, however EACCES
19459 * seems more appropriate for a reservation conflict.
19461 sd_retry_command(un
, bp
, SD_RETRIES_STANDARD
, NULL
, NULL
, EIO
,
19468 * Function: sd_pkt_status_qfull
19470 * Description: Handle a QUEUE FULL condition from the target. This can
19471 * occur if the HBA does not handle the queue full condition.
19472 * (Basically this means third-party HBAs as Sun HBAs will
19473 * handle the queue full condition.) Note that if there are
19474 * some commands already in the transport, then the queue full
19475 * has occurred because the queue for this nexus is actually
19476 * full. If there are no commands in the transport, then the
19477 * queue full is resulting from some other initiator or lun
19478 * consuming all the resources at the target.
19480 * Context: May be called from interrupt context
/*
 * sd_pkt_status_qfull: recovery for a QUEUE FULL status from the target.
 * Lowers the unit's throttle via sd_reduce_throttle(SD_THROTTLE_QFULL) and
 * retries without limiting the retry count (SD_RETRIES_NOCHECK, error
 * code 0). Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19484 sd_pkt_status_qfull(struct sd_lun
*un
, struct buf
*bp
,
19485 struct sd_xbuf
*xp
, struct scsi_pkt
*pktp
)
19487 ASSERT(un
!= NULL
);
19488 ASSERT(mutex_owned(SD_MUTEX(un
)));
19489 ASSERT(bp
!= NULL
);
19490 ASSERT(xp
!= NULL
);
19491 ASSERT(pktp
!= NULL
);
19493 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19494 "sd_pkt_status_qfull: entry\n");
19497 * Just lower the QFULL throttle and retry the command. Note that
19498 * we do not limit the number of retries here.
19500 sd_reduce_throttle(un
, SD_THROTTLE_QFULL
);
19501 sd_retry_command(un
, bp
, SD_RETRIES_NOCHECK
, NULL
, NULL
, 0,
19502 SD_RESTART_TIMEOUT
, NULL
);
19504 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19505 "sd_pkt_status_qfull: exit\n");
19510 * Function: sd_reset_target
19512 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
19513 * RESET_TARGET, or RESET_ALL.
19515 * Context: May be called under interrupt context.
/*
 * sd_reset_target: issue scsi_reset(9F) on behalf of a failed command.
 * Skips the reset entirely when pkt_statistics shows the transport already
 * performed one (STAT_BUS_RESET/STAT_DEV_RESET/STAT_ABORTED). Otherwise
 * drops SD_MUTEX(un) and escalates: RESET_LUN (if enabled), then
 * RESET_TARGET, then RESET_ALL as a last resort. Reacquires the mutex
 * before returning. Called with SD_MUTEX(un) held.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19519 sd_reset_target(struct sd_lun
*un
, struct scsi_pkt
*pktp
)
19523 ASSERT(un
!= NULL
);
19524 ASSERT(mutex_owned(SD_MUTEX(un
)));
19525 ASSERT(pktp
!= NULL
);
19527 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_reset_target: entry\n");
19530 * No need to reset if the transport layer has already done so.
19532 if ((pktp
->pkt_statistics
&
19533 (STAT_BUS_RESET
| STAT_DEV_RESET
| STAT_ABORTED
)) != 0) {
19534 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19535 "sd_reset_target: no reset\n");
/* scsi_reset may block: release the softstate mutex around the resets. */
19539 mutex_exit(SD_MUTEX(un
));
19541 if (un
->un_f_allow_bus_device_reset
== TRUE
) {
19542 if (un
->un_f_lun_reset_enabled
== TRUE
) {
19543 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19544 "sd_reset_target: RESET_LUN\n");
19545 rval
= scsi_reset(SD_ADDRESS(un
), RESET_LUN
);
19548 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19549 "sd_reset_target: RESET_TARGET\n");
19550 rval
= scsi_reset(SD_ADDRESS(un
), RESET_TARGET
);
/* Fall back to resetting the whole bus. */
19555 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
19556 "sd_reset_target: RESET_ALL\n");
19557 (void) scsi_reset(SD_ADDRESS(un
), RESET_ALL
);
19560 mutex_enter(SD_MUTEX(un
));
19562 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
, "sd_reset_target: exit\n");
19566 * Function: sd_target_change_task
19568 * Description: Handle dynamic target change
19570 * Context: Executes in a taskq() thread context
/*
 * sd_target_change_task: taskq handler for a dynamic target change.
 * Re-reads the capacity; when it has grown past the cached un_blockcount,
 * updates the block info and, for EFI-labeled LUNs whose capacity now
 * exceeds the label's capacity, logs a LUN-expansion sysevent.
 * Runs in taskq context without SD_MUTEX held on entry.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19573 sd_target_change_task(void *arg
)
19575 struct sd_lun
*un
= arg
;
19577 diskaddr_t label_cap
;
19581 ASSERT(un
!= NULL
);
19582 ASSERT(!mutex_owned(SD_MUTEX(un
)));
/* Nothing to compare against if cached geometry is invalid. */
19584 if ((un
->un_f_blockcount_is_valid
== FALSE
) ||
19585 (un
->un_f_tgt_blocksize_is_valid
== FALSE
)) {
19589 ssc
= sd_ssc_init(un
);
19591 if (sd_send_scsi_READ_CAPACITY(ssc
, &capacity
,
19592 &lbasize
, SD_PATH_DIRECT
) != 0) {
19593 SD_ERROR(SD_LOG_ERROR
, un
,
19594 "sd_target_change_task: fail to read capacity\n");
19595 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
19599 mutex_enter(SD_MUTEX(un
));
/* Only act when the LUN actually grew. */
19600 if (capacity
<= un
->un_blockcount
) {
19601 mutex_exit(SD_MUTEX(un
));
19605 sd_update_block_info(un
, lbasize
, capacity
);
19606 mutex_exit(SD_MUTEX(un
));
19609 * If lun is EFI labeled and lun capacity is greater than the
19610 * capacity contained in the label, log a sys event.
19612 if (cmlb_efi_label_capacity(un
->un_cmlbhandle
, &label_cap
,
19613 (void*)SD_PATH_DIRECT
) == 0) {
19614 mutex_enter(SD_MUTEX(un
));
19615 if (un
->un_f_blockcount_is_valid
&&
19616 un
->un_blockcount
> label_cap
) {
19617 mutex_exit(SD_MUTEX(un
));
19618 sd_log_lun_expansion_event(un
, KM_SLEEP
);
19620 mutex_exit(SD_MUTEX(un
));
19630 * Function: sd_log_dev_status_event
19632 * Description: Log EC_dev_status sysevent
19634 * Context: Never called from interrupt context
/*
 * sd_log_dev_status_event: build and post an EC_DEV_STATUS sysevent with
 * subclass 'esc' for this LUN. Allocates an nvlist and a MAXPATHLEN path
 * buffer (km_flag controls sleep/nosleep), attaches the /devices physical
 * path as DEV_PHYS_PATH, logs via ddi_log_sysevent(), and frees both
 * allocations on every path. Never called from interrupt context.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19637 sd_log_dev_status_event(struct sd_lun
*un
, char *esc
, int km_flag
)
19641 nvlist_t
*attr_list
;
19643 /* Allocate and build sysevent attribute list */
19644 err
= nvlist_alloc(&attr_list
, NV_UNIQUE_NAME_TYPE
, km_flag
);
19646 SD_ERROR(SD_LOG_ERROR
, un
,
19647 "sd_log_dev_status_event: fail to allocate space\n");
19651 path
= kmem_alloc(MAXPATHLEN
, km_flag
);
19652 if (path
== NULL
) {
/* Path allocation failed: release the nvlist before bailing out. */
19653 nvlist_free(attr_list
);
19654 SD_ERROR(SD_LOG_ERROR
, un
,
19655 "sd_log_dev_status_event: fail to allocate space\n");
19659 * Add path attribute to identify the lun.
19660 * We are using minor node 'a' as the sysevent attribute.
19662 (void) snprintf(path
, MAXPATHLEN
, "/devices");
19663 (void) ddi_pathname(SD_DEVINFO(un
), path
+ strlen(path
));
19664 (void) snprintf(path
+ strlen(path
), MAXPATHLEN
- strlen(path
),
19667 err
= nvlist_add_string(attr_list
, DEV_PHYS_PATH
, path
);
/* Attribute add failed: free everything acquired so far. */
19669 nvlist_free(attr_list
);
19670 kmem_free(path
, MAXPATHLEN
);
19671 SD_ERROR(SD_LOG_ERROR
, un
,
19672 "sd_log_dev_status_event: fail to add attribute\n");
19676 /* Log dynamic lun expansion sysevent */
19677 err
= ddi_log_sysevent(SD_DEVINFO(un
), SUNW_VENDOR
, EC_DEV_STATUS
,
19678 esc
, attr_list
, NULL
, km_flag
);
19679 if (err
!= DDI_SUCCESS
) {
19680 SD_ERROR(SD_LOG_ERROR
, un
,
19681 "sd_log_dev_status_event: fail to log sysevent\n");
/* Common cleanup on the success path. */
19684 nvlist_free(attr_list
);
19685 kmem_free(path
, MAXPATHLEN
);
19690 * Function: sd_log_lun_expansion_event
19692 * Description: Log lun expansion sys event
19694 * Context: Never called from interrupt context
/*
 * sd_log_lun_expansion_event: convenience wrapper posting the
 * ESC_DEV_DLE (dynamic LUN expansion) sysevent via
 * sd_log_dev_status_event(). km_flag selects sleeping vs. non-sleeping
 * allocation. Never called from interrupt context.
 */
19697 sd_log_lun_expansion_event(struct sd_lun
*un
, int km_flag
)
19699 sd_log_dev_status_event(un
, ESC_DEV_DLE
, km_flag
);
19704 * Function: sd_log_eject_request_event
19706 * Description: Log eject request sysevent
19708 * Context: Never called from interrupt context
/*
 * sd_log_eject_request_event: convenience wrapper posting the
 * ESC_DEV_EJECT_REQUEST sysevent via sd_log_dev_status_event().
 * km_flag selects sleeping vs. non-sleeping allocation.
 * Never called from interrupt context.
 */
19711 sd_log_eject_request_event(struct sd_lun
*un
, int km_flag
)
19713 sd_log_dev_status_event(un
, ESC_DEV_EJECT_REQUEST
, km_flag
);
19718 * Function: sd_media_change_task
19720 * Description: Recovery action for CDROM to become available.
19722 * Context: Executes in a taskq() thread context
/*
 * sd_media_change_task: taskq recovery for removable media (CDROM) becoming
 * available again. Retries sd_handle_mchange() in a loop — initially
 * SD_UNIT_ATTENTION_RETRY/10 attempts, widened to SD_UNIT_ATTENTION_RETRY
 * when EAGAIN indicates a post-reset "becoming ready" state — sleeping
 * 0.5s between attempts. Then, under SD_MUTEX, either fails the original
 * command (fatal sense message + EIO) or retries it with SD_RETRIES_UA.
 * The argument is the failed scsi_pkt; bp/xp/un are recovered from it.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19726 sd_media_change_task(void *arg
)
19728 struct scsi_pkt
*pktp
= arg
;
19731 struct sd_xbuf
*xp
;
19733 int retry_count
= 0;
19734 int retry_limit
= SD_UNIT_ATTENTION_RETRY
/10;
19735 struct sd_sense_info si
;
19737 ASSERT(pktp
!= NULL
);
/* Recover the original buf / xbuf / softstate from the packet. */
19738 bp
= (struct buf
*)pktp
->pkt_private
;
19739 ASSERT(bp
!= NULL
);
19740 xp
= SD_GET_XBUF(bp
);
19741 ASSERT(xp
!= NULL
);
19742 un
= SD_GET_UN(bp
);
19743 ASSERT(un
!= NULL
);
19744 ASSERT(!mutex_owned(SD_MUTEX(un
)));
19745 ASSERT(un
->un_f_monitor_media_state
);
19747 si
.ssi_severity
= SCSI_ERR_INFO
;
19748 si
.ssi_pfa_flag
= FALSE
;
19751 * When a reset is issued on a CDROM, it takes a long time to
19752 * recover. First few attempts to read capacity and other things
19753 * related to handling unit attention fail (with a ASC 0x4 and
19754 * ASCQ 0x1). In that case we want to do enough retries and we want
19755 * to limit the retries in other cases of genuine failures like
19756 * no media in drive.
19758 while (retry_count
++ < retry_limit
) {
19759 if ((err
= sd_handle_mchange(un
)) == 0) {
/* EAGAIN => drive still becoming ready; allow the full retry budget. */
19762 if (err
== EAGAIN
) {
19763 retry_limit
= SD_UNIT_ATTENTION_RETRY
;
19765 /* Sleep for 0.5 sec. & try again */
19766 delay(drv_usectohz(500000));
19770 * Dispatch (retry or fail) the original command here,
19771 * along with appropriate console messages....
19773 * Must grab the mutex before calling sd_retry_command,
19774 * sd_print_sense_msg and sd_return_failed_command.
19776 mutex_enter(SD_MUTEX(un
));
19777 if (err
!= SD_CMD_SUCCESS
) {
19778 SD_UPDATE_ERRSTATS(un
, sd_harderrs
);
19779 SD_UPDATE_ERRSTATS(un
, sd_rq_nodev_err
);
19780 si
.ssi_severity
= SCSI_ERR_FATAL
;
19781 sd_print_sense_msg(un
, bp
, &si
, SD_NO_RETRY_ISSUED
);
19782 sd_return_failed_command(un
, bp
, EIO
);
19784 sd_retry_command(un
, bp
, SD_RETRIES_UA
, sd_print_sense_msg
,
19785 &si
, EIO
, (clock_t)0, NULL
);
19787 mutex_exit(SD_MUTEX(un
));
19793 * Function: sd_handle_mchange
19795 * Description: Perform geometry validation & other recovery when CDROM
19796 * has been removed from drive.
19798 * Return Code: 0 for success
19799 * errno-type return code of either sd_send_scsi_DOORLOCK() or
19800 * sd_send_scsi_READ_CAPACITY()
19802 * Context: Executes in a taskq() thread context
/*
 * sd_handle_mchange: revalidate a removable-media unit after a media
 * change. Reads the new capacity (SD_PATH_DIRECT_PRIORITY), updates block
 * info and the sd_capacity kstat, checks CD writability, invalidates and
 * revalidates the cmlb label, then attempts to lock the door with
 * SD_REMOVAL_PREVENT. Returns 0 on success or the errno from
 * sd_send_scsi_DOORLOCK()/sd_send_scsi_READ_CAPACITY() (per the block
 * comment above). Runs in taskq context; SD_MUTEX not held on entry.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19806 sd_handle_mchange(struct sd_lun
*un
)
19813 ASSERT(!mutex_owned(SD_MUTEX(un
)));
19814 ASSERT(un
->un_f_monitor_media_state
);
19816 ssc
= sd_ssc_init(un
);
19817 rval
= sd_send_scsi_READ_CAPACITY(ssc
, &capacity
, &lbasize
,
19818 SD_PATH_DIRECT_PRIORITY
);
19823 mutex_enter(SD_MUTEX(un
));
19824 sd_update_block_info(un
, lbasize
, capacity
);
/* Refresh the exported capacity kstat (bytes = blocks * block size). */
19826 if (un
->un_errstats
!= NULL
) {
19827 struct sd_errstats
*stp
=
19828 (struct sd_errstats
*)un
->un_errstats
->ks_data
;
19829 stp
->sd_capacity
.value
.ui64
= (uint64_t)
19830 ((uint64_t)un
->un_blockcount
*
19831 (uint64_t)un
->un_tgt_blocksize
);
19835 * Check if the media in the device is writable or not
19838 sd_check_for_writable_cd(ssc
, SD_PATH_DIRECT_PRIORITY
);
19842 * Note: Maybe let the strategy/partitioning chain worry about getting
19845 mutex_exit(SD_MUTEX(un
));
19846 cmlb_invalidate(un
->un_cmlbhandle
, (void *)SD_PATH_DIRECT_PRIORITY
);
19849 if (cmlb_validate(un
->un_cmlbhandle
, 0,
19850 (void *)SD_PATH_DIRECT_PRIORITY
) != 0) {
19854 if (un
->un_f_pkstats_enabled
) {
19856 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
19857 "sd_handle_mchange: un:0x%p pstats created and "
19863 * Try to lock the door
19865 rval
= sd_send_scsi_DOORLOCK(ssc
, SD_REMOVAL_PREVENT
,
19866 SD_PATH_DIRECT_PRIORITY
);
19869 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
19876 * Function: sd_send_scsi_DOORLOCK
19878 * Description: Issue the scsi DOOR LOCK command
19880 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19881 * structure for this target.
19882 * flag - SD_REMOVAL_ALLOW
19883 * SD_REMOVAL_PREVENT
19884 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19885 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19886 * to use the USCSI "direct" chain and bypass the normal
19887 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19888 * command is issued as part of an error recovery action.
19890 * Return Code: 0 - Success
19891 * errno return code from sd_ssc_send()
19893 * Context: Can sleep.
/*
 * sd_send_scsi_DOORLOCK: issue the SCSI PREVENT/ALLOW MEDIUM REMOVAL
 * (DOORLOCK) command via uscsi. Fakes success when doorlock is already
 * known to be unsupported, and ignores SD_REMOVAL_PREVENT while an eject
 * is in progress. On CHECK CONDITION with ILLEGAL REQUEST sense, marks
 * doorlock unsupported so future calls are skipped. Returns 0 or the errno
 * from sd_ssc_send() (per the block comment above). Can sleep.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
19897 sd_send_scsi_DOORLOCK(sd_ssc_t
*ssc
, int flag
, int path_flag
)
19899 struct scsi_extended_sense sense_buf
;
19900 union scsi_cdb cdb
;
19901 struct uscsi_cmd ucmd_buf
;
19905 ASSERT(ssc
!= NULL
);
19907 ASSERT(un
!= NULL
);
19908 ASSERT(!mutex_owned(SD_MUTEX(un
)));
19910 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un
);
19912 /* already determined doorlock is not supported, fake success */
19913 if (un
->un_f_doorlock_supported
== FALSE
) {
19918 * If we are ejecting and see an SD_REMOVAL_PREVENT
19919 * ignore the command so we can complete the eject
19922 if (flag
== SD_REMOVAL_PREVENT
) {
19923 mutex_enter(SD_MUTEX(un
));
19924 if (un
->un_f_ejecting
== TRUE
) {
19925 mutex_exit(SD_MUTEX(un
));
19928 mutex_exit(SD_MUTEX(un
));
/* Build the GROUP0 DOORLOCK CDB; byte 4 carries the prevent/allow flag. */
19931 bzero(&cdb
, sizeof (cdb
));
19932 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
19934 cdb
.scc_cmd
= SCMD_DOORLOCK
;
19935 cdb
.cdb_opaque
[4] = (uchar_t
)flag
;
19937 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
19938 ucmd_buf
.uscsi_cdblen
= CDB_GROUP0
;
19939 ucmd_buf
.uscsi_bufaddr
= NULL
;
19940 ucmd_buf
.uscsi_buflen
= 0;
19941 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
19942 ucmd_buf
.uscsi_rqlen
= sizeof (sense_buf
);
19943 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_SILENT
;
19944 ucmd_buf
.uscsi_timeout
= 15;
19946 SD_TRACE(SD_LOG_IO
, un
,
19947 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
19949 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
19950 UIO_SYSSPACE
, path_flag
);
19953 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
/* ILLEGAL REQUEST => device has no door lock; remember and fake success. */
19955 if ((status
== EIO
) && (ucmd_buf
.uscsi_status
== STATUS_CHECK
) &&
19956 (ucmd_buf
.uscsi_rqstatus
== STATUS_GOOD
) &&
19957 (scsi_sense_key((uint8_t *)&sense_buf
) == KEY_ILLEGAL_REQUEST
)) {
19958 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
19960 /* fake success and skip subsequent doorlock commands */
19961 un
->un_f_doorlock_supported
= FALSE
;
19969 * Function: sd_send_scsi_READ_CAPACITY
19971 * Description: This routine uses the scsi READ CAPACITY command to determine
19972 * the device capacity in number of blocks and the device native
19973 * block size. If this function returns a failure, then the
19974 * values in *capp and *lbap are undefined. If the capacity
19975 * returned is 0xffffffff then the lun is too large for a
19976 * normal READ CAPACITY command and the results of a
19977 * READ CAPACITY 16 will be used instead.
19979 * Arguments: ssc - ssc contains ptr to soft state struct for the target
19980 * capp - ptr to unsigned 64-bit variable to receive the
19981 * capacity value from the command.
19982 * lbap - ptr to unsigned 32-bit varaible to receive the
19983 * block size value from the command
19984 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19985 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19986 * to use the USCSI "direct" chain and bypass the normal
19987 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19988 * command is issued as part of an error recovery action.
19990 * Return Code: 0 - Success
19992 * EACCES - Reservation conflict detected
19993 * EAGAIN - Device is becoming ready
19994 * errno return code from sd_ssc_send()
19996 * Context: Can sleep. Blocks until command completes.
/* Size of the READ CAPACITY (10) parameter data buffer. */
19999 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
/*
 * sd_send_scsi_READ_CAPACITY: issue READ CAPACITY (10) via uscsi to obtain
 * the device capacity (*capp) and logical block size (*lbap). Falls back to
 * sd_send_scsi_READ_CAPACITY_16() when the device reports 0xffffffff
 * (>2TB LUN). ATAPI CDs have the lbasize forced (inaccurate values such as
 * 2352/0 are common); removable media capacity is rescaled to
 * un_sys_blocksize units. Zero capacity or lbasize is rejected as invalid
 * (bug 4203735). Returns 0, EACCES on reservation conflict, EAGAIN while
 * becoming ready, or the sd_ssc_send() errno (per the block comment
 * above). Can sleep; blocks until the command completes.
 * NOTE(review): extraction-damaged text below; tokens preserved verbatim.
 */
20002 sd_send_scsi_READ_CAPACITY(sd_ssc_t
*ssc
, uint64_t *capp
, uint32_t *lbap
,
20005 struct scsi_extended_sense sense_buf
;
20006 struct uscsi_cmd ucmd_buf
;
20007 union scsi_cdb cdb
;
20008 uint32_t *capacity_buf
;
20015 ASSERT(ssc
!= NULL
);
20018 ASSERT(un
!= NULL
);
20019 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20020 ASSERT(capp
!= NULL
);
20021 ASSERT(lbap
!= NULL
);
20023 SD_TRACE(SD_LOG_IO
, un
,
20024 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un
);
20027 * First send a READ_CAPACITY command to the target.
20028 * (This command is mandatory under SCSI-2.)
20030 * Set up the CDB for the READ_CAPACITY command. The Partial
20031 * Medium Indicator bit is cleared. The address field must be
20032 * zero if the PMI bit is zero.
20034 bzero(&cdb
, sizeof (cdb
));
20035 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
20037 capacity_buf
= kmem_zalloc(SD_CAPACITY_SIZE
, KM_SLEEP
);
20039 cdb
.scc_cmd
= SCMD_READ_CAPACITY
;
20041 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
20042 ucmd_buf
.uscsi_cdblen
= CDB_GROUP1
;
20043 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)capacity_buf
;
20044 ucmd_buf
.uscsi_buflen
= SD_CAPACITY_SIZE
;
20045 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
20046 ucmd_buf
.uscsi_rqlen
= sizeof (sense_buf
);
20047 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_READ
| USCSI_SILENT
;
20048 ucmd_buf
.uscsi_timeout
= 60;
20050 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
20051 UIO_SYSSPACE
, path_flag
);
20055 /* Return failure if we did not get valid capacity data. */
20056 if (ucmd_buf
.uscsi_resid
!= 0) {
20057 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, -1,
20058 "sd_send_scsi_READ_CAPACITY received invalid "
20060 kmem_free(capacity_buf
, SD_CAPACITY_SIZE
);
20064 * Read capacity and block size from the READ CAPACITY 10 data.
20065 * This data may be adjusted later due to device specific
20068 * According to the SCSI spec, the READ CAPACITY 10
20069 * command returns the following:
20071 * bytes 0-3: Maximum logical block address available.
20072 * (MSB in byte:0 & LSB in byte:3)
20074 * bytes 4-7: Block length in bytes
20075 * (MSB in byte:4 & LSB in byte:7)
/* Parameter data is big-endian on the wire. */
20078 capacity
= BE_32(capacity_buf
[0]);
20079 lbasize
= BE_32(capacity_buf
[1]);
20082 * Done with capacity_buf
20084 kmem_free(capacity_buf
, SD_CAPACITY_SIZE
);
20087 * if the reported capacity is set to all 0xf's, then
20088 * this disk is too large and requires SBC-2 commands.
20089 * Reissue the request using READ CAPACITY 16.
20091 if (capacity
== 0xffffffff) {
20092 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
20093 status
= sd_send_scsi_READ_CAPACITY_16(ssc
, &capacity
,
20094 &lbasize
, &pbsize
, path_flag
);
20101 break; /* Success! */
/* Command failed: map the SCSI status to a recovery action. */
20103 switch (ucmd_buf
.uscsi_status
) {
20104 case STATUS_RESERVATION_CONFLICT
:
20109 * Check condition; look for ASC/ASCQ of 0x04/0x01
20110 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20112 if ((ucmd_buf
.uscsi_rqstatus
== STATUS_GOOD
) &&
20113 (scsi_sense_asc((uint8_t *)&sense_buf
) == 0x04) &&
20114 (scsi_sense_ascq((uint8_t *)&sense_buf
) == 0x01)) {
20115 kmem_free(capacity_buf
, SD_CAPACITY_SIZE
);
20124 kmem_free(capacity_buf
, SD_CAPACITY_SIZE
);
20129 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20130 * (2352 and 0 are common) so for these devices always force the value
20131 * to 2048 as required by the ATAPI specs.
20133 if ((un
->un_f_cfg_is_atapi
== TRUE
) && (ISCD(un
))) {
20138 * Get the maximum LBA value from the READ CAPACITY data.
20139 * Here we assume that the Partial Medium Indicator (PMI) bit
20140 * was cleared when issuing the command. This means that the LBA
20141 * returned from the device is the LBA of the last logical block
20142 * on the logical unit. The actual logical block count will be
20143 * this value plus one.
20148 * Currently, for removable media, the capacity is saved in terms
20149 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20151 if (un
->un_f_has_removable_media
)
20152 capacity
*= (lbasize
/ un
->un_sys_blocksize
);
20157 * Copy the values from the READ CAPACITY command into the space
20158 * provided by the caller.
20163 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_READ_CAPACITY: "
20164 "capacity:0x%llx lbasize:0x%x\n", capacity
, lbasize
);
20167 * Both the lbasize and capacity from the device must be nonzero,
20168 * otherwise we assume that the values are not valid and return
20169 * failure to the caller. (4203735)
20171 if ((capacity
== 0) || (lbasize
== 0)) {
20172 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, -1,
20173 "sd_send_scsi_READ_CAPACITY received invalid value "
20174 "capacity %llu lbasize %d", capacity
, lbasize
);
20177 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
20182 * Function: sd_send_scsi_READ_CAPACITY_16
20184 * Description: This routine uses the scsi READ CAPACITY 16 command to
20185 * determine the device capacity in number of blocks and the
20186 * device native block size. If this function returns a failure,
20187 * then the values in *capp and *lbap are undefined.
20188 * This routine should be called by sd_send_scsi_READ_CAPACITY
20189 * which will apply any device specific adjustments to capacity
20190 * and lbasize. One exception is it is also called by
20191 * sd_get_media_info_ext. In that function, there is no need to
20192 * adjust the capacity and lbasize.
20194 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20195 * capp - ptr to unsigned 64-bit variable to receive the
20196 * capacity value from the command.
20197 * lbap - ptr to unsigned 32-bit varaible to receive the
20198 * block size value from the command
20199 * psp - ptr to unsigned 32-bit variable to receive the
20200 * physical block size value from the command
20201 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20202 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20203 * to use the USCSI "direct" chain and bypass the normal
20204 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
20205 * this command is issued as part of an error recovery
20208 * Return Code: 0 - Success
20210 * EACCES - Reservation conflict detected
20211 * EAGAIN - Device is becoming ready
20212 * errno return code from sd_ssc_send()
20214 * Context: Can sleep. Blocks until command completes.
20217 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
20220 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t
*ssc
, uint64_t *capp
,
20221 uint32_t *lbap
, uint32_t *psp
, int path_flag
)
20223 struct scsi_extended_sense sense_buf
;
20224 struct uscsi_cmd ucmd_buf
;
20225 union scsi_cdb cdb
;
20226 uint64_t *capacity16_buf
;
20234 ASSERT(ssc
!= NULL
);
20237 ASSERT(un
!= NULL
);
20238 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20239 ASSERT(capp
!= NULL
);
20240 ASSERT(lbap
!= NULL
);
20242 SD_TRACE(SD_LOG_IO
, un
,
20243 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un
);
20246 * First send a READ_CAPACITY_16 command to the target.
20248 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
20249 * Medium Indicator bit is cleared. The address field must be
20250 * zero if the PMI bit is zero.
20252 bzero(&cdb
, sizeof (cdb
));
20253 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
20255 capacity16_buf
= kmem_zalloc(SD_CAPACITY_16_SIZE
, KM_SLEEP
);
20257 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
20258 ucmd_buf
.uscsi_cdblen
= CDB_GROUP4
;
20259 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)capacity16_buf
;
20260 ucmd_buf
.uscsi_buflen
= SD_CAPACITY_16_SIZE
;
20261 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
20262 ucmd_buf
.uscsi_rqlen
= sizeof (sense_buf
);
20263 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_READ
| USCSI_SILENT
;
20264 ucmd_buf
.uscsi_timeout
= 60;
20267 * Read Capacity (16) is a Service Action In command. One
20268 * command byte (0x9E) is overloaded for multiple operations,
20269 * with the second CDB byte specifying the desired operation
20271 cdb
.scc_cmd
= SCMD_SVC_ACTION_IN_G4
;
20272 cdb
.cdb_opaque
[1] = SSVC_ACTION_READ_CAPACITY_G4
;
20275 * Fill in allocation length field
20277 FORMG4COUNT(&cdb
, ucmd_buf
.uscsi_buflen
);
20279 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
20280 UIO_SYSSPACE
, path_flag
);
20284 /* Return failure if we did not get valid capacity data. */
20285 if (ucmd_buf
.uscsi_resid
> 20) {
20286 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, -1,
20287 "sd_send_scsi_READ_CAPACITY_16 received invalid "
20289 kmem_free(capacity16_buf
, SD_CAPACITY_16_SIZE
);
20294 * Read capacity and block size from the READ CAPACITY 16 data.
20295 * This data may be adjusted later due to device specific
20298 * According to the SCSI spec, the READ CAPACITY 16
20299 * command returns the following:
20301 * bytes 0-7: Maximum logical block address available.
20302 * (MSB in byte:0 & LSB in byte:7)
20304 * bytes 8-11: Block length in bytes
20305 * (MSB in byte:8 & LSB in byte:11)
20307 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
20309 capacity
= BE_64(capacity16_buf
[0]);
20310 lbasize
= BE_32(*(uint32_t *)&capacity16_buf
[1]);
20311 lbpb_exp
= (BE_64(capacity16_buf
[1]) >> 16) & 0x0f;
20313 pbsize
= lbasize
<< lbpb_exp
;
20316 * Done with capacity16_buf
20318 kmem_free(capacity16_buf
, SD_CAPACITY_16_SIZE
);
20321 * if the reported capacity is set to all 0xf's, then
20322 * this disk is too large. This could only happen with
20323 * a device that supports LBAs larger than 64 bits which
20324 * are not defined by any current T10 standards.
20326 if (capacity
== 0xffffffffffffffff) {
20327 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, -1,
20328 "disk is too large");
20331 break; /* Success! */
20333 switch (ucmd_buf
.uscsi_status
) {
20334 case STATUS_RESERVATION_CONFLICT
:
20339 * Check condition; look for ASC/ASCQ of 0x04/0x01
20340 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20342 if ((ucmd_buf
.uscsi_rqstatus
== STATUS_GOOD
) &&
20343 (scsi_sense_asc((uint8_t *)&sense_buf
) == 0x04) &&
20344 (scsi_sense_ascq((uint8_t *)&sense_buf
) == 0x01)) {
20345 kmem_free(capacity16_buf
, SD_CAPACITY_16_SIZE
);
20354 kmem_free(capacity16_buf
, SD_CAPACITY_16_SIZE
);
20359 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20360 * (2352 and 0 are common) so for these devices always force the value
20361 * to 2048 as required by the ATAPI specs.
20363 if ((un
->un_f_cfg_is_atapi
== TRUE
) && (ISCD(un
))) {
20368 * Get the maximum LBA value from the READ CAPACITY 16 data.
20369 * Here we assume that the Partial Medium Indicator (PMI) bit
20370 * was cleared when issuing the command. This means that the LBA
20371 * returned from the device is the LBA of the last logical block
20372 * on the logical unit. The actual logical block count will be
20373 * this value plus one.
20378 * Currently, for removable media, the capacity is saved in terms
20379 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20381 if (un
->un_f_has_removable_media
)
20382 capacity
*= (lbasize
/ un
->un_sys_blocksize
);
20388 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_READ_CAPACITY_16: "
20389 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
20390 capacity
, lbasize
, pbsize
);
20392 if ((capacity
== 0) || (lbasize
== 0) || (pbsize
== 0)) {
20393 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, -1,
20394 "sd_send_scsi_READ_CAPACITY_16 received invalid value "
20395 "capacity %llu lbasize %d pbsize %d", capacity
, lbasize
);
20399 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
20405 * Function: sd_send_scsi_START_STOP_UNIT
20407 * Description: Issue a scsi START STOP UNIT command to the target.
20409 * Arguments: ssc - ssc contatins pointer to driver soft state (unit)
20410 * structure for this target.
20411 * pc_flag - SD_POWER_CONDITION
20413 * flag - SD_TARGET_START
20417 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20418 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20419 * to use the USCSI "direct" chain and bypass the normal
20420 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20421 * command is issued as part of an error recovery action.
20423 * Return Code: 0 - Success
20425 * EACCES - Reservation conflict detected
20426 * ENXIO - Not Ready, medium not present
20427 * errno return code from sd_ssc_send()
20429 * Context: Can sleep.
20433 sd_send_scsi_START_STOP_UNIT(sd_ssc_t
*ssc
, int pc_flag
, int flag
,
20436 struct scsi_extended_sense sense_buf
;
20437 union scsi_cdb cdb
;
20438 struct uscsi_cmd ucmd_buf
;
20442 ASSERT(ssc
!= NULL
);
20444 ASSERT(un
!= NULL
);
20445 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20447 SD_TRACE(SD_LOG_IO
, un
,
20448 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un
);
20450 if (un
->un_f_check_start_stop
&&
20451 (pc_flag
== SD_START_STOP
) &&
20452 ((flag
== SD_TARGET_START
) || (flag
== SD_TARGET_STOP
)) &&
20453 (un
->un_f_start_stop_supported
!= TRUE
)) {
20458 * If we are performing an eject operation and
20459 * we receive any command other than SD_TARGET_EJECT
20460 * we should immediately return.
20462 if (flag
!= SD_TARGET_EJECT
) {
20463 mutex_enter(SD_MUTEX(un
));
20464 if (un
->un_f_ejecting
== TRUE
) {
20465 mutex_exit(SD_MUTEX(un
));
20468 mutex_exit(SD_MUTEX(un
));
20471 bzero(&cdb
, sizeof (cdb
));
20472 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
20473 bzero(&sense_buf
, sizeof (struct scsi_extended_sense
));
20475 cdb
.scc_cmd
= SCMD_START_STOP
;
20476 cdb
.cdb_opaque
[4] = (pc_flag
== SD_POWER_CONDITION
) ?
20477 (uchar_t
)(flag
<< 4) : (uchar_t
)flag
;
20479 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
20480 ucmd_buf
.uscsi_cdblen
= CDB_GROUP0
;
20481 ucmd_buf
.uscsi_bufaddr
= NULL
;
20482 ucmd_buf
.uscsi_buflen
= 0;
20483 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
20484 ucmd_buf
.uscsi_rqlen
= sizeof (struct scsi_extended_sense
);
20485 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_SILENT
;
20486 ucmd_buf
.uscsi_timeout
= 200;
20488 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
20489 UIO_SYSSPACE
, path_flag
);
20493 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
20494 break; /* Success! */
20496 switch (ucmd_buf
.uscsi_status
) {
20497 case STATUS_RESERVATION_CONFLICT
:
20501 if (ucmd_buf
.uscsi_rqstatus
== STATUS_GOOD
) {
20502 switch (scsi_sense_key(
20503 (uint8_t *)&sense_buf
)) {
20504 case KEY_ILLEGAL_REQUEST
:
20507 case KEY_NOT_READY
:
20508 if (scsi_sense_asc(
20509 (uint8_t *)&sense_buf
)
20527 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_START_STOP_UNIT: exit\n");
20534 * Function: sd_start_stop_unit_callback
20536 * Description: timeout(9F) callback to begin recovery process for a
20537 * device that has spun down.
20539 * Arguments: arg - pointer to associated softstate struct.
20541 * Context: Executes in a timeout(9F) thread context
20545 sd_start_stop_unit_callback(void *arg
)
20547 struct sd_lun
*un
= arg
;
20548 ASSERT(un
!= NULL
);
20549 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20551 SD_TRACE(SD_LOG_IO
, un
, "sd_start_stop_unit_callback: entry\n");
20553 (void) taskq_dispatch(sd_tq
, sd_start_stop_unit_task
, un
, KM_NOSLEEP
);
20558 * Function: sd_start_stop_unit_task
20560 * Description: Recovery procedure when a drive is spun down.
20562 * Arguments: arg - pointer to associated softstate struct.
20564 * Context: Executes in a taskq() thread context
20568 sd_start_stop_unit_task(void *arg
)
20570 struct sd_lun
*un
= arg
;
20575 ASSERT(un
!= NULL
);
20576 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20578 SD_TRACE(SD_LOG_IO
, un
, "sd_start_stop_unit_task: entry\n");
20581 * Some unformatted drives report not ready error, no need to
20582 * restart if format has been initiated.
20584 mutex_enter(SD_MUTEX(un
));
20585 if (un
->un_f_format_in_progress
== TRUE
) {
20586 mutex_exit(SD_MUTEX(un
));
20589 mutex_exit(SD_MUTEX(un
));
20591 ssc
= sd_ssc_init(un
);
20593 * When a START STOP command is issued from here, it is part of a
20594 * failure recovery operation and must be issued before any other
20595 * commands, including any pending retries. Thus it must be sent
20596 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up
20597 * succeeds or not, we will start I/O after the attempt.
20598 * If power condition is supported and the current power level
20599 * is capable of performing I/O, we should set the power condition
20600 * to that level. Otherwise, set the power condition to ACTIVE.
20602 if (un
->un_f_power_condition_supported
) {
20603 mutex_enter(SD_MUTEX(un
));
20604 ASSERT(SD_PM_IS_LEVEL_VALID(un
, un
->un_power_level
));
20605 power_level
= sd_pwr_pc
.ran_perf
[un
->un_power_level
]
20606 > 0 ? un
->un_power_level
: SD_SPINDLE_ACTIVE
;
20607 mutex_exit(SD_MUTEX(un
));
20608 rval
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_POWER_CONDITION
,
20609 sd_pl2pc
[power_level
], SD_PATH_DIRECT_PRIORITY
);
20611 rval
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_START_STOP
,
20612 SD_TARGET_START
, SD_PATH_DIRECT_PRIORITY
);
20616 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
20619 * The above call blocks until the START_STOP_UNIT command completes.
20620 * Now that it has completed, we must re-try the original IO that
20621 * received the NOT READY condition in the first place. There are
20622 * three possible conditions here:
20624 * (1) The original IO is on un_retry_bp.
20625 * (2) The original IO is on the regular wait queue, and un_retry_bp
20627 * (3) The original IO is on the regular wait queue, and un_retry_bp
20628 * points to some other, unrelated bp.
20630 * For each case, we must call sd_start_cmds() with un_retry_bp
20631 * as the argument. If un_retry_bp is NULL, this will initiate
20632 * processing of the regular wait queue. If un_retry_bp is not NULL,
20633 * then this will process the bp on un_retry_bp. That may or may not
20634 * be the original IO, but that does not matter: the important thing
20635 * is to keep the IO processing going at this point.
20637 * Note: This is a very specific error recovery sequence associated
20638 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
20639 * serialize the I/O with completion of the spin-up.
20641 mutex_enter(SD_MUTEX(un
));
20642 SD_TRACE(SD_LOG_IO_CORE
| SD_LOG_ERROR
, un
,
20643 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
20644 un
, un
->un_retry_bp
);
20645 un
->un_startstop_timeid
= NULL
; /* Timeout is no longer pending */
20646 sd_start_cmds(un
, un
->un_retry_bp
);
20647 mutex_exit(SD_MUTEX(un
));
20649 SD_TRACE(SD_LOG_IO
, un
, "sd_start_stop_unit_task: exit\n");
20654 * Function: sd_send_scsi_INQUIRY
20656 * Description: Issue the scsi INQUIRY command.
20658 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20659 * structure for this target.
20666 * Return Code: 0 - Success
20667 * errno return code from sd_ssc_send()
20669 * Context: Can sleep. Does not return until command is completed.
20673 sd_send_scsi_INQUIRY(sd_ssc_t
*ssc
, uchar_t
*bufaddr
, size_t buflen
,
20674 uchar_t evpd
, uchar_t page_code
, size_t *residp
)
20676 union scsi_cdb cdb
;
20677 struct uscsi_cmd ucmd_buf
;
20681 ASSERT(ssc
!= NULL
);
20683 ASSERT(un
!= NULL
);
20684 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20685 ASSERT(bufaddr
!= NULL
);
20687 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un
);
20689 bzero(&cdb
, sizeof (cdb
));
20690 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
20691 bzero(bufaddr
, buflen
);
20693 cdb
.scc_cmd
= SCMD_INQUIRY
;
20694 cdb
.cdb_opaque
[1] = evpd
;
20695 cdb
.cdb_opaque
[2] = page_code
;
20696 FORMG0COUNT(&cdb
, buflen
);
20698 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
20699 ucmd_buf
.uscsi_cdblen
= CDB_GROUP0
;
20700 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)bufaddr
;
20701 ucmd_buf
.uscsi_buflen
= buflen
;
20702 ucmd_buf
.uscsi_rqbuf
= NULL
;
20703 ucmd_buf
.uscsi_rqlen
= 0;
20704 ucmd_buf
.uscsi_flags
= USCSI_READ
| USCSI_SILENT
;
20705 ucmd_buf
.uscsi_timeout
= 200; /* Excessive legacy value */
20707 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
20708 UIO_SYSSPACE
, SD_PATH_DIRECT
);
20711 * Only handle status == 0, the upper-level caller
20712 * will put different assessment based on the context.
20715 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
20717 if ((status
== 0) && (residp
!= NULL
)) {
20718 *residp
= ucmd_buf
.uscsi_resid
;
20721 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_INQUIRY: exit\n");
20728 * Function: sd_send_scsi_TEST_UNIT_READY
20730 * Description: Issue the scsi TEST UNIT READY command.
20731 * This routine can be told to set the flag USCSI_DIAGNOSE to
20732 * prevent retrying failed commands. Use this when the intent
20733 * is either to check for device readiness, to clear a Unit
20734 * Attention, or to clear any outstanding sense data.
20735 * However under specific conditions the expected behavior
20736 * is for retries to bring a device ready, so use the flag
20739 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20740 * structure for this target.
20741 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
20742 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
20743 * 0: dont check for media present, do retries on cmd.
20745 * Return Code: 0 - Success
20747 * EACCES - Reservation conflict detected
20748 * ENXIO - Not Ready, medium not present
20749 * errno return code from sd_ssc_send()
20751 * Context: Can sleep. Does not return until command is completed.
20755 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t
*ssc
, int flag
)
20757 struct scsi_extended_sense sense_buf
;
20758 union scsi_cdb cdb
;
20759 struct uscsi_cmd ucmd_buf
;
20763 ASSERT(ssc
!= NULL
);
20765 ASSERT(un
!= NULL
);
20766 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20768 SD_TRACE(SD_LOG_IO
, un
,
20769 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un
);
20772 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
20773 * timeouts when they receive a TUR and the queue is not empty. Check
20774 * the configuration flag set during attach (indicating the drive has
20775 * this firmware bug) and un_ncmds_in_transport before issuing the
20776 * TUR. If there are
20777 * pending commands return success, this is a bit arbitrary but is ok
20778 * for non-removables (i.e. the eliteI disks) and non-clustering
20781 if (un
->un_f_cfg_tur_check
== TRUE
) {
20782 mutex_enter(SD_MUTEX(un
));
20783 if (un
->un_ncmds_in_transport
!= 0) {
20784 mutex_exit(SD_MUTEX(un
));
20787 mutex_exit(SD_MUTEX(un
));
20790 bzero(&cdb
, sizeof (cdb
));
20791 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
20792 bzero(&sense_buf
, sizeof (struct scsi_extended_sense
));
20794 cdb
.scc_cmd
= SCMD_TEST_UNIT_READY
;
20796 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
20797 ucmd_buf
.uscsi_cdblen
= CDB_GROUP0
;
20798 ucmd_buf
.uscsi_bufaddr
= NULL
;
20799 ucmd_buf
.uscsi_buflen
= 0;
20800 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
20801 ucmd_buf
.uscsi_rqlen
= sizeof (struct scsi_extended_sense
);
20802 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_SILENT
;
20804 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20805 if ((flag
& SD_DONT_RETRY_TUR
) != 0) {
20806 ucmd_buf
.uscsi_flags
|= USCSI_DIAGNOSE
;
20808 ucmd_buf
.uscsi_timeout
= 60;
20810 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
20811 UIO_SYSSPACE
, ((flag
& SD_BYPASS_PM
) ? SD_PATH_DIRECT
:
20812 SD_PATH_STANDARD
));
20816 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
20817 break; /* Success! */
20819 switch (ucmd_buf
.uscsi_status
) {
20820 case STATUS_RESERVATION_CONFLICT
:
20824 if ((flag
& SD_CHECK_FOR_MEDIA
) == 0) {
20827 if ((ucmd_buf
.uscsi_rqstatus
== STATUS_GOOD
) &&
20828 (scsi_sense_key((uint8_t *)&sense_buf
) ==
20830 (scsi_sense_asc((uint8_t *)&sense_buf
) == 0x3A)) {
20842 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20848 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20850 * Description: Issue the scsi PERSISTENT RESERVE IN command.
20852 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20853 * structure for this target.
20855 * Return Code: 0 - Success
20858 * errno return code from sd_ssc_send()
20860 * Context: Can sleep. Does not return until command is completed.
20864 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t
*ssc
, uchar_t usr_cmd
,
20865 uint16_t data_len
, uchar_t
*data_bufp
)
20867 struct scsi_extended_sense sense_buf
;
20868 union scsi_cdb cdb
;
20869 struct uscsi_cmd ucmd_buf
;
20871 int no_caller_buf
= FALSE
;
20874 ASSERT(ssc
!= NULL
);
20876 ASSERT(un
!= NULL
);
20877 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20878 ASSERT((usr_cmd
== SD_READ_KEYS
) || (usr_cmd
== SD_READ_RESV
));
20880 SD_TRACE(SD_LOG_IO
, un
,
20881 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un
);
20883 bzero(&cdb
, sizeof (cdb
));
20884 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
20885 bzero(&sense_buf
, sizeof (struct scsi_extended_sense
));
20886 if (data_bufp
== NULL
) {
20887 /* Allocate a default buf if the caller did not give one */
20888 ASSERT(data_len
== 0);
20889 data_len
= MHIOC_RESV_KEY_SIZE
;
20890 data_bufp
= kmem_zalloc(MHIOC_RESV_KEY_SIZE
, KM_SLEEP
);
20891 no_caller_buf
= TRUE
;
20894 cdb
.scc_cmd
= SCMD_PERSISTENT_RESERVE_IN
;
20895 cdb
.cdb_opaque
[1] = usr_cmd
;
20896 FORMG1COUNT(&cdb
, data_len
);
20898 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
20899 ucmd_buf
.uscsi_cdblen
= CDB_GROUP1
;
20900 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)data_bufp
;
20901 ucmd_buf
.uscsi_buflen
= data_len
;
20902 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
20903 ucmd_buf
.uscsi_rqlen
= sizeof (struct scsi_extended_sense
);
20904 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_READ
| USCSI_SILENT
;
20905 ucmd_buf
.uscsi_timeout
= 60;
20907 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
20908 UIO_SYSSPACE
, SD_PATH_STANDARD
);
20912 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
20914 break; /* Success! */
20916 switch (ucmd_buf
.uscsi_status
) {
20917 case STATUS_RESERVATION_CONFLICT
:
20921 if ((ucmd_buf
.uscsi_rqstatus
== STATUS_GOOD
) &&
20922 (scsi_sense_key((uint8_t *)&sense_buf
) ==
20923 KEY_ILLEGAL_REQUEST
)) {
20935 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
20937 if (no_caller_buf
== TRUE
) {
20938 kmem_free(data_bufp
, data_len
);
20946 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
20948 * Description: This routine is the driver entry point for handling CD-ROM
20949 * multi-host persistent reservation requests (MHIOCGRP_INKEYS,
20950 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the
20953 * Arguments: ssc - ssc contains un - pointer to soft state struct
20955 * usr_cmd SCSI-3 reservation facility command (one of
20956 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
20957 * SD_SCSI3_PREEMPTANDABORT)
20958 * usr_bufp - user provided pointer register, reserve descriptor or
20959 * preempt and abort structure (mhioc_register_t,
20960 * mhioc_resv_desc_t, mhioc_preemptandabort_t)
20962 * Return Code: 0 - Success
20965 * errno return code from sd_ssc_send()
20967 * Context: Can sleep. Does not return until command is completed.
20971 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t
*ssc
, uchar_t usr_cmd
,
20974 struct scsi_extended_sense sense_buf
;
20975 union scsi_cdb cdb
;
20976 struct uscsi_cmd ucmd_buf
;
20978 uchar_t data_len
= sizeof (sd_prout_t
);
20982 ASSERT(ssc
!= NULL
);
20984 ASSERT(un
!= NULL
);
20985 ASSERT(!mutex_owned(SD_MUTEX(un
)));
20986 ASSERT(data_len
== 24); /* required by scsi spec */
20988 SD_TRACE(SD_LOG_IO
, un
,
20989 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un
);
20991 if (usr_bufp
== NULL
) {
20995 bzero(&cdb
, sizeof (cdb
));
20996 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
20997 bzero(&sense_buf
, sizeof (struct scsi_extended_sense
));
20998 prp
= kmem_zalloc(data_len
, KM_SLEEP
);
21000 cdb
.scc_cmd
= SCMD_PERSISTENT_RESERVE_OUT
;
21001 cdb
.cdb_opaque
[1] = usr_cmd
;
21002 FORMG1COUNT(&cdb
, data_len
);
21004 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
21005 ucmd_buf
.uscsi_cdblen
= CDB_GROUP1
;
21006 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)prp
;
21007 ucmd_buf
.uscsi_buflen
= data_len
;
21008 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
21009 ucmd_buf
.uscsi_rqlen
= sizeof (struct scsi_extended_sense
);
21010 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_WRITE
| USCSI_SILENT
;
21011 ucmd_buf
.uscsi_timeout
= 60;
21014 case SD_SCSI3_REGISTER
: {
21015 mhioc_register_t
*ptr
= (mhioc_register_t
*)usr_bufp
;
21017 bcopy(ptr
->oldkey
.key
, prp
->res_key
, MHIOC_RESV_KEY_SIZE
);
21018 bcopy(ptr
->newkey
.key
, prp
->service_key
,
21019 MHIOC_RESV_KEY_SIZE
);
21020 prp
->aptpl
= ptr
->aptpl
;
21023 case SD_SCSI3_RESERVE
:
21024 case SD_SCSI3_RELEASE
: {
21025 mhioc_resv_desc_t
*ptr
= (mhioc_resv_desc_t
*)usr_bufp
;
21027 bcopy(ptr
->key
.key
, prp
->res_key
, MHIOC_RESV_KEY_SIZE
);
21028 prp
->scope_address
= BE_32(ptr
->scope_specific_addr
);
21029 cdb
.cdb_opaque
[2] = ptr
->type
;
21032 case SD_SCSI3_PREEMPTANDABORT
: {
21033 mhioc_preemptandabort_t
*ptr
=
21034 (mhioc_preemptandabort_t
*)usr_bufp
;
21036 bcopy(ptr
->resvdesc
.key
.key
, prp
->res_key
, MHIOC_RESV_KEY_SIZE
);
21037 bcopy(ptr
->victim_key
.key
, prp
->service_key
,
21038 MHIOC_RESV_KEY_SIZE
);
21039 prp
->scope_address
= BE_32(ptr
->resvdesc
.scope_specific_addr
);
21040 cdb
.cdb_opaque
[2] = ptr
->resvdesc
.type
;
21041 ucmd_buf
.uscsi_flags
|= USCSI_HEAD
;
21044 case SD_SCSI3_REGISTERANDIGNOREKEY
:
21046 mhioc_registerandignorekey_t
*ptr
;
21047 ptr
= (mhioc_registerandignorekey_t
*)usr_bufp
;
21048 bcopy(ptr
->newkey
.key
,
21049 prp
->service_key
, MHIOC_RESV_KEY_SIZE
);
21050 prp
->aptpl
= ptr
->aptpl
;
21058 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
21059 UIO_SYSSPACE
, SD_PATH_STANDARD
);
21063 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
21064 break; /* Success! */
21066 switch (ucmd_buf
.uscsi_status
) {
21067 case STATUS_RESERVATION_CONFLICT
:
21071 if ((ucmd_buf
.uscsi_rqstatus
== STATUS_GOOD
) &&
21072 (scsi_sense_key((uint8_t *)&sense_buf
) ==
21073 KEY_ILLEGAL_REQUEST
)) {
21085 kmem_free(prp
, data_len
);
21086 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
21092 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
21094 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
21096 * Arguments: un - pointer to the target's soft state struct
21097 * dkc - pointer to the callback structure
21099 * Return Code: 0 - success
21100 * errno-type error code
21102 * Context: kernel thread context only.
21104 * _______________________________________________________________
21105 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE |
21106 * |FLUSH_VOLATILE| | operation |
21107 * |______________|______________|_________________________________|
21108 * | 0 | NULL | Synchronous flush on both |
21109 * | | | volatile and non-volatile cache |
21110 * |______________|______________|_________________________________|
21111 * | 1 | NULL | Synchronous flush on volatile |
21112 * | | | cache; disk drivers may suppress|
21113 * | | | flush if disk table indicates |
21114 * | | | non-volatile cache |
21115 * |______________|______________|_________________________________|
21116 * | 0 | !NULL | Asynchronous flush on both |
21117 * | | | volatile and non-volatile cache;|
21118 * |______________|______________|_________________________________|
21119 * | 1 | !NULL | Asynchronous flush on volatile |
21120 * | | | cache; disk drivers may suppress|
21121 * | | | flush if disk table indicates |
21122 * | | | non-volatile cache |
21123 * |______________|______________|_________________________________|
21128 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun
*un
, struct dk_callback
*dkc
)
21130 struct sd_uscsi_info
*uip
;
21131 struct uscsi_cmd
*uscmd
;
21132 union scsi_cdb
*cdb
;
21137 SD_TRACE(SD_LOG_IO
, un
,
21138 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un
);
21140 ASSERT(un
!= NULL
);
21141 ASSERT(!mutex_owned(SD_MUTEX(un
)));
21143 if (dkc
== NULL
|| dkc
->dkc_callback
== NULL
) {
21149 mutex_enter(SD_MUTEX(un
));
21150 /* check whether cache flush should be suppressed */
21151 if (un
->un_f_suppress_cache_flush
== TRUE
) {
21152 mutex_exit(SD_MUTEX(un
));
21154 * suppress the cache flush if the device is told to do
21155 * so by sd.conf or disk table
21157 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_SYNCHRONIZE_CACHE: \
21158 skip the cache flush since suppress_cache_flush is %d!\n",
21159 un
->un_f_suppress_cache_flush
);
21161 if (is_async
== TRUE
) {
21162 /* invoke callback for asynchronous flush */
21163 (*dkc
->dkc_callback
)(dkc
->dkc_cookie
, 0);
21167 mutex_exit(SD_MUTEX(un
));
21170 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
21173 cdb
= kmem_zalloc(CDB_GROUP1
, KM_SLEEP
);
21174 cdb
->scc_cmd
= SCMD_SYNCHRONIZE_CACHE
;
21176 mutex_enter(SD_MUTEX(un
));
21177 if (dkc
!= NULL
&& un
->un_f_sync_nv_supported
&&
21178 (dkc
->dkc_flag
& FLUSH_VOLATILE
)) {
21180 * if the device supports SYNC_NV bit, turn on
21181 * the SYNC_NV bit to only flush volatile cache
21183 cdb
->cdb_un
.tag
|= SD_SYNC_NV_BIT
;
21185 mutex_exit(SD_MUTEX(un
));
21188 * First get some memory for the uscsi_cmd struct and cdb
21189 * and initialize for SYNCHRONIZE_CACHE cmd.
21191 uscmd
= kmem_zalloc(sizeof (struct uscsi_cmd
), KM_SLEEP
);
21192 uscmd
->uscsi_cdblen
= CDB_GROUP1
;
21193 uscmd
->uscsi_cdb
= (caddr_t
)cdb
;
21194 uscmd
->uscsi_bufaddr
= NULL
;
21195 uscmd
->uscsi_buflen
= 0;
21196 uscmd
->uscsi_rqbuf
= kmem_zalloc(SENSE_LENGTH
, KM_SLEEP
);
21197 uscmd
->uscsi_rqlen
= SENSE_LENGTH
;
21198 uscmd
->uscsi_rqresid
= SENSE_LENGTH
;
21199 uscmd
->uscsi_flags
= USCSI_RQENABLE
| USCSI_SILENT
;
21200 uscmd
->uscsi_timeout
= sd_io_time
;
21203 * Allocate an sd_uscsi_info struct and fill it with the info
21204 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
21205 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
21206 * since we allocate the buf here in this function, we do not
21207 * need to preserve the prior contents of b_private.
21208 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
21210 uip
= kmem_zalloc(sizeof (struct sd_uscsi_info
), KM_SLEEP
);
21211 uip
->ui_flags
= SD_PATH_DIRECT
;
21212 uip
->ui_cmdp
= uscmd
;
21214 bp
= getrbuf(KM_SLEEP
);
21215 bp
->b_private
= uip
;
21218 * Setup buffer to carry uscsi request.
21220 bp
->b_flags
= B_BUSY
;
21224 if (is_async
== TRUE
) {
21225 bp
->b_iodone
= sd_send_scsi_SYNCHRONIZE_CACHE_biodone
;
21226 uip
->ui_dkc
= *dkc
;
21229 bp
->b_edev
= SD_GET_DEV(un
);
21230 bp
->b_dev
= cmpdev(bp
->b_edev
); /* maybe unnecessary? */
21233 * Unset un_f_sync_cache_required flag
21235 mutex_enter(SD_MUTEX(un
));
21236 un
->un_f_sync_cache_required
= FALSE
;
21237 mutex_exit(SD_MUTEX(un
));
21239 (void) sd_uscsi_strategy(bp
);
21242 * If synchronous request, wait for completion
21243 * If async just return and let b_iodone callback
21245 * NOTE: On return, u_ncmds_in_driver will be decremented,
21246 * but it was also incremented in sd_uscsi_strategy(), so
21249 if (is_async
== FALSE
) {
21250 (void) biowait(bp
);
21251 rval
= sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp
);
21259 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf
*bp
)
21261 struct sd_uscsi_info
*uip
;
21262 struct uscsi_cmd
*uscmd
;
21263 uint8_t *sense_buf
;
21266 union scsi_cdb
*cdb
;
21268 uip
= (struct sd_uscsi_info
*)(bp
->b_private
);
21269 ASSERT(uip
!= NULL
);
21271 uscmd
= uip
->ui_cmdp
;
21272 ASSERT(uscmd
!= NULL
);
21274 sense_buf
= (uint8_t *)uscmd
->uscsi_rqbuf
;
21275 ASSERT(sense_buf
!= NULL
);
21277 un
= ddi_get_soft_state(sd_state
, SD_GET_INSTANCE_FROM_BUF(bp
));
21278 ASSERT(un
!= NULL
);
21280 cdb
= (union scsi_cdb
*)uscmd
->uscsi_cdb
;
21282 status
= geterror(bp
);
21285 break; /* Success! */
21287 switch (uscmd
->uscsi_status
) {
21288 case STATUS_RESERVATION_CONFLICT
:
21289 /* Ignore reservation conflict */
21294 if ((uscmd
->uscsi_rqstatus
== STATUS_GOOD
) &&
21295 (scsi_sense_key(sense_buf
) ==
21296 KEY_ILLEGAL_REQUEST
)) {
21297 /* Ignore Illegal Request error */
21298 if (cdb
->cdb_un
.tag
&SD_SYNC_NV_BIT
) {
21299 mutex_enter(SD_MUTEX(un
));
21300 un
->un_f_sync_nv_supported
= FALSE
;
21301 mutex_exit(SD_MUTEX(un
));
21303 SD_TRACE(SD_LOG_IO
, un
,
21304 "un_f_sync_nv_supported \
21305 is set to false.\n");
21309 mutex_enter(SD_MUTEX(un
));
21310 un
->un_f_sync_cache_supported
= FALSE
;
21311 mutex_exit(SD_MUTEX(un
));
21312 SD_TRACE(SD_LOG_IO
, un
,
21313 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
21314 un_f_sync_cache_supported set to false \
21315 with asc = %x, ascq = %x\n",
21316 scsi_sense_asc(sense_buf
),
21317 scsi_sense_ascq(sense_buf
));
21328 * Turn on the un_f_sync_cache_required flag
21329 * since the SYNC CACHE command failed
21331 mutex_enter(SD_MUTEX(un
));
21332 un
->un_f_sync_cache_required
= TRUE
;
21333 mutex_exit(SD_MUTEX(un
));
21336 * Don't log an error message if this device
21337 * has removable media.
21339 if (!un
->un_f_has_removable_media
) {
21340 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
21341 "SYNCHRONIZE CACHE command failed (%d)\n", status
);
21347 if (uip
->ui_dkc
.dkc_callback
!= NULL
) {
21348 (*uip
->ui_dkc
.dkc_callback
)(uip
->ui_dkc
.dkc_cookie
, status
);
21351 ASSERT((bp
->b_flags
& B_REMAPPED
) == 0);
21353 kmem_free(uip
, sizeof (struct sd_uscsi_info
));
21354 kmem_free(uscmd
->uscsi_rqbuf
, SENSE_LENGTH
);
21355 kmem_free(uscmd
->uscsi_cdb
, (size_t)uscmd
->uscsi_cdblen
);
21356 kmem_free(uscmd
, sizeof (struct uscsi_cmd
));
21363 * Function: sd_send_scsi_GET_CONFIGURATION
21365 * Description: Issues the get configuration command to the device.
21366 * Called from sd_check_for_writable_cd & sd_get_media_info
21367 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
21376 * Return Code: 0 - Success
21377 * errno return code from sd_ssc_send()
21379 * Context: Can sleep. Does not return until command is completed.
21384 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t
*ssc
, struct uscsi_cmd
*ucmdbuf
,
21385 uchar_t
*rqbuf
, uint_t rqbuflen
, uchar_t
*bufaddr
, uint_t buflen
,
21388 char cdb
[CDB_GROUP1
];
21392 ASSERT(ssc
!= NULL
);
21394 ASSERT(un
!= NULL
);
21395 ASSERT(!mutex_owned(SD_MUTEX(un
)));
21396 ASSERT(bufaddr
!= NULL
);
21397 ASSERT(ucmdbuf
!= NULL
);
21398 ASSERT(rqbuf
!= NULL
);
21400 SD_TRACE(SD_LOG_IO
, un
,
21401 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un
);
21403 bzero(cdb
, sizeof (cdb
));
21404 bzero(ucmdbuf
, sizeof (struct uscsi_cmd
));
21405 bzero(rqbuf
, rqbuflen
);
21406 bzero(bufaddr
, buflen
);
21409 * Set up cdb field for the get configuration command.
21411 cdb
[0] = SCMD_GET_CONFIGURATION
;
21412 cdb
[1] = 0x02; /* Requested Type */
21413 cdb
[8] = SD_PROFILE_HEADER_LEN
;
21414 ucmdbuf
->uscsi_cdb
= cdb
;
21415 ucmdbuf
->uscsi_cdblen
= CDB_GROUP1
;
21416 ucmdbuf
->uscsi_bufaddr
= (caddr_t
)bufaddr
;
21417 ucmdbuf
->uscsi_buflen
= buflen
;
21418 ucmdbuf
->uscsi_timeout
= sd_io_time
;
21419 ucmdbuf
->uscsi_rqbuf
= (caddr_t
)rqbuf
;
21420 ucmdbuf
->uscsi_rqlen
= rqbuflen
;
21421 ucmdbuf
->uscsi_flags
= USCSI_RQENABLE
|USCSI_SILENT
|USCSI_READ
;
21423 status
= sd_ssc_send(ssc
, ucmdbuf
, FKIOCTL
,
21424 UIO_SYSSPACE
, path_flag
);
21428 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
21429 break; /* Success! */
21431 switch (ucmdbuf
->uscsi_status
) {
21432 case STATUS_RESERVATION_CONFLICT
:
21444 SD_DUMP_MEMORY(un
, SD_LOG_IO
,
21445 "sd_send_scsi_GET_CONFIGURATION: data",
21446 (uchar_t
*)bufaddr
, SD_PROFILE_HEADER_LEN
, SD_LOG_HEX
);
21449 SD_TRACE(SD_LOG_IO
, un
,
21450 "sd_send_scsi_GET_CONFIGURATION: exit\n");
21456 * Function: sd_send_scsi_feature_GET_CONFIGURATION
21458 * Description: Issues the get configuration command to the device to
21459 * retrieve a specific feature. Called from
21460 * sd_check_for_writable_cd & sd_set_mmc_caps.
21469 * Return Code: 0 - Success
21470 * errno return code from sd_ssc_send()
21472 * Context: Can sleep. Does not return until command is completed.
21476 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t
*ssc
,
21477 struct uscsi_cmd
*ucmdbuf
, uchar_t
*rqbuf
, uint_t rqbuflen
,
21478 uchar_t
*bufaddr
, uint_t buflen
, char feature
, int path_flag
)
21480 char cdb
[CDB_GROUP1
];
21484 ASSERT(ssc
!= NULL
);
21486 ASSERT(un
!= NULL
);
21487 ASSERT(!mutex_owned(SD_MUTEX(un
)));
21488 ASSERT(bufaddr
!= NULL
);
21489 ASSERT(ucmdbuf
!= NULL
);
21490 ASSERT(rqbuf
!= NULL
);
21492 SD_TRACE(SD_LOG_IO
, un
,
21493 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un
);
21495 bzero(cdb
, sizeof (cdb
));
21496 bzero(ucmdbuf
, sizeof (struct uscsi_cmd
));
21497 bzero(rqbuf
, rqbuflen
);
21498 bzero(bufaddr
, buflen
);
21501 * Set up cdb field for the get configuration command.
21503 cdb
[0] = SCMD_GET_CONFIGURATION
;
21504 cdb
[1] = 0x02; /* Requested Type */
21507 ucmdbuf
->uscsi_cdb
= cdb
;
21508 ucmdbuf
->uscsi_cdblen
= CDB_GROUP1
;
21509 ucmdbuf
->uscsi_bufaddr
= (caddr_t
)bufaddr
;
21510 ucmdbuf
->uscsi_buflen
= buflen
;
21511 ucmdbuf
->uscsi_timeout
= sd_io_time
;
21512 ucmdbuf
->uscsi_rqbuf
= (caddr_t
)rqbuf
;
21513 ucmdbuf
->uscsi_rqlen
= rqbuflen
;
21514 ucmdbuf
->uscsi_flags
= USCSI_RQENABLE
|USCSI_SILENT
|USCSI_READ
;
21516 status
= sd_ssc_send(ssc
, ucmdbuf
, FKIOCTL
,
21517 UIO_SYSSPACE
, path_flag
);
21522 break; /* Success! */
21524 switch (ucmdbuf
->uscsi_status
) {
21525 case STATUS_RESERVATION_CONFLICT
:
21537 SD_DUMP_MEMORY(un
, SD_LOG_IO
,
21538 "sd_send_scsi_feature_GET_CONFIGURATION: data",
21539 (uchar_t
*)bufaddr
, SD_PROFILE_HEADER_LEN
, SD_LOG_HEX
);
21542 SD_TRACE(SD_LOG_IO
, un
,
21543 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
21550 * Function: sd_send_scsi_MODE_SENSE
21552 * Description: Utility function for issuing a scsi MODE SENSE command.
21553 * Note: This routine uses a consistent implementation for Group0,
21554 * Group1, and Group2 commands across all platforms. ATAPI devices
21555 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select
21557 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21558 * structure for this target.
21559 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
21560 * CDB_GROUP[1|2] (10 byte).
21561 * bufaddr - buffer for page data retrieved from the target.
21562 * buflen - size of page to be retrieved.
21563 * page_code - page code of data to be retrieved from the target.
21564 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21565 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21566 * to use the USCSI "direct" chain and bypass the normal
21569 * Return Code: 0 - Success
21570 * errno return code from sd_ssc_send()
21572 * Context: Can sleep. Does not return until command is completed.
21576 sd_send_scsi_MODE_SENSE(sd_ssc_t
*ssc
, int cdbsize
, uchar_t
*bufaddr
,
21577 size_t buflen
, uchar_t page_code
, int path_flag
)
21579 struct scsi_extended_sense sense_buf
;
21580 union scsi_cdb cdb
;
21581 struct uscsi_cmd ucmd_buf
;
21586 ASSERT(ssc
!= NULL
);
21588 ASSERT(un
!= NULL
);
21589 ASSERT(!mutex_owned(SD_MUTEX(un
)));
21590 ASSERT(bufaddr
!= NULL
);
21591 ASSERT((cdbsize
== CDB_GROUP0
) || (cdbsize
== CDB_GROUP1
) ||
21592 (cdbsize
== CDB_GROUP2
));
21594 SD_TRACE(SD_LOG_IO
, un
,
21595 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un
);
21597 bzero(&cdb
, sizeof (cdb
));
21598 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
21599 bzero(&sense_buf
, sizeof (struct scsi_extended_sense
));
21600 bzero(bufaddr
, buflen
);
21602 if (cdbsize
== CDB_GROUP0
) {
21603 cdb
.scc_cmd
= SCMD_MODE_SENSE
;
21604 cdb
.cdb_opaque
[2] = page_code
;
21605 FORMG0COUNT(&cdb
, buflen
);
21606 headlen
= MODE_HEADER_LENGTH
;
21608 cdb
.scc_cmd
= SCMD_MODE_SENSE_G1
;
21609 cdb
.cdb_opaque
[2] = page_code
;
21610 FORMG1COUNT(&cdb
, buflen
);
21611 headlen
= MODE_HEADER_LENGTH_GRP2
;
21614 ASSERT(headlen
<= buflen
);
21615 SD_FILL_SCSI1_LUN_CDB(un
, &cdb
);
21617 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
21618 ucmd_buf
.uscsi_cdblen
= (uchar_t
)cdbsize
;
21619 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)bufaddr
;
21620 ucmd_buf
.uscsi_buflen
= buflen
;
21621 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
21622 ucmd_buf
.uscsi_rqlen
= sizeof (struct scsi_extended_sense
);
21623 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_READ
| USCSI_SILENT
;
21624 ucmd_buf
.uscsi_timeout
= 60;
21626 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
21627 UIO_SYSSPACE
, path_flag
);
21632 * sr_check_wp() uses 0x3f page code and check the header of
21633 * mode page to determine if target device is write-protected.
21634 * But some USB devices return 0 bytes for 0x3f page code. For
21635 * this case, make sure that mode page header is returned at
21638 if (buflen
- ucmd_buf
.uscsi_resid
< headlen
) {
21640 sd_ssc_set_info(ssc
, SSC_FLAGS_INVALID_DATA
, -1,
21641 "mode page header is not returned");
21643 break; /* Success! */
21645 switch (ucmd_buf
.uscsi_status
) {
21646 case STATUS_RESERVATION_CONFLICT
:
21658 SD_DUMP_MEMORY(un
, SD_LOG_IO
, "sd_send_scsi_MODE_SENSE: data",
21659 (uchar_t
*)bufaddr
, buflen
, SD_LOG_HEX
);
21661 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_MODE_SENSE: exit\n");
21668 * Function: sd_send_scsi_MODE_SELECT
21670 * Description: Utility function for issuing a scsi MODE SELECT command.
21671 * Note: This routine uses a consistent implementation for Group0,
21672 * Group1, and Group2 commands across all platforms. ATAPI devices
21673 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select
21675 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21676 * structure for this target.
21677 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
21678 * CDB_GROUP[1|2] (10 byte).
21679 * bufaddr - buffer for page data retrieved from the target.
21680 * buflen - size of page to be retrieved.
21681 * save_page - boolean to determin if SP bit should be set.
21682 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21683 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21684 * to use the USCSI "direct" chain and bypass the normal
21687 * Return Code: 0 - Success
21688 * errno return code from sd_ssc_send()
21690 * Context: Can sleep. Does not return until command is completed.
21694 sd_send_scsi_MODE_SELECT(sd_ssc_t
*ssc
, int cdbsize
, uchar_t
*bufaddr
,
21695 size_t buflen
, uchar_t save_page
, int path_flag
)
21697 struct scsi_extended_sense sense_buf
;
21698 union scsi_cdb cdb
;
21699 struct uscsi_cmd ucmd_buf
;
21703 ASSERT(ssc
!= NULL
);
21705 ASSERT(un
!= NULL
);
21706 ASSERT(!mutex_owned(SD_MUTEX(un
)));
21707 ASSERT(bufaddr
!= NULL
);
21708 ASSERT((cdbsize
== CDB_GROUP0
) || (cdbsize
== CDB_GROUP1
) ||
21709 (cdbsize
== CDB_GROUP2
));
21711 SD_TRACE(SD_LOG_IO
, un
,
21712 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un
);
21714 bzero(&cdb
, sizeof (cdb
));
21715 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
21716 bzero(&sense_buf
, sizeof (struct scsi_extended_sense
));
21718 /* Set the PF bit for many third party drives */
21719 cdb
.cdb_opaque
[1] = 0x10;
21721 /* Set the savepage(SP) bit if given */
21722 if (save_page
== SD_SAVE_PAGE
) {
21723 cdb
.cdb_opaque
[1] |= 0x01;
21726 if (cdbsize
== CDB_GROUP0
) {
21727 cdb
.scc_cmd
= SCMD_MODE_SELECT
;
21728 FORMG0COUNT(&cdb
, buflen
);
21730 cdb
.scc_cmd
= SCMD_MODE_SELECT_G1
;
21731 FORMG1COUNT(&cdb
, buflen
);
21734 SD_FILL_SCSI1_LUN_CDB(un
, &cdb
);
21736 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
21737 ucmd_buf
.uscsi_cdblen
= (uchar_t
)cdbsize
;
21738 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)bufaddr
;
21739 ucmd_buf
.uscsi_buflen
= buflen
;
21740 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
21741 ucmd_buf
.uscsi_rqlen
= sizeof (struct scsi_extended_sense
);
21742 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_WRITE
| USCSI_SILENT
;
21743 ucmd_buf
.uscsi_timeout
= 60;
21745 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
21746 UIO_SYSSPACE
, path_flag
);
21750 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
21751 break; /* Success! */
21753 switch (ucmd_buf
.uscsi_status
) {
21754 case STATUS_RESERVATION_CONFLICT
:
21766 SD_DUMP_MEMORY(un
, SD_LOG_IO
, "sd_send_scsi_MODE_SELECT: data",
21767 (uchar_t
*)bufaddr
, buflen
, SD_LOG_HEX
);
21769 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_MODE_SELECT: exit\n");
21776 * Function: sd_send_scsi_RDWR
21778 * Description: Issue a scsi READ or WRITE command with the given parameters.
21780 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21781 * structure for this target.
21782 * cmd: SCMD_READ or SCMD_WRITE
21783 * bufaddr: Address of caller's buffer to receive the RDWR data
21784 * buflen: Length of caller's buffer receive the RDWR data.
21785 * start_block: Block number for the start of the RDWR operation.
21786 * (Assumes target-native block size.)
21787 * residp: Pointer to variable to receive the redisual of the
21788 * RDWR operation (may be NULL of no residual requested).
21789 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21790 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21791 * to use the USCSI "direct" chain and bypass the normal
21794 * Return Code: 0 - Success
21795 * errno return code from sd_ssc_send()
21797 * Context: Can sleep. Does not return until command is completed.
21801 sd_send_scsi_RDWR(sd_ssc_t
*ssc
, uchar_t cmd
, void *bufaddr
,
21802 size_t buflen
, daddr_t start_block
, int path_flag
)
21804 struct scsi_extended_sense sense_buf
;
21805 union scsi_cdb cdb
;
21806 struct uscsi_cmd ucmd_buf
;
21807 uint32_t block_count
;
21813 ASSERT(ssc
!= NULL
);
21815 ASSERT(un
!= NULL
);
21816 ASSERT(!mutex_owned(SD_MUTEX(un
)));
21817 ASSERT(bufaddr
!= NULL
);
21818 ASSERT((cmd
== SCMD_READ
) || (cmd
== SCMD_WRITE
));
21820 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_RDWR: entry: un:0x%p\n", un
);
21822 if (un
->un_f_tgt_blocksize_is_valid
!= TRUE
) {
21826 mutex_enter(SD_MUTEX(un
));
21827 block_count
= SD_BYTES2TGTBLOCKS(un
, buflen
);
21828 mutex_exit(SD_MUTEX(un
));
21830 flag
= (cmd
== SCMD_READ
) ? USCSI_READ
: USCSI_WRITE
;
21832 SD_INFO(SD_LOG_IO
, un
, "sd_send_scsi_RDWR: "
21833 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
21834 bufaddr
, buflen
, start_block
, block_count
);
21836 bzero(&cdb
, sizeof (cdb
));
21837 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
21838 bzero(&sense_buf
, sizeof (struct scsi_extended_sense
));
21840 /* Compute CDB size to use */
21841 if (start_block
> 0xffffffff)
21842 cdbsize
= CDB_GROUP4
;
21843 else if ((start_block
& 0xFFE00000) ||
21844 (un
->un_f_cfg_is_atapi
== TRUE
))
21845 cdbsize
= CDB_GROUP1
;
21847 cdbsize
= CDB_GROUP0
;
21850 case CDB_GROUP0
: /* 6-byte CDBs */
21852 FORMG0ADDR(&cdb
, start_block
);
21853 FORMG0COUNT(&cdb
, block_count
);
21855 case CDB_GROUP1
: /* 10-byte CDBs */
21856 cdb
.scc_cmd
= cmd
| SCMD_GROUP1
;
21857 FORMG1ADDR(&cdb
, start_block
);
21858 FORMG1COUNT(&cdb
, block_count
);
21860 case CDB_GROUP4
: /* 16-byte CDBs */
21861 cdb
.scc_cmd
= cmd
| SCMD_GROUP4
;
21862 FORMG4LONGADDR(&cdb
, (uint64_t)start_block
);
21863 FORMG4COUNT(&cdb
, block_count
);
21865 case CDB_GROUP5
: /* 12-byte CDBs (currently unsupported) */
21867 /* All others reserved */
21871 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */
21872 SD_FILL_SCSI1_LUN_CDB(un
, &cdb
);
21874 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
21875 ucmd_buf
.uscsi_cdblen
= (uchar_t
)cdbsize
;
21876 ucmd_buf
.uscsi_bufaddr
= bufaddr
;
21877 ucmd_buf
.uscsi_buflen
= buflen
;
21878 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
21879 ucmd_buf
.uscsi_rqlen
= sizeof (struct scsi_extended_sense
);
21880 ucmd_buf
.uscsi_flags
= flag
| USCSI_RQENABLE
| USCSI_SILENT
;
21881 ucmd_buf
.uscsi_timeout
= 60;
21882 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
21883 UIO_SYSSPACE
, path_flag
);
21887 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
21888 break; /* Success! */
21890 switch (ucmd_buf
.uscsi_status
) {
21891 case STATUS_RESERVATION_CONFLICT
:
21903 SD_DUMP_MEMORY(un
, SD_LOG_IO
, "sd_send_scsi_RDWR: data",
21904 (uchar_t
*)bufaddr
, buflen
, SD_LOG_HEX
);
21907 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_RDWR: exit\n");
21914 * Function: sd_send_scsi_LOG_SENSE
21916 * Description: Issue a scsi LOG_SENSE command with the given parameters.
21918 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21919 * structure for this target.
21921 * Return Code: 0 - Success
21922 * errno return code from sd_ssc_send()
21924 * Context: Can sleep. Does not return until command is completed.
21928 sd_send_scsi_LOG_SENSE(sd_ssc_t
*ssc
, uchar_t
*bufaddr
, uint16_t buflen
,
21929 uchar_t page_code
, uchar_t page_control
, uint16_t param_ptr
,
21933 struct scsi_extended_sense sense_buf
;
21934 union scsi_cdb cdb
;
21935 struct uscsi_cmd ucmd_buf
;
21939 ASSERT(ssc
!= NULL
);
21941 ASSERT(un
!= NULL
);
21942 ASSERT(!mutex_owned(SD_MUTEX(un
)));
21944 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un
);
21946 bzero(&cdb
, sizeof (cdb
));
21947 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
21948 bzero(&sense_buf
, sizeof (struct scsi_extended_sense
));
21950 cdb
.scc_cmd
= SCMD_LOG_SENSE_G1
;
21951 cdb
.cdb_opaque
[2] = (page_control
<< 6) | page_code
;
21952 cdb
.cdb_opaque
[5] = (uchar_t
)((param_ptr
& 0xFF00) >> 8);
21953 cdb
.cdb_opaque
[6] = (uchar_t
)(param_ptr
& 0x00FF);
21954 FORMG1COUNT(&cdb
, buflen
);
21956 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
21957 ucmd_buf
.uscsi_cdblen
= CDB_GROUP1
;
21958 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)bufaddr
;
21959 ucmd_buf
.uscsi_buflen
= buflen
;
21960 ucmd_buf
.uscsi_rqbuf
= (caddr_t
)&sense_buf
;
21961 ucmd_buf
.uscsi_rqlen
= sizeof (struct scsi_extended_sense
);
21962 ucmd_buf
.uscsi_flags
= USCSI_RQENABLE
| USCSI_READ
| USCSI_SILENT
;
21963 ucmd_buf
.uscsi_timeout
= 60;
21965 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
21966 UIO_SYSSPACE
, path_flag
);
21972 switch (ucmd_buf
.uscsi_status
) {
21973 case STATUS_RESERVATION_CONFLICT
:
21977 if ((ucmd_buf
.uscsi_rqstatus
== STATUS_GOOD
) &&
21978 (scsi_sense_key((uint8_t *)&sense_buf
) ==
21979 KEY_ILLEGAL_REQUEST
) &&
21980 (scsi_sense_asc((uint8_t *)&sense_buf
) == 0x24)) {
21982 * ASC 0x24: INVALID FIELD IN CDB
21984 switch (page_code
) {
21985 case START_STOP_CYCLE_PAGE
:
21987 * The start stop cycle counter is
21988 * implemented as page 0x31 in earlier
21989 * generation disks. In new generation
21990 * disks the start stop cycle counter is
21991 * implemented as page 0xE. To properly
21992 * handle this case if an attempt for
21993 * log page 0xE is made and fails we
21994 * will try again using page 0x31.
21996 * Network storage BU committed to
21997 * maintain the page 0x31 for this
21998 * purpose and will not have any other
21999 * page implemented with page code 0x31
22000 * until all disks transition to the
22003 mutex_enter(SD_MUTEX(un
));
22004 un
->un_start_stop_cycle_page
=
22005 START_STOP_CYCLE_VU_PAGE
;
22006 cdb
.cdb_opaque
[2] =
22007 (char)(page_control
<< 6) |
22008 un
->un_start_stop_cycle_page
;
22009 mutex_exit(SD_MUTEX(un
));
22010 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
22011 status
= sd_ssc_send(
22012 ssc
, &ucmd_buf
, FKIOCTL
,
22013 UIO_SYSSPACE
, path_flag
);
22016 case TEMPERATURE_PAGE
:
22033 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
22034 SD_DUMP_MEMORY(un
, SD_LOG_IO
, "sd_send_scsi_LOG_SENSE: data",
22035 (uchar_t
*)bufaddr
, buflen
, SD_LOG_HEX
);
22038 SD_TRACE(SD_LOG_IO
, un
, "sd_send_scsi_LOG_SENSE: exit\n");
22045 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
22047 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
22049 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
22050 * structure for this target.
22055 * Return Code: 0 - Success
22056 * errno return code from sd_ssc_send()
22058 * Context: Can sleep. Does not return until command is completed.
22062 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t
*ssc
, uchar_t
*bufaddr
,
22063 size_t buflen
, uchar_t class_req
)
22065 union scsi_cdb cdb
;
22066 struct uscsi_cmd ucmd_buf
;
22070 ASSERT(ssc
!= NULL
);
22072 ASSERT(un
!= NULL
);
22073 ASSERT(!mutex_owned(SD_MUTEX(un
)));
22074 ASSERT(bufaddr
!= NULL
);
22076 SD_TRACE(SD_LOG_IO
, un
,
22077 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un
);
22079 bzero(&cdb
, sizeof (cdb
));
22080 bzero(&ucmd_buf
, sizeof (ucmd_buf
));
22081 bzero(bufaddr
, buflen
);
22083 cdb
.scc_cmd
= SCMD_GET_EVENT_STATUS_NOTIFICATION
;
22084 cdb
.cdb_opaque
[1] = 1; /* polled */
22085 cdb
.cdb_opaque
[4] = class_req
;
22086 FORMG1COUNT(&cdb
, buflen
);
22088 ucmd_buf
.uscsi_cdb
= (char *)&cdb
;
22089 ucmd_buf
.uscsi_cdblen
= CDB_GROUP1
;
22090 ucmd_buf
.uscsi_bufaddr
= (caddr_t
)bufaddr
;
22091 ucmd_buf
.uscsi_buflen
= buflen
;
22092 ucmd_buf
.uscsi_rqbuf
= NULL
;
22093 ucmd_buf
.uscsi_rqlen
= 0;
22094 ucmd_buf
.uscsi_flags
= USCSI_READ
| USCSI_SILENT
;
22095 ucmd_buf
.uscsi_timeout
= 60;
22097 status
= sd_ssc_send(ssc
, &ucmd_buf
, FKIOCTL
,
22098 UIO_SYSSPACE
, SD_PATH_DIRECT
);
22101 * Only handle status == 0, the upper-level caller
22102 * will put different assessment based on the context.
22105 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
22107 if (ucmd_buf
.uscsi_resid
!= 0) {
22112 SD_TRACE(SD_LOG_IO
, un
,
22113 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");
22120 sd_gesn_media_data_valid(uchar_t
*data
)
22124 len
= (data
[1] << 8) | data
[0];
22125 return ((len
>= 6) &&
22126 ((data
[2] & SD_GESN_HEADER_NEA
) == 0) &&
22127 ((data
[2] & SD_GESN_HEADER_CLASS
) == SD_GESN_MEDIA_CLASS
) &&
22128 ((data
[3] & (1 << SD_GESN_MEDIA_CLASS
)) != 0));
22133 * Function: sdioctl
22135 * Description: Driver's ioctl(9e) entry point function.
22137 * Arguments: dev - device number
22138 * cmd - ioctl operation to be performed
22139 * arg - user argument, contains data to be set or reference
22140 * parameter for get
22141 * flag - bit flag, indicating open settings, 32/64 bit type
22142 * cred_p - user credential pointer
22143 * rval_p - calling process return value (OPT)
22145 * Return Code: EINVAL
22153 * Context: Called from the device switch at normal priority.
22157 sdioctl(dev_t dev
, int cmd
, intptr_t arg
, int flag
, cred_t
*cred_p
, int *rval_p
)
22159 struct sd_lun
*un
= NULL
;
22163 int tmprval
= EINVAL
;
22164 boolean_t is_valid
;
22168 * All device accesses go thru sdstrategy where we check on suspend
22171 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
22175 ASSERT(!mutex_owned(SD_MUTEX(un
)));
22177 /* Initialize sd_ssc_t for internal uscsi commands */
22178 ssc
= sd_ssc_init(un
);
22180 is_valid
= SD_IS_VALID_LABEL(un
);
22183 * Moved this wait from sd_uscsi_strategy to here for
22184 * reasons of deadlock prevention. Internal driver commands,
22185 * specifically those to change a devices power level, result
22186 * in a call to sd_uscsi_strategy.
22188 mutex_enter(SD_MUTEX(un
));
22189 while ((un
->un_state
== SD_STATE_SUSPENDED
) ||
22190 (un
->un_state
== SD_STATE_PM_CHANGING
)) {
22191 cv_wait(&un
->un_suspend_cv
, SD_MUTEX(un
));
22194 * Twiddling the counter here protects commands from now
22195 * through to the top of sd_uscsi_strategy. Without the
22196 * counter inc. a power down, for example, could get in
22197 * after the above check for state is made and before
22198 * execution gets to the top of sd_uscsi_strategy.
22199 * That would cause problems.
22201 un
->un_ncmds_in_driver
++;
22204 (flag
& (FNDELAY
| FNONBLOCK
))) {
22206 case DKIOCGGEOM
: /* SD_PATH_DIRECT */
22208 case DKIOCGEXTVTOC
:
22210 case DKIOCPARTINFO
:
22211 case DKIOCEXTPARTINFO
:
22215 case DKIOCPARTITION
:
22217 case DKIOCSEXTVTOC
:
22221 case DKIOCG_PHYGEOM
:
22222 case DKIOCG_VIRTGEOM
:
22223 #if defined(__i386) || defined(__amd64)
22224 case DKIOCSETEXTPART
:
22226 /* let cmlb handle it */
22227 goto skip_ready_valid
;
22232 case CDROMPLAYTRKIND
:
22233 case CDROMREADTOCHDR
:
22234 case CDROMREADTOCENTRY
:
22239 case CDROMREADMODE2
:
22240 case CDROMREADMODE1
:
22241 case CDROMREADOFFSET
:
22242 case CDROMSBLKMODE
:
22243 case CDROMGBLKMODE
:
22244 case CDROMGDRVSPEED
:
22245 case CDROMSDRVSPEED
:
22250 un
->un_ncmds_in_driver
--;
22251 ASSERT(un
->un_ncmds_in_driver
>= 0);
22252 mutex_exit(SD_MUTEX(un
));
22254 goto done_without_assess
;
22260 if (!un
->un_f_eject_media_supported
) {
22261 un
->un_ncmds_in_driver
--;
22262 ASSERT(un
->un_ncmds_in_driver
>= 0);
22263 mutex_exit(SD_MUTEX(un
));
22265 goto done_without_assess
;
22268 case DKIOCFLUSHWRITECACHE
:
22269 mutex_exit(SD_MUTEX(un
));
22270 err
= sd_send_scsi_TEST_UNIT_READY(ssc
, 0);
22272 mutex_enter(SD_MUTEX(un
));
22273 un
->un_ncmds_in_driver
--;
22274 ASSERT(un
->un_ncmds_in_driver
>= 0);
22275 mutex_exit(SD_MUTEX(un
));
22277 goto done_quick_assess
;
22279 mutex_enter(SD_MUTEX(un
));
22281 case DKIOCREMOVABLE
:
22282 case DKIOCHOTPLUGGABLE
:
22284 case DKIOCGMEDIAINFO
:
22285 case DKIOCGMEDIAINFOEXT
:
22286 case MHIOCENFAILFAST
:
22290 case MHIOCGRP_INKEYS
:
22291 case MHIOCGRP_INRESV
:
22292 case MHIOCGRP_REGISTER
:
22293 case MHIOCGRP_RESERVE
:
22294 case MHIOCGRP_PREEMPTANDABORT
:
22295 case MHIOCGRP_REGISTERANDIGNOREKEY
:
22296 case CDROMCLOSETRAY
:
22298 goto skip_ready_valid
;
22303 mutex_exit(SD_MUTEX(un
));
22304 err
= sd_ready_and_valid(ssc
, SDPART(dev
));
22305 mutex_enter(SD_MUTEX(un
));
22307 if (err
!= SD_READY_VALID
) {
22310 case CDROMGDRVSPEED
:
22311 case CDROMSDRVSPEED
:
22312 case FDEJECT
: /* for eject command */
22315 case DKIOCREMOVABLE
:
22316 case DKIOCHOTPLUGGABLE
:
22319 if (un
->un_f_has_removable_media
) {
22322 /* Do not map SD_RESERVED_BY_OTHERS to EIO */
22323 if (err
== SD_RESERVED_BY_OTHERS
) {
22329 un
->un_ncmds_in_driver
--;
22330 ASSERT(un
->un_ncmds_in_driver
>= 0);
22331 mutex_exit(SD_MUTEX(un
));
22333 goto done_without_assess
;
22339 mutex_exit(SD_MUTEX(un
));
22343 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCINFO\n");
22344 err
= sd_dkio_ctrl_info(dev
, (caddr_t
)arg
, flag
);
22347 case DKIOCGMEDIAINFO
:
22348 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCGMEDIAINFO\n");
22349 err
= sd_get_media_info(dev
, (caddr_t
)arg
, flag
);
22352 case DKIOCGMEDIAINFOEXT
:
22353 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCGMEDIAINFOEXT\n");
22354 err
= sd_get_media_info_ext(dev
, (caddr_t
)arg
, flag
);
22359 case DKIOCGEXTVTOC
:
22361 case DKIOCPARTINFO
:
22362 case DKIOCEXTPARTINFO
:
22366 case DKIOCPARTITION
:
22368 case DKIOCSEXTVTOC
:
22372 case DKIOCG_PHYGEOM
:
22373 case DKIOCG_VIRTGEOM
:
22374 #if defined(__i386) || defined(__amd64)
22375 case DKIOCSETEXTPART
:
22377 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOC %d\n", cmd
);
22379 /* TUR should spin up */
22381 if (un
->un_f_has_removable_media
)
22382 err
= sd_send_scsi_TEST_UNIT_READY(ssc
,
22383 SD_CHECK_FOR_MEDIA
);
22386 err
= sd_send_scsi_TEST_UNIT_READY(ssc
, 0);
22389 goto done_with_assess
;
22391 err
= cmlb_ioctl(un
->un_cmlbhandle
, dev
,
22392 cmd
, arg
, flag
, cred_p
, rval_p
, (void *)SD_PATH_DIRECT
);
22395 ((cmd
== DKIOCSETEFI
) ||
22396 (un
->un_f_pkstats_enabled
) &&
22397 (cmd
== DKIOCSAPART
|| cmd
== DKIOCSVTOC
||
22398 cmd
== DKIOCSEXTVTOC
))) {
22400 tmprval
= cmlb_validate(un
->un_cmlbhandle
, CMLB_SILENT
,
22401 (void *)SD_PATH_DIRECT
);
22402 if ((tmprval
== 0) && un
->un_f_pkstats_enabled
) {
22404 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
22405 "sd_ioctl: un:0x%p pstats created and "
22410 if ((cmd
== DKIOCSVTOC
|| cmd
== DKIOCSEXTVTOC
) ||
22411 ((cmd
== DKIOCSETEFI
) && (tmprval
== 0))) {
22413 mutex_enter(SD_MUTEX(un
));
22414 if (un
->un_f_devid_supported
&&
22415 (un
->un_f_opt_fab_devid
== TRUE
)) {
22416 if (un
->un_devid
== NULL
) {
22417 sd_register_devid(ssc
, SD_DEVINFO(un
),
22418 SD_TARGET_IS_UNRESERVED
);
22421 * The device id for this disk
22422 * has been fabricated. The
22423 * device id must be preserved
22424 * by writing it back out to
22427 if (sd_write_deviceid(ssc
) != 0) {
22428 ddi_devid_free(un
->un_devid
);
22429 un
->un_devid
= NULL
;
22433 mutex_exit(SD_MUTEX(un
));
22439 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCLOCK\n");
22440 err
= sd_send_scsi_DOORLOCK(ssc
, SD_REMOVAL_PREVENT
,
22442 goto done_with_assess
;
22445 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCUNLOCK\n");
22446 err
= sd_send_scsi_DOORLOCK(ssc
, SD_REMOVAL_ALLOW
,
22448 goto done_with_assess
;
22451 enum dkio_state state
;
22452 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCSTATE\n");
22454 if (ddi_copyin((void *)arg
, &state
, sizeof (int), flag
) != 0) {
22457 err
= sd_check_media(dev
, state
);
22459 if (ddi_copyout(&un
->un_mediastate
, (void *)arg
,
22460 sizeof (int), flag
) != 0)
22467 case DKIOCREMOVABLE
:
22468 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCREMOVABLE\n");
22469 i
= un
->un_f_has_removable_media
? 1 : 0;
22470 if (ddi_copyout(&i
, (void *)arg
, sizeof (int), flag
) != 0) {
22477 case DKIOCHOTPLUGGABLE
:
22478 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCHOTPLUGGABLE\n");
22479 i
= un
->un_f_is_hotpluggable
? 1 : 0;
22480 if (ddi_copyout(&i
, (void *)arg
, sizeof (int), flag
) != 0) {
22487 case DKIOCREADONLY
:
22488 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCREADONLY\n");
22490 if ((ISCD(un
) && !un
->un_f_mmc_writable_media
) ||
22491 (sr_check_wp(dev
) != 0)) {
22494 if (ddi_copyout(&i
, (void *)arg
, sizeof (int), flag
) != 0) {
22501 case DKIOCGTEMPERATURE
:
22502 SD_TRACE(SD_LOG_IOCTL
, un
, "DKIOCGTEMPERATURE\n");
22503 err
= sd_dkio_get_temp(dev
, (caddr_t
)arg
, flag
);
22506 case MHIOCENFAILFAST
:
22507 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCENFAILFAST\n");
22508 if ((err
= drv_priv(cred_p
)) == 0) {
22509 err
= sd_mhdioc_failfast(dev
, (caddr_t
)arg
, flag
);
22514 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCTKOWN\n");
22515 if ((err
= drv_priv(cred_p
)) == 0) {
22516 err
= sd_mhdioc_takeown(dev
, (caddr_t
)arg
, flag
);
22521 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCRELEASE\n");
22522 if ((err
= drv_priv(cred_p
)) == 0) {
22523 err
= sd_mhdioc_release(dev
);
22528 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCSTATUS\n");
22529 if ((err
= drv_priv(cred_p
)) == 0) {
22530 switch (sd_send_scsi_TEST_UNIT_READY(ssc
, 0)) {
22537 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
22541 goto done_with_assess
;
22546 case MHIOCQRESERVE
:
22547 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCQRESERVE\n");
22548 if ((err
= drv_priv(cred_p
)) == 0) {
22549 err
= sd_reserve_release(dev
, SD_RESERVE
);
22553 case MHIOCREREGISTERDEVID
:
22554 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCREREGISTERDEVID\n");
22555 if (drv_priv(cred_p
) == EPERM
) {
22557 } else if (!un
->un_f_devid_supported
) {
22560 err
= sd_mhdioc_register_devid(dev
);
22564 case MHIOCGRP_INKEYS
:
22565 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCGRP_INKEYS\n");
22566 if (((err
= drv_priv(cred_p
)) != EPERM
) && arg
!= NULL
) {
22567 if (un
->un_reservation_type
== SD_SCSI2_RESERVATION
) {
22570 err
= sd_mhdioc_inkeys(dev
, (caddr_t
)arg
,
22576 case MHIOCGRP_INRESV
:
22577 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCGRP_INRESV\n");
22578 if (((err
= drv_priv(cred_p
)) != EPERM
) && arg
!= NULL
) {
22579 if (un
->un_reservation_type
== SD_SCSI2_RESERVATION
) {
22582 err
= sd_mhdioc_inresv(dev
, (caddr_t
)arg
, flag
);
22587 case MHIOCGRP_REGISTER
:
22588 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCGRP_REGISTER\n");
22589 if ((err
= drv_priv(cred_p
)) != EPERM
) {
22590 if (un
->un_reservation_type
== SD_SCSI2_RESERVATION
) {
22592 } else if (arg
!= NULL
) {
22593 mhioc_register_t reg
;
22594 if (ddi_copyin((void *)arg
, ®
,
22595 sizeof (mhioc_register_t
), flag
) != 0) {
22599 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22600 ssc
, SD_SCSI3_REGISTER
,
22603 goto done_with_assess
;
22609 case MHIOCGRP_RESERVE
:
22610 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCGRP_RESERVE\n");
22611 if ((err
= drv_priv(cred_p
)) != EPERM
) {
22612 if (un
->un_reservation_type
== SD_SCSI2_RESERVATION
) {
22614 } else if (arg
!= NULL
) {
22615 mhioc_resv_desc_t resv_desc
;
22616 if (ddi_copyin((void *)arg
, &resv_desc
,
22617 sizeof (mhioc_resv_desc_t
), flag
) != 0) {
22621 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22622 ssc
, SD_SCSI3_RESERVE
,
22623 (uchar_t
*)&resv_desc
);
22625 goto done_with_assess
;
22631 case MHIOCGRP_PREEMPTANDABORT
:
22632 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCGRP_PREEMPTANDABORT\n");
22633 if ((err
= drv_priv(cred_p
)) != EPERM
) {
22634 if (un
->un_reservation_type
== SD_SCSI2_RESERVATION
) {
22636 } else if (arg
!= NULL
) {
22637 mhioc_preemptandabort_t preempt_abort
;
22638 if (ddi_copyin((void *)arg
, &preempt_abort
,
22639 sizeof (mhioc_preemptandabort_t
),
22644 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22645 ssc
, SD_SCSI3_PREEMPTANDABORT
,
22646 (uchar_t
*)&preempt_abort
);
22648 goto done_with_assess
;
22654 case MHIOCGRP_REGISTERANDIGNOREKEY
:
22655 SD_TRACE(SD_LOG_IOCTL
, un
, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
22656 if ((err
= drv_priv(cred_p
)) != EPERM
) {
22657 if (un
->un_reservation_type
== SD_SCSI2_RESERVATION
) {
22659 } else if (arg
!= NULL
) {
22660 mhioc_registerandignorekey_t r_and_i
;
22661 if (ddi_copyin((void *)arg
, (void *)&r_and_i
,
22662 sizeof (mhioc_registerandignorekey_t
),
22667 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22668 ssc
, SD_SCSI3_REGISTERANDIGNOREKEY
,
22669 (uchar_t
*)&r_and_i
);
22671 goto done_with_assess
;
22678 SD_TRACE(SD_LOG_IOCTL
, un
, "USCSICMD\n");
22679 cr
= ddi_get_cred();
22680 if ((drv_priv(cred_p
) != 0) && (drv_priv(cr
) != 0)) {
22683 enum uio_seg uioseg
;
22685 uioseg
= (flag
& FKIOCTL
) ? UIO_SYSSPACE
:
22687 if (un
->un_f_format_in_progress
== TRUE
) {
22692 err
= sd_ssc_send(ssc
,
22693 (struct uscsi_cmd
*)arg
,
22694 flag
, uioseg
, SD_PATH_STANDARD
);
22696 goto done_with_assess
;
22698 sd_ssc_assessment(ssc
, SD_FMT_STANDARD
);
22704 SD_TRACE(SD_LOG_IOCTL
, un
, "PAUSE-RESUME\n");
22708 err
= sr_pause_resume(dev
, cmd
);
22713 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMPLAYMSF\n");
22717 err
= sr_play_msf(dev
, (caddr_t
)arg
, flag
);
22721 case CDROMPLAYTRKIND
:
22722 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMPLAYTRKIND\n");
22723 #if defined(__i386) || defined(__amd64)
22725 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
22727 if (!ISCD(un
) || (un
->un_f_cfg_is_atapi
== TRUE
)) {
22733 err
= sr_play_trkind(dev
, (caddr_t
)arg
, flag
);
22737 case CDROMREADTOCHDR
:
22738 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMREADTOCHDR\n");
22742 err
= sr_read_tochdr(dev
, (caddr_t
)arg
, flag
);
22746 case CDROMREADTOCENTRY
:
22747 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMREADTOCENTRY\n");
22751 err
= sr_read_tocentry(dev
, (caddr_t
)arg
, flag
);
22756 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMSTOP\n");
22760 err
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_START_STOP
,
22761 SD_TARGET_STOP
, SD_PATH_STANDARD
);
22762 goto done_with_assess
;
22767 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMSTART\n");
22771 err
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_START_STOP
,
22772 SD_TARGET_START
, SD_PATH_STANDARD
);
22773 goto done_with_assess
;
22777 case CDROMCLOSETRAY
:
22778 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMCLOSETRAY\n");
22782 err
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_START_STOP
,
22783 SD_TARGET_CLOSE
, SD_PATH_STANDARD
);
22784 goto done_with_assess
;
22788 case FDEJECT
: /* for eject command */
22791 SD_TRACE(SD_LOG_IOCTL
, un
, "EJECT\n");
22792 if (!un
->un_f_eject_media_supported
) {
22795 err
= sr_eject(dev
);
22800 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMVOLCTRL\n");
22804 err
= sr_volume_ctrl(dev
, (caddr_t
)arg
, flag
);
22809 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMSUBCHNL\n");
22813 err
= sr_read_subchannel(dev
, (caddr_t
)arg
, flag
);
22817 case CDROMREADMODE2
:
22818 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMREADMODE2\n");
22821 } else if (un
->un_f_cfg_is_atapi
== TRUE
) {
22823 * If the drive supports READ CD, use that instead of
22824 * switching the LBA size via a MODE SELECT
22827 err
= sr_read_cd_mode2(dev
, (caddr_t
)arg
, flag
);
22829 err
= sr_read_mode2(dev
, (caddr_t
)arg
, flag
);
22833 case CDROMREADMODE1
:
22834 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMREADMODE1\n");
22838 err
= sr_read_mode1(dev
, (caddr_t
)arg
, flag
);
22842 case CDROMREADOFFSET
:
22843 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMREADOFFSET\n");
22847 err
= sr_read_sony_session_offset(dev
, (caddr_t
)arg
,
22852 case CDROMSBLKMODE
:
22853 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMSBLKMODE\n");
22855 * There is no means of changing block size in case of atapi
22856 * drives, thus return ENOTTY if drive type is atapi
22858 if (!ISCD(un
) || (un
->un_f_cfg_is_atapi
== TRUE
)) {
22860 } else if (un
->un_f_mmc_cap
== TRUE
) {
22863 * MMC Devices do not support changing the
22864 * logical block size
22866 * Note: EINVAL is being returned instead of ENOTTY to
22867 * maintain consistancy with the original mmc
22872 mutex_enter(SD_MUTEX(un
));
22873 if ((!(un
->un_exclopen
& (1<<SDPART(dev
)))) ||
22874 (un
->un_ncmds_in_transport
> 0)) {
22875 mutex_exit(SD_MUTEX(un
));
22878 mutex_exit(SD_MUTEX(un
));
22879 err
= sr_change_blkmode(dev
, cmd
, arg
, flag
);
22884 case CDROMGBLKMODE
:
22885 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMGBLKMODE\n");
22888 } else if ((un
->un_f_cfg_is_atapi
!= FALSE
) &&
22889 (un
->un_f_blockcount_is_valid
!= FALSE
)) {
22891 * Drive is an ATAPI drive so return target block
22892 * size for ATAPI drives since we cannot change the
22893 * blocksize on ATAPI drives. Used primarily to detect
22894 * if an ATAPI cdrom is present.
22896 if (ddi_copyout(&un
->un_tgt_blocksize
, (void *)arg
,
22897 sizeof (int), flag
) != 0) {
22905 * Drive supports changing block sizes via a Mode
22908 err
= sr_change_blkmode(dev
, cmd
, arg
, flag
);
22912 case CDROMGDRVSPEED
:
22913 case CDROMSDRVSPEED
:
22914 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMXDRVSPEED\n");
22917 } else if (un
->un_f_mmc_cap
== TRUE
) {
22919 * Note: In the future the driver implementation
22921 * setting cd speed should entail:
22922 * 1) If non-mmc try the Toshiba mode page
22923 * (sr_change_speed)
22924 * 2) If mmc but no support for Real Time Streaming try
22925 * the SET CD SPEED (0xBB) command
22926 * (sr_atapi_change_speed)
22927 * 3) If mmc and support for Real Time Streaming
22928 * try the GET PERFORMANCE and SET STREAMING
22929 * commands (not yet implemented, 4380808)
22932 * As per recent MMC spec, CD-ROM speed is variable
22933 * and changes with LBA. Since there is no such
22934 * things as drive speed now, fail this ioctl.
22936 * Note: EINVAL is returned for consistancy of original
22937 * implementation which included support for getting
22938 * the drive speed of mmc devices but not setting
22939 * the drive speed. Thus EINVAL would be returned
22940 * if a set request was made for an mmc device.
22941 * We no longer support get or set speed for
22942 * mmc but need to remain consistent with regard
22943 * to the error code returned.
22946 } else if (un
->un_f_cfg_is_atapi
== TRUE
) {
22947 err
= sr_atapi_change_speed(dev
, cmd
, arg
, flag
);
22949 err
= sr_change_speed(dev
, cmd
, arg
, flag
);
22954 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMCDDA\n");
22958 err
= sr_read_cdda(dev
, (void *)arg
, flag
);
22963 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMCDXA\n");
22967 err
= sr_read_cdxa(dev
, (caddr_t
)arg
, flag
);
22972 SD_TRACE(SD_LOG_IOCTL
, un
, "CDROMSUBCODE\n");
22976 err
= sr_read_all_subcodes(dev
, (caddr_t
)arg
, flag
);
22982 /* RESET/ABORTS testing ioctls */
22986 if (ddi_copyin((void *)arg
, &reset_level
, sizeof (int), flag
)) {
22989 SD_INFO(SD_LOG_IOCTL
, un
, "sdioctl: DKIOCRESET: "
22990 "reset_level = 0x%lx\n", reset_level
);
22991 if (scsi_reset(SD_ADDRESS(un
), reset_level
)) {
23001 SD_INFO(SD_LOG_IOCTL
, un
, "sdioctl: DKIOCABORT:\n");
23002 if (scsi_abort(SD_ADDRESS(un
), NULL
)) {
23010 #ifdef SD_FAULT_INJECTION
23011 /* SDIOC FaultInjection testing ioctls */
23014 case SDIOCINSERTPKT
:
23015 case SDIOCINSERTXB
:
23016 case SDIOCINSERTUN
:
23017 case SDIOCINSERTARQ
:
23019 case SDIOCRETRIEVE
:
23021 SD_INFO(SD_LOG_SDTEST
, un
, "sdioctl:"
23022 "SDIOC detected cmd:0x%X:\n", cmd
);
23023 /* call error generator */
23024 sd_faultinjection_ioctl(cmd
, arg
, un
);
23028 #endif /* SD_FAULT_INJECTION */
23030 case DKIOCFLUSHWRITECACHE
:
23032 struct dk_callback
*dkc
= (struct dk_callback
*)arg
;
23034 mutex_enter(SD_MUTEX(un
));
23035 if (!un
->un_f_sync_cache_supported
||
23036 !un
->un_f_write_cache_enabled
) {
23037 err
= un
->un_f_sync_cache_supported
?
23039 mutex_exit(SD_MUTEX(un
));
23040 if ((flag
& FKIOCTL
) && dkc
!= NULL
&&
23041 dkc
->dkc_callback
!= NULL
) {
23042 (*dkc
->dkc_callback
)(dkc
->dkc_cookie
,
23045 * Did callback and reported error.
23046 * Since we did a callback, ioctl
23053 mutex_exit(SD_MUTEX(un
));
23055 if ((flag
& FKIOCTL
) && dkc
!= NULL
&&
23056 dkc
->dkc_callback
!= NULL
) {
23057 /* async SYNC CACHE request */
23058 err
= sd_send_scsi_SYNCHRONIZE_CACHE(un
, dkc
);
23060 /* synchronous SYNC CACHE request */
23061 err
= sd_send_scsi_SYNCHRONIZE_CACHE(un
, NULL
);
23066 case DKIOCGETWCE
: {
23070 if ((err
= sd_get_write_cache_enabled(ssc
, &wce
)) != 0) {
23074 if (ddi_copyout(&wce
, (void *)arg
, sizeof (wce
), flag
)) {
23080 case DKIOCSETWCE
: {
23082 int wce
, sync_supported
;
23085 if (ddi_copyin((void *)arg
, &wce
, sizeof (wce
), flag
)) {
23091 * Synchronize multiple threads trying to enable
23092 * or disable the cache via the un_f_wcc_cv
23093 * condition variable.
23095 mutex_enter(SD_MUTEX(un
));
23098 * Don't allow the cache to be enabled if the
23099 * config file has it disabled.
23101 if (un
->un_f_opt_disable_cache
&& wce
) {
23102 mutex_exit(SD_MUTEX(un
));
23108 * Wait for write cache change in progress
23109 * bit to be clear before proceeding.
23111 while (un
->un_f_wcc_inprog
)
23112 cv_wait(&un
->un_wcc_cv
, SD_MUTEX(un
));
23114 un
->un_f_wcc_inprog
= 1;
23116 mutex_exit(SD_MUTEX(un
));
23119 * Get the current write cache state
23121 if ((err
= sd_get_write_cache_enabled(ssc
, &cur_wce
)) != 0) {
23122 mutex_enter(SD_MUTEX(un
));
23123 un
->un_f_wcc_inprog
= 0;
23124 cv_broadcast(&un
->un_wcc_cv
);
23125 mutex_exit(SD_MUTEX(un
));
23129 mutex_enter(SD_MUTEX(un
));
23130 un
->un_f_write_cache_enabled
= (cur_wce
!= 0);
23132 if (un
->un_f_write_cache_enabled
&& wce
== 0) {
23134 * Disable the write cache. Don't clear
23135 * un_f_write_cache_enabled until after
23136 * the mode select and flush are complete.
23138 sync_supported
= un
->un_f_sync_cache_supported
;
23141 * If cache flush is suppressed, we assume that the
23142 * controller firmware will take care of managing the
23143 * write cache for us: no need to explicitly
23146 if (!un
->un_f_suppress_cache_flush
) {
23147 mutex_exit(SD_MUTEX(un
));
23148 if ((err
= sd_cache_control(ssc
,
23150 SD_CACHE_DISABLE
)) == 0 &&
23152 err
= sd_send_scsi_SYNCHRONIZE_CACHE(un
,
23156 mutex_exit(SD_MUTEX(un
));
23159 mutex_enter(SD_MUTEX(un
));
23161 un
->un_f_write_cache_enabled
= 0;
23164 } else if (!un
->un_f_write_cache_enabled
&& wce
!= 0) {
23166 * Set un_f_write_cache_enabled first, so there is
23167 * no window where the cache is enabled, but the
23168 * bit says it isn't.
23170 un
->un_f_write_cache_enabled
= 1;
23173 * If cache flush is suppressed, we assume that the
23174 * controller firmware will take care of managing the
23175 * write cache for us: no need to explicitly
23178 if (!un
->un_f_suppress_cache_flush
) {
23179 mutex_exit(SD_MUTEX(un
));
23180 err
= sd_cache_control(ssc
, SD_CACHE_NOCHANGE
,
23183 mutex_exit(SD_MUTEX(un
));
23186 mutex_enter(SD_MUTEX(un
));
23189 un
->un_f_write_cache_enabled
= 0;
23193 un
->un_f_wcc_inprog
= 0;
23194 cv_broadcast(&un
->un_wcc_cv
);
23195 mutex_exit(SD_MUTEX(un
));
23203 mutex_enter(SD_MUTEX(un
));
23204 un
->un_ncmds_in_driver
--;
23205 ASSERT(un
->un_ncmds_in_driver
>= 0);
23206 mutex_exit(SD_MUTEX(un
));
23209 done_without_assess
:
23212 SD_TRACE(SD_LOG_IOCTL
, un
, "sdioctl: exit: %d\n", err
);
23216 mutex_enter(SD_MUTEX(un
));
23217 un
->un_ncmds_in_driver
--;
23218 ASSERT(un
->un_ncmds_in_driver
>= 0);
23219 mutex_exit(SD_MUTEX(un
));
23223 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
23224 /* Uninitialize sd_ssc_t pointer */
23227 SD_TRACE(SD_LOG_IOCTL
, un
, "sdioctl: exit: %d\n", err
);
23233 * Function: sd_dkio_ctrl_info
23235 * Description: This routine is the driver entry point for handling controller
23236 * information ioctl requests (DKIOCINFO).
23238 * Arguments: dev - the device number
23239 * arg - pointer to user provided dk_cinfo structure
23240 * specifying the controller type and attributes.
23241 * flag - this argument is a pass through to ddi_copyxxx()
23242 * directly from the mode argument of ioctl().
23250 sd_dkio_ctrl_info(dev_t dev
, caddr_t arg
, int flag
)
23252 struct sd_lun
*un
= NULL
;
23253 struct dk_cinfo
*info
;
23257 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
23261 info
= (struct dk_cinfo
*)
23262 kmem_zalloc(sizeof (struct dk_cinfo
), KM_SLEEP
);
23264 switch (un
->un_ctype
) {
23266 info
->dki_ctype
= DKC_CDROM
;
23269 info
->dki_ctype
= DKC_SCSI_CCS
;
23272 pdip
= ddi_get_parent(SD_DEVINFO(un
));
23273 info
->dki_cnum
= ddi_get_instance(pdip
);
23274 if (strlen(ddi_get_name(pdip
)) < DK_DEVLEN
) {
23275 (void) strcpy(info
->dki_cname
, ddi_get_name(pdip
));
23277 (void) strncpy(info
->dki_cname
, ddi_node_name(pdip
),
23281 lun
= ddi_prop_get_int(DDI_DEV_T_ANY
, SD_DEVINFO(un
),
23282 DDI_PROP_DONTPASS
, SCSI_ADDR_PROP_LUN
, 0);
23283 tgt
= ddi_prop_get_int(DDI_DEV_T_ANY
, SD_DEVINFO(un
),
23284 DDI_PROP_DONTPASS
, SCSI_ADDR_PROP_TARGET
, 0);
23286 /* Unit Information */
23287 info
->dki_unit
= ddi_get_instance(SD_DEVINFO(un
));
23288 info
->dki_slave
= ((tgt
<< 3) | lun
);
23289 (void) strncpy(info
->dki_dname
, ddi_driver_name(SD_DEVINFO(un
)),
23291 info
->dki_flags
= DKI_FMTVOL
;
23292 info
->dki_partition
= SDPART(dev
);
23294 /* Max Transfer size of this device in blocks */
23295 info
->dki_maxtransfer
= un
->un_max_xfer_size
/ un
->un_sys_blocksize
;
23296 info
->dki_addr
= 0;
23297 info
->dki_space
= 0;
23298 info
->dki_prio
= 0;
23301 if (ddi_copyout(info
, arg
, sizeof (struct dk_cinfo
), flag
) != 0) {
23302 kmem_free(info
, sizeof (struct dk_cinfo
));
23305 kmem_free(info
, sizeof (struct dk_cinfo
));
23311 * Function: sd_get_media_info_com
23313 * Description: This routine returns the information required to populate
23314 * the fields for the dk_minfo/dk_minfo_ext structures.
23316 * Arguments: dev - the device number
23317 * dki_media_type - media_type
23318 * dki_lbsize - logical block size
23319 * dki_capacity - capacity in blocks
23320 * dki_pbsize - physical block size (if requested)
23329 sd_get_media_info_com(dev_t dev
, uint_t
*dki_media_type
, uint_t
*dki_lbsize
,
23330 diskaddr_t
*dki_capacity
, uint_t
*dki_pbsize
)
23332 struct sd_lun
*un
= NULL
;
23333 struct uscsi_cmd com
;
23334 struct scsi_inquiry
*sinq
;
23335 u_longlong_t media_capacity
;
23345 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
23346 (un
->un_state
== SD_STATE_OFFLINE
)) {
23350 SD_TRACE(SD_LOG_IOCTL_DKIO
, un
, "sd_get_media_info_com: entry\n");
23352 out_data
= kmem_zalloc(SD_PROFILE_HEADER_LEN
, KM_SLEEP
);
23353 rqbuf
= kmem_zalloc(SENSE_LENGTH
, KM_SLEEP
);
23354 ssc
= sd_ssc_init(un
);
23356 /* Issue a TUR to determine if the drive is ready with media present */
23357 rval
= sd_send_scsi_TEST_UNIT_READY(ssc
, SD_CHECK_FOR_MEDIA
);
23358 if (rval
== ENXIO
) {
23360 } else if (rval
!= 0) {
23361 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
23364 /* Now get configuration data */
23366 *dki_media_type
= DK_CDROM
;
23368 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
23369 if (un
->un_f_mmc_cap
== TRUE
) {
23370 rtn
= sd_send_scsi_GET_CONFIGURATION(ssc
, &com
, rqbuf
,
23371 SENSE_LENGTH
, out_data
, SD_PROFILE_HEADER_LEN
,
23376 * We ignore all failures for CD and need to
23377 * put the assessment before processing code
23378 * to avoid missing assessment for FMA.
23380 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
23382 * Failed for other than an illegal request
23383 * or command not supported
23385 if ((com
.uscsi_status
== STATUS_CHECK
) &&
23386 (com
.uscsi_rqstatus
== STATUS_GOOD
)) {
23387 if ((rqbuf
[2] != KEY_ILLEGAL_REQUEST
) ||
23388 (rqbuf
[12] != 0x20)) {
23390 goto no_assessment
;
23395 * The GET CONFIGURATION command succeeded
23396 * so set the media type according to the
23399 *dki_media_type
= out_data
[6];
23400 *dki_media_type
<<= 8;
23401 *dki_media_type
|= out_data
[7];
23406 * The profile list is not available, so we attempt to identify
23407 * the media type based on the inquiry data
23409 sinq
= un
->un_sd
->sd_inq
;
23410 if ((sinq
->inq_dtype
== DTYPE_DIRECT
) ||
23411 (sinq
->inq_dtype
== DTYPE_OPTICAL
)) {
23412 /* This is a direct access device or optical disk */
23413 *dki_media_type
= DK_FIXED_DISK
;
23415 if ((bcmp(sinq
->inq_vid
, "IOMEGA", 6) == 0) ||
23416 (bcmp(sinq
->inq_vid
, "iomega", 6) == 0)) {
23417 if ((bcmp(sinq
->inq_pid
, "ZIP", 3) == 0)) {
23418 *dki_media_type
= DK_ZIP
;
23420 (bcmp(sinq
->inq_pid
, "jaz", 3) == 0)) {
23421 *dki_media_type
= DK_JAZ
;
23426 * Not a CD, direct access or optical disk so return
23429 *dki_media_type
= DK_UNKNOWN
;
23434 * Now read the capacity so we can provide the lbasize,
23435 * pbsize and capacity.
23437 if (dki_pbsize
&& un
->un_f_descr_format_supported
)
23438 rval
= sd_send_scsi_READ_CAPACITY_16(ssc
, &capacity
, &lbasize
,
23439 &pbsize
, SD_PATH_DIRECT
);
23441 if (dki_pbsize
== NULL
|| rval
!= 0 ||
23442 !un
->un_f_descr_format_supported
) {
23443 rval
= sd_send_scsi_READ_CAPACITY(ssc
, &capacity
, &lbasize
,
23448 if (un
->un_f_enable_rmw
&&
23449 un
->un_phy_blocksize
!= 0) {
23450 pbsize
= un
->un_phy_blocksize
;
23454 media_capacity
= capacity
;
23457 * sd_send_scsi_READ_CAPACITY() reports capacity in
23458 * un->un_sys_blocksize chunks. So we need to convert
23459 * it into cap.lbsize chunks.
23461 if (un
->un_f_has_removable_media
) {
23462 media_capacity
*= un
->un_sys_blocksize
;
23463 media_capacity
/= lbasize
;
23474 if (un
->un_f_enable_rmw
&&
23475 !ISP2(pbsize
% DEV_BSIZE
)) {
23476 pbsize
= SSD_SECSIZE
;
23477 } else if (!ISP2(lbasize
% DEV_BSIZE
) ||
23478 !ISP2(pbsize
% DEV_BSIZE
)) {
23479 pbsize
= lbasize
= DEV_BSIZE
;
23481 media_capacity
= capacity
;
23485 * If lun is expanded dynamically, update the un structure.
23487 mutex_enter(SD_MUTEX(un
));
23488 if ((un
->un_f_blockcount_is_valid
== TRUE
) &&
23489 (un
->un_f_tgt_blocksize_is_valid
== TRUE
) &&
23490 (capacity
> un
->un_blockcount
)) {
23491 un
->un_f_expnevent
= B_FALSE
;
23492 sd_update_block_info(un
, lbasize
, capacity
);
23494 mutex_exit(SD_MUTEX(un
));
23496 *dki_lbsize
= lbasize
;
23497 *dki_capacity
= media_capacity
;
23499 *dki_pbsize
= pbsize
;
23504 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
23506 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
23510 kmem_free(out_data
, SD_PROFILE_HEADER_LEN
);
23511 kmem_free(rqbuf
, SENSE_LENGTH
);
23516 * Function: sd_get_media_info
23518 * Description: This routine is the driver entry point for handling ioctl
23519 * requests for the media type or command set profile used by the
23520 * drive to operate on the media (DKIOCGMEDIAINFO).
23522 * Arguments: dev - the device number
23523 * arg - pointer to user provided dk_minfo structure
23524 * specifying the media type, logical block size and
23526 * flag - this argument is a pass through to ddi_copyxxx()
23527 * directly from the mode argument of ioctl().
23529 * Return Code: returns the value from sd_get_media_info_com
23532 sd_get_media_info(dev_t dev
, caddr_t arg
, int flag
)
23534 struct dk_minfo mi
;
23537 rval
= sd_get_media_info_com(dev
, &mi
.dki_media_type
,
23538 &mi
.dki_lbsize
, &mi
.dki_capacity
, NULL
);
23542 if (ddi_copyout(&mi
, arg
, sizeof (struct dk_minfo
), flag
))
23548 * Function: sd_get_media_info_ext
23550 * Description: This routine is the driver entry point for handling ioctl
23551 * requests for the media type or command set profile used by the
23552 * drive to operate on the media (DKIOCGMEDIAINFOEXT). The
23553 * difference this ioctl and DKIOCGMEDIAINFO is the return value
23554 * of this ioctl contains both logical block size and physical
23558 * Arguments: dev - the device number
23559 * arg - pointer to user provided dk_minfo_ext structure
23560 * specifying the media type, logical block size,
23561 * physical block size and disk capacity.
23562 * flag - this argument is a pass through to ddi_copyxxx()
23563 * directly from the mode argument of ioctl().
23565 * Return Code: returns the value from sd_get_media_info_com
23568 sd_get_media_info_ext(dev_t dev
, caddr_t arg
, int flag
)
23570 struct dk_minfo_ext mie
;
23573 rval
= sd_get_media_info_com(dev
, &mie
.dki_media_type
,
23574 &mie
.dki_lbsize
, &mie
.dki_capacity
, &mie
.dki_pbsize
);
23578 if (ddi_copyout(&mie
, arg
, sizeof (struct dk_minfo_ext
), flag
))
23585 * Function: sd_watch_request_submit
23587 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
23588 * depending on which is supported by device.
23591 sd_watch_request_submit(struct sd_lun
*un
)
23595 /* All submissions are unified to use same device number */
23596 dev
= sd_make_device(SD_DEVINFO(un
));
23598 if (un
->un_f_mmc_cap
&& un
->un_f_mmc_gesn_polling
) {
23599 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un
),
23600 sd_check_media_time
, SENSE_LENGTH
, sd_media_watch_cb
,
23603 return (scsi_watch_request_submit(SD_SCSI_DEVP(un
),
23604 sd_check_media_time
, SENSE_LENGTH
, sd_media_watch_cb
,
23611 * Function: sd_check_media
23613 * Description: This utility routine implements the functionality for the
23614 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the
23615 * driver state changes from that specified by the user
23616 * (inserted or ejected). For example, if the user specifies
23617 * DKIO_EJECTED and the current media state is inserted this
23618 * routine will immediately return DKIO_INSERTED. However, if the
23619 * current media state is not inserted the user thread will be
23620 * blocked until the drive state changes. If DKIO_NONE is specified
23621 * the user thread will block until a drive state change occurs.
23623 * Arguments: dev - the device number
23624 * state - user pointer to a dkio_state, updated with the current
23625 * drive state at return.
23627 * Return Code: ENXIO
23634 sd_check_media(dev_t dev
, enum dkio_state state
)
23636 struct sd_lun
*un
= NULL
;
23637 enum dkio_state prev_state
;
23638 opaque_t token
= NULL
;
23642 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
23646 SD_TRACE(SD_LOG_COMMON
, un
, "sd_check_media: entry\n");
23648 ssc
= sd_ssc_init(un
);
23650 mutex_enter(SD_MUTEX(un
));
23652 SD_TRACE(SD_LOG_COMMON
, un
, "sd_check_media: "
23653 "state=%x, mediastate=%x\n", state
, un
->un_mediastate
);
23655 prev_state
= un
->un_mediastate
;
23657 /* is there anything to do? */
23658 if (state
== un
->un_mediastate
|| un
->un_mediastate
== DKIO_NONE
) {
23660 * submit the request to the scsi_watch service;
23661 * scsi_media_watch_cb() does the real work
23663 mutex_exit(SD_MUTEX(un
));
23666 * This change handles the case where a scsi watch request is
23667 * added to a device that is powered down. To accomplish this
23668 * we power up the device before adding the scsi watch request,
23669 * since the scsi watch sends a TUR directly to the device
23670 * which the device cannot handle if it is powered down.
23672 if (sd_pm_entry(un
) != DDI_SUCCESS
) {
23673 mutex_enter(SD_MUTEX(un
));
23677 token
= sd_watch_request_submit(un
);
23681 mutex_enter(SD_MUTEX(un
));
23682 if (token
== NULL
) {
23688 * This is a special case IOCTL that doesn't return
23689 * until the media state changes. Routine sdpower
23690 * knows about and handles this so don't count it
23691 * as an active cmd in the driver, which would
23692 * keep the device busy to the pm framework.
23693 * If the count isn't decremented the device can't
23696 un
->un_ncmds_in_driver
--;
23697 ASSERT(un
->un_ncmds_in_driver
>= 0);
23700 * if a prior request had been made, this will be the same
23701 * token, as scsi_watch was designed that way.
23703 un
->un_swr_token
= token
;
23704 un
->un_specified_mediastate
= state
;
23707 * now wait for media change
23708 * we will not be signalled unless mediastate == state but it is
23709 * still better to test for this condition, since there is a
23710 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
23712 SD_TRACE(SD_LOG_COMMON
, un
,
23713 "sd_check_media: waiting for media state change\n");
23714 while (un
->un_mediastate
== state
) {
23715 if (cv_wait_sig(&un
->un_state_cv
, SD_MUTEX(un
)) == 0) {
23716 SD_TRACE(SD_LOG_COMMON
, un
,
23717 "sd_check_media: waiting for media state "
23718 "was interrupted\n");
23719 un
->un_ncmds_in_driver
++;
23723 SD_TRACE(SD_LOG_COMMON
, un
,
23724 "sd_check_media: received signal, state=%x\n",
23725 un
->un_mediastate
);
23728 * Inc the counter to indicate the device once again
23729 * has an active outstanding cmd.
23731 un
->un_ncmds_in_driver
++;
23734 /* invalidate geometry */
23735 if (prev_state
== DKIO_INSERTED
&& un
->un_mediastate
== DKIO_EJECTED
) {
23739 if (un
->un_mediastate
== DKIO_INSERTED
&& prev_state
!= DKIO_INSERTED
) {
23743 SD_TRACE(SD_LOG_COMMON
, un
, "sd_check_media: media inserted\n");
23744 mutex_exit(SD_MUTEX(un
));
23746 * Since the following routines use SD_PATH_DIRECT, we must
23747 * call PM directly before the upcoming disk accesses. This
23748 * may cause the disk to be power/spin up.
23751 if (sd_pm_entry(un
) == DDI_SUCCESS
) {
23752 rval
= sd_send_scsi_READ_CAPACITY(ssc
,
23753 &capacity
, &lbasize
, SD_PATH_DIRECT
);
23757 sd_ssc_assessment(ssc
,
23758 SD_FMT_STATUS_CHECK
);
23760 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
23761 mutex_enter(SD_MUTEX(un
));
23766 mutex_enter(SD_MUTEX(un
));
23769 mutex_enter(SD_MUTEX(un
));
23771 sd_update_block_info(un
, lbasize
, capacity
);
23774 * Check if the media in the device is writable or not
23777 sd_check_for_writable_cd(ssc
, SD_PATH_DIRECT
);
23780 mutex_exit(SD_MUTEX(un
));
23781 cmlb_invalidate(un
->un_cmlbhandle
, (void *)SD_PATH_DIRECT
);
23782 if ((cmlb_validate(un
->un_cmlbhandle
, 0,
23783 (void *)SD_PATH_DIRECT
) == 0) && un
->un_f_pkstats_enabled
) {
23785 SD_TRACE(SD_LOG_IO_PARTITION
, un
,
23786 "sd_check_media: un:0x%p pstats created and "
23790 rval
= sd_send_scsi_DOORLOCK(ssc
, SD_REMOVAL_PREVENT
,
23797 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
23799 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
23802 mutex_enter(SD_MUTEX(un
));
23806 un
->un_f_watcht_stopped
= FALSE
;
23807 if (token
!= NULL
&& un
->un_swr_token
!= NULL
) {
23809 * Use of this local token and the mutex ensures that we avoid
23810 * some race conditions associated with terminating the
23813 token
= un
->un_swr_token
;
23814 mutex_exit(SD_MUTEX(un
));
23815 (void) scsi_watch_request_terminate(token
,
23816 SCSI_WATCH_TERMINATE_WAIT
);
23817 if (scsi_watch_get_ref_count(token
) == 0) {
23818 mutex_enter(SD_MUTEX(un
));
23819 un
->un_swr_token
= (opaque_t
)NULL
;
23821 mutex_enter(SD_MUTEX(un
));
23826 * Update the capacity kstat value, if no media previously
23827 * (capacity kstat is 0) and a media has been inserted
23828 * (un_f_blockcount_is_valid == TRUE)
23830 if (un
->un_errstats
) {
23831 struct sd_errstats
*stp
= NULL
;
23833 stp
= (struct sd_errstats
*)un
->un_errstats
->ks_data
;
23834 if ((stp
->sd_capacity
.value
.ui64
== 0) &&
23835 (un
->un_f_blockcount_is_valid
== TRUE
)) {
23836 stp
->sd_capacity
.value
.ui64
=
23837 (uint64_t)((uint64_t)un
->un_blockcount
*
23838 un
->un_sys_blocksize
);
23841 mutex_exit(SD_MUTEX(un
));
23842 SD_TRACE(SD_LOG_COMMON
, un
, "sd_check_media: done\n");
23848 * Function: sd_delayed_cv_broadcast
23850 * Description: Delayed cv_broadcast to allow for target to recover from media
23853 * Arguments: arg - driver soft state (unit) structure
23857 sd_delayed_cv_broadcast(void *arg
)
23859 struct sd_lun
*un
= arg
;
23861 SD_TRACE(SD_LOG_COMMON
, un
, "sd_delayed_cv_broadcast\n");
23863 mutex_enter(SD_MUTEX(un
));
23864 un
->un_dcvb_timeid
= NULL
;
23865 cv_broadcast(&un
->un_state_cv
);
23866 mutex_exit(SD_MUTEX(un
));
23871 * Function: sd_media_watch_cb
23873 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
23874 * routine processes the TUR sense data and updates the driver
23875 * state if a transition has occurred. The user thread
23876 * (sd_check_media) is then signalled.
23878 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23879 * among multiple watches that share this callback function
23880 * resultp - scsi watch facility result packet containing scsi
23881 * packet, status byte and sense data
23883 * Return Code: 0 for success, -1 for failure
23887 sd_media_watch_cb(caddr_t arg
, struct scsi_watch_result
*resultp
)
23890 struct scsi_status
*statusp
= resultp
->statusp
;
23891 uint8_t *sensep
= (uint8_t *)resultp
->sensep
;
23892 enum dkio_state state
= DKIO_NONE
;
23893 dev_t dev
= (dev_t
)arg
;
23894 uchar_t actual_sense_length
;
23895 uint8_t skey
, asc
, ascq
;
23897 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
23900 actual_sense_length
= resultp
->actual_sense_length
;
23902 mutex_enter(SD_MUTEX(un
));
23903 SD_TRACE(SD_LOG_COMMON
, un
,
23904 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23905 *((char *)statusp
), (void *)sensep
, actual_sense_length
);
23907 if (resultp
->pkt
->pkt_reason
== CMD_DEV_GONE
) {
23908 un
->un_mediastate
= DKIO_DEV_GONE
;
23909 cv_broadcast(&un
->un_state_cv
);
23910 mutex_exit(SD_MUTEX(un
));
23915 if (un
->un_f_mmc_cap
&& un
->un_f_mmc_gesn_polling
) {
23916 if (sd_gesn_media_data_valid(resultp
->mmc_data
)) {
23917 if ((resultp
->mmc_data
[5] &
23918 SD_GESN_MEDIA_EVENT_STATUS_PRESENT
) != 0) {
23919 state
= DKIO_INSERTED
;
23921 state
= DKIO_EJECTED
;
23923 if ((resultp
->mmc_data
[4] & SD_GESN_MEDIA_EVENT_CODE
) ==
23924 SD_GESN_MEDIA_EVENT_EJECTREQUEST
) {
23925 sd_log_eject_request_event(un
, KM_NOSLEEP
);
23928 } else if (sensep
!= NULL
) {
23930 * If there was a check condition then sensep points to valid
23931 * sense data. If status was not a check condition but a
23932 * reservation or busy status then the new state is DKIO_NONE.
23934 skey
= scsi_sense_key(sensep
);
23935 asc
= scsi_sense_asc(sensep
);
23936 ascq
= scsi_sense_ascq(sensep
);
23938 SD_INFO(SD_LOG_COMMON
, un
,
23939 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
23941 /* This routine only uses up to 13 bytes of sense data. */
23942 if (actual_sense_length
>= 13) {
23943 if (skey
== KEY_UNIT_ATTENTION
) {
23945 state
= DKIO_INSERTED
;
23947 } else if (skey
== KEY_NOT_READY
) {
23949 * Sense data of 02/06/00 means that the
23950 * drive could not read the media (No
23951 * reference position found). In this case
23952 * to prevent a hang on the DKIOCSTATE IOCTL
23953 * we set the media state to DKIO_INSERTED.
23955 if (asc
== 0x06 && ascq
== 0x00)
23956 state
= DKIO_INSERTED
;
23959 * if 02/04/02 means that the host
23960 * should send start command. Explicitly
23961 * leave the media state as is
23962 * (inserted) as the media is inserted
23963 * and host has stopped device for PM
23964 * reasons. Upon next true read/write
23965 * to this media will bring the
23966 * device to the right state good for
23970 state
= DKIO_EJECTED
;
23973 * If the drive is busy with an
23974 * operation or long write, keep the
23975 * media in an inserted state.
23978 if ((asc
== 0x04) &&
23982 state
= DKIO_INSERTED
;
23985 } else if (skey
== KEY_NO_SENSE
) {
23986 if ((asc
== 0x00) && (ascq
== 0x00)) {
23988 * Sense Data 00/00/00 does not provide
23989 * any information about the state of
23990 * the media. Ignore it.
23992 mutex_exit(SD_MUTEX(un
));
23997 } else if ((*((char *)statusp
) == STATUS_GOOD
) &&
23998 (resultp
->pkt
->pkt_reason
== CMD_CMPLT
)) {
23999 state
= DKIO_INSERTED
;
24002 SD_TRACE(SD_LOG_COMMON
, un
,
24003 "sd_media_watch_cb: state=%x, specified=%x\n",
24004 state
, un
->un_specified_mediastate
);
24007 * now signal the waiting thread if this is *not* the specified state;
24008 * delay the signal if the state is DKIO_INSERTED to allow the target
24011 if (state
!= un
->un_specified_mediastate
) {
24012 un
->un_mediastate
= state
;
24013 if (state
== DKIO_INSERTED
) {
24015 * delay the signal to give the drive a chance
24016 * to do what it apparently needs to do
24018 SD_TRACE(SD_LOG_COMMON
, un
,
24019 "sd_media_watch_cb: delayed cv_broadcast\n");
24020 if (un
->un_dcvb_timeid
== NULL
) {
24021 un
->un_dcvb_timeid
=
24022 timeout(sd_delayed_cv_broadcast
, un
,
24023 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY
));
24026 SD_TRACE(SD_LOG_COMMON
, un
,
24027 "sd_media_watch_cb: immediate cv_broadcast\n");
24028 cv_broadcast(&un
->un_state_cv
);
24031 mutex_exit(SD_MUTEX(un
));
24037 * Function: sd_dkio_get_temp
24039 * Description: This routine is the driver entry point for handling ioctl
24040 * requests to get the disk temperature.
24042 * Arguments: dev - the device number
24043 * arg - pointer to user provided dk_temperature structure.
24044 * flag - this argument is a pass through to ddi_copyxxx()
24045 * directly from the mode argument of ioctl().
24054 sd_dkio_get_temp(dev_t dev
, caddr_t arg
, int flag
)
24056 struct sd_lun
*un
= NULL
;
24057 struct dk_temperature
*dktemp
= NULL
;
24058 uchar_t
*temperature_page
;
24060 int path_flag
= SD_PATH_STANDARD
;
24063 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24067 ssc
= sd_ssc_init(un
);
24068 dktemp
= kmem_zalloc(sizeof (struct dk_temperature
), KM_SLEEP
);
24070 /* copyin the disk temp argument to get the user flags */
24071 if (ddi_copyin((void *)arg
, dktemp
,
24072 sizeof (struct dk_temperature
), flag
) != 0) {
24077 /* Initialize the temperature to invalid. */
24078 dktemp
->dkt_cur_temp
= (short)DKT_INVALID_TEMP
;
24079 dktemp
->dkt_ref_temp
= (short)DKT_INVALID_TEMP
;
24082 * Note: Investigate removing the "bypass pm" semantic.
24083 * Can we just bypass PM always?
24085 if (dktemp
->dkt_flags
& DKT_BYPASS_PM
) {
24086 path_flag
= SD_PATH_DIRECT
;
24087 ASSERT(!mutex_owned(&un
->un_pm_mutex
));
24088 mutex_enter(&un
->un_pm_mutex
);
24089 if (SD_DEVICE_IS_IN_LOW_POWER(un
)) {
24091 * If DKT_BYPASS_PM is set, and the drive happens to be
24092 * in low power mode, we can not wake it up, Need to
24095 mutex_exit(&un
->un_pm_mutex
);
24100 * Indicate to PM the device is busy. This is required
24101 * to avoid a race - i.e. the ioctl is issuing a
24102 * command and the pm framework brings down the device
24103 * to low power mode (possible power cut-off on some
24106 mutex_exit(&un
->un_pm_mutex
);
24107 if (sd_pm_entry(un
) != DDI_SUCCESS
) {
24114 temperature_page
= kmem_zalloc(TEMPERATURE_PAGE_SIZE
, KM_SLEEP
);
24116 rval
= sd_send_scsi_LOG_SENSE(ssc
, temperature_page
,
24117 TEMPERATURE_PAGE_SIZE
, TEMPERATURE_PAGE
, 1, 0, path_flag
);
24122 * For the current temperature verify that the parameter length is 0x02
24123 * and the parameter code is 0x00
24125 if ((temperature_page
[7] == 0x02) && (temperature_page
[4] == 0x00) &&
24126 (temperature_page
[5] == 0x00)) {
24127 if (temperature_page
[9] == 0xFF) {
24128 dktemp
->dkt_cur_temp
= (short)DKT_INVALID_TEMP
;
24130 dktemp
->dkt_cur_temp
= (short)(temperature_page
[9]);
24135 * For the reference temperature verify that the parameter
24136 * length is 0x02 and the parameter code is 0x01
24138 if ((temperature_page
[13] == 0x02) && (temperature_page
[10] == 0x00) &&
24139 (temperature_page
[11] == 0x01)) {
24140 if (temperature_page
[15] == 0xFF) {
24141 dktemp
->dkt_ref_temp
= (short)DKT_INVALID_TEMP
;
24143 dktemp
->dkt_ref_temp
= (short)(temperature_page
[15]);
24147 /* Do the copyout regardless of the temperature commands status. */
24148 if (ddi_copyout(dktemp
, (void *)arg
, sizeof (struct dk_temperature
),
24157 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
24159 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
24162 if (path_flag
== SD_PATH_DIRECT
) {
24166 kmem_free(temperature_page
, TEMPERATURE_PAGE_SIZE
);
24169 if (dktemp
!= NULL
) {
24170 kmem_free(dktemp
, sizeof (struct dk_temperature
));
24178 * Function: sd_log_page_supported
24180 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24181 * supported log pages.
24183 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
24184 * structure for this target.
24187 * Return Code: -1 - on error (log sense is optional and may not be supported).
24188 * 0 - log page not found.
24189 * 1 - log page found.
24193 sd_log_page_supported(sd_ssc_t
*ssc
, int log_page
)
24195 uchar_t
*log_page_data
;
24202 ASSERT(ssc
!= NULL
);
24204 ASSERT(un
!= NULL
);
24206 log_page_data
= kmem_zalloc(0xFF, KM_SLEEP
);
24208 status
= sd_send_scsi_LOG_SENSE(ssc
, log_page_data
, 0xFF, 0, 0x01, 0,
24212 if (status
== EIO
) {
24214 * Some disks do not support log sense, we
24215 * should ignore this kind of error(sense key is
24216 * 0x5 - illegal request).
24221 sensep
= (uint8_t *)ssc
->ssc_uscsi_cmd
->uscsi_rqbuf
;
24222 senlen
= (int)(ssc
->ssc_uscsi_cmd
->uscsi_rqlen
-
24223 ssc
->ssc_uscsi_cmd
->uscsi_rqresid
);
24226 scsi_sense_key(sensep
) == KEY_ILLEGAL_REQUEST
) {
24227 sd_ssc_assessment(ssc
,
24228 SD_FMT_IGNORE_COMPROMISE
);
24230 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
24233 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
24236 SD_ERROR(SD_LOG_COMMON
, un
,
24237 "sd_log_page_supported: failed log page retrieval\n");
24238 kmem_free(log_page_data
, 0xFF);
24242 log_size
= log_page_data
[3];
24245 * The list of supported log pages start from the fourth byte. Check
24246 * until we run out of log pages or a match is found.
24248 for (i
= 4; (i
< (log_size
+ 4)) && !match
; i
++) {
24249 if (log_page_data
[i
] == log_page
) {
24253 kmem_free(log_page_data
, 0xFF);
24259 * Function: sd_mhdioc_failfast
24261 * Description: This routine is the driver entry point for handling ioctl
24262 * requests to enable/disable the multihost failfast option.
24263 * (MHIOCENFAILFAST)
24265 * Arguments: dev - the device number
24266 * arg - user specified probing interval.
24267 * flag - this argument is a pass through to ddi_copyxxx()
24268 * directly from the mode argument of ioctl().
24276 sd_mhdioc_failfast(dev_t dev
, caddr_t arg
, int flag
)
24278 struct sd_lun
*un
= NULL
;
24282 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24286 if (ddi_copyin((void *)arg
, &mh_time
, sizeof (int), flag
))
24290 mutex_enter(SD_MUTEX(un
));
24291 un
->un_resvd_status
|= SD_FAILFAST
;
24292 mutex_exit(SD_MUTEX(un
));
24294 * If mh_time is INT_MAX, then this ioctl is being used for
24295 * SCSI-3 PGR purposes, and we don't need to spawn watch thread.
24297 if (mh_time
!= INT_MAX
) {
24298 rval
= sd_check_mhd(dev
, mh_time
);
24301 (void) sd_check_mhd(dev
, 0);
24302 mutex_enter(SD_MUTEX(un
));
24303 un
->un_resvd_status
&= ~SD_FAILFAST
;
24304 mutex_exit(SD_MUTEX(un
));
24311 * Function: sd_mhdioc_takeown
24313 * Description: This routine is the driver entry point for handling ioctl
24314 * requests to forcefully acquire exclusive access rights to the
24315 * multihost disk (MHIOCTKOWN).
24317 * Arguments: dev - the device number
24318 * arg - user provided structure specifying the delay
24319 * parameters in milliseconds
24320 * flag - this argument is a pass through to ddi_copyxxx()
24321 * directly from the mode argument of ioctl().
24329 sd_mhdioc_takeown(dev_t dev
, caddr_t arg
, int flag
)
24331 struct sd_lun
*un
= NULL
;
24332 struct mhioctkown
*tkown
= NULL
;
24335 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24340 tkown
= (struct mhioctkown
*)
24341 kmem_zalloc(sizeof (struct mhioctkown
), KM_SLEEP
);
24342 rval
= ddi_copyin(arg
, tkown
, sizeof (struct mhioctkown
), flag
);
24349 rval
= sd_take_ownership(dev
, tkown
);
24350 mutex_enter(SD_MUTEX(un
));
24352 un
->un_resvd_status
|= SD_RESERVE
;
24353 if (tkown
!= NULL
&& tkown
->reinstate_resv_delay
!= 0) {
24354 sd_reinstate_resv_delay
=
24355 tkown
->reinstate_resv_delay
* 1000;
24357 sd_reinstate_resv_delay
= SD_REINSTATE_RESV_DELAY
;
24360 * Give the scsi_watch routine interval set by
24361 * the MHIOCENFAILFAST ioctl precedence here.
24363 if ((un
->un_resvd_status
& SD_FAILFAST
) == 0) {
24364 mutex_exit(SD_MUTEX(un
));
24365 (void) sd_check_mhd(dev
, sd_reinstate_resv_delay
/1000);
24366 SD_TRACE(SD_LOG_IOCTL_MHD
, un
,
24367 "sd_mhdioc_takeown : %d\n",
24368 sd_reinstate_resv_delay
);
24370 mutex_exit(SD_MUTEX(un
));
24372 (void) scsi_reset_notify(SD_ADDRESS(un
), SCSI_RESET_NOTIFY
,
24373 sd_mhd_reset_notify_cb
, (caddr_t
)un
);
24375 un
->un_resvd_status
&= ~SD_RESERVE
;
24376 mutex_exit(SD_MUTEX(un
));
24380 if (tkown
!= NULL
) {
24381 kmem_free(tkown
, sizeof (struct mhioctkown
));
24388 * Function: sd_mhdioc_release
24390 * Description: This routine is the driver entry point for handling ioctl
24391 * requests to release exclusive access rights to the multihost
24392 * disk (MHIOCRELEASE).
24394 * Arguments: dev - the device number
24401 sd_mhdioc_release(dev_t dev
)
24403 struct sd_lun
*un
= NULL
;
24404 timeout_id_t resvd_timeid_save
;
24405 int resvd_status_save
;
24408 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24412 mutex_enter(SD_MUTEX(un
));
24413 resvd_status_save
= un
->un_resvd_status
;
24414 un
->un_resvd_status
&=
24415 ~(SD_RESERVE
| SD_LOST_RESERVE
| SD_WANT_RESERVE
);
24416 if (un
->un_resvd_timeid
) {
24417 resvd_timeid_save
= un
->un_resvd_timeid
;
24418 un
->un_resvd_timeid
= NULL
;
24419 mutex_exit(SD_MUTEX(un
));
24420 (void) untimeout(resvd_timeid_save
);
24422 mutex_exit(SD_MUTEX(un
));
24426 * destroy any pending timeout thread that may be attempting to
24427 * reinstate reservation on this device.
24429 sd_rmv_resv_reclaim_req(dev
);
24431 if ((rval
= sd_reserve_release(dev
, SD_RELEASE
)) == 0) {
24432 mutex_enter(SD_MUTEX(un
));
24433 if ((un
->un_mhd_token
) &&
24434 ((un
->un_resvd_status
& SD_FAILFAST
) == 0)) {
24435 mutex_exit(SD_MUTEX(un
));
24436 (void) sd_check_mhd(dev
, 0);
24438 mutex_exit(SD_MUTEX(un
));
24440 (void) scsi_reset_notify(SD_ADDRESS(un
), SCSI_RESET_CANCEL
,
24441 sd_mhd_reset_notify_cb
, (caddr_t
)un
);
24444 * sd_mhd_watch_cb will restart the resvd recover timeout thread
24446 mutex_enter(SD_MUTEX(un
));
24447 un
->un_resvd_status
= resvd_status_save
;
24448 mutex_exit(SD_MUTEX(un
));
24455 * Function: sd_mhdioc_register_devid
24457 * Description: This routine is the driver entry point for handling ioctl
24458 * requests to register the device id (MHIOCREREGISTERDEVID).
24460 * Note: The implementation for this ioctl has been updated to
24461 * be consistent with the original PSARC case (1999/357)
24462 * (4375899, 4241671, 4220005)
24464 * Arguments: dev - the device number
24471 sd_mhdioc_register_devid(dev_t dev
)
24473 struct sd_lun
*un
= NULL
;
24477 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24481 ASSERT(!mutex_owned(SD_MUTEX(un
)));
24483 mutex_enter(SD_MUTEX(un
));
24485 /* If a devid already exists, de-register it */
24486 if (un
->un_devid
!= NULL
) {
24487 ddi_devid_unregister(SD_DEVINFO(un
));
24489 * After unregister devid, needs to free devid memory
24491 ddi_devid_free(un
->un_devid
);
24492 un
->un_devid
= NULL
;
24495 /* Check for reservation conflict */
24496 mutex_exit(SD_MUTEX(un
));
24497 ssc
= sd_ssc_init(un
);
24498 rval
= sd_send_scsi_TEST_UNIT_READY(ssc
, 0);
24499 mutex_enter(SD_MUTEX(un
));
24503 sd_register_devid(ssc
, SD_DEVINFO(un
), SD_TARGET_IS_UNRESERVED
);
24511 mutex_exit(SD_MUTEX(un
));
24514 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
24516 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
24524 * Function: sd_mhdioc_inkeys
24526 * Description: This routine is the driver entry point for handling ioctl
24527 * requests to issue the SCSI-3 Persistent In Read Keys command
24528 * to the device (MHIOCGRP_INKEYS).
24530 * Arguments: dev - the device number
24531 * arg - user provided in_keys structure
24532 * flag - this argument is a pass through to ddi_copyxxx()
24533 * directly from the mode argument of ioctl().
24535 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24541 sd_mhdioc_inkeys(dev_t dev
, caddr_t arg
, int flag
)
24544 mhioc_inkeys_t inkeys
;
24547 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24551 #ifdef _MULTI_DATAMODEL
24552 switch (ddi_model_convert_from(flag
& FMODELS
)) {
24553 case DDI_MODEL_ILP32
: {
24554 struct mhioc_inkeys32 inkeys32
;
24556 if (ddi_copyin(arg
, &inkeys32
,
24557 sizeof (struct mhioc_inkeys32
), flag
) != 0) {
24560 inkeys
.li
= (mhioc_key_list_t
*)(uintptr_t)inkeys32
.li
;
24561 if ((rval
= sd_persistent_reservation_in_read_keys(un
,
24562 &inkeys
, flag
)) != 0) {
24565 inkeys32
.generation
= inkeys
.generation
;
24566 if (ddi_copyout(&inkeys32
, arg
, sizeof (struct mhioc_inkeys32
),
24572 case DDI_MODEL_NONE
:
24573 if (ddi_copyin(arg
, &inkeys
, sizeof (mhioc_inkeys_t
),
24577 if ((rval
= sd_persistent_reservation_in_read_keys(un
,
24578 &inkeys
, flag
)) != 0) {
24581 if (ddi_copyout(&inkeys
, arg
, sizeof (mhioc_inkeys_t
),
24588 #else /* ! _MULTI_DATAMODEL */
24590 if (ddi_copyin(arg
, &inkeys
, sizeof (mhioc_inkeys_t
), flag
) != 0) {
24593 rval
= sd_persistent_reservation_in_read_keys(un
, &inkeys
, flag
);
24597 if (ddi_copyout(&inkeys
, arg
, sizeof (mhioc_inkeys_t
), flag
) != 0) {
24601 #endif /* _MULTI_DATAMODEL */
24608 * Function: sd_mhdioc_inresv
24610 * Description: This routine is the driver entry point for handling ioctl
24611 * requests to issue the SCSI-3 Persistent In Read Reservations
24612 * command to the device (MHIOCGRP_INKEYS).
24614 * Arguments: dev - the device number
24615 * arg - user provided in_resv structure
24616 * flag - this argument is a pass through to ddi_copyxxx()
24617 * directly from the mode argument of ioctl().
24619 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
24625 sd_mhdioc_inresv(dev_t dev
, caddr_t arg
, int flag
)
24628 mhioc_inresvs_t inresvs
;
24631 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24635 #ifdef _MULTI_DATAMODEL
24637 switch (ddi_model_convert_from(flag
& FMODELS
)) {
24638 case DDI_MODEL_ILP32
: {
24639 struct mhioc_inresvs32 inresvs32
;
24641 if (ddi_copyin(arg
, &inresvs32
,
24642 sizeof (struct mhioc_inresvs32
), flag
) != 0) {
24645 inresvs
.li
= (mhioc_resv_desc_list_t
*)(uintptr_t)inresvs32
.li
;
24646 if ((rval
= sd_persistent_reservation_in_read_resv(un
,
24647 &inresvs
, flag
)) != 0) {
24650 inresvs32
.generation
= inresvs
.generation
;
24651 if (ddi_copyout(&inresvs32
, arg
,
24652 sizeof (struct mhioc_inresvs32
), flag
) != 0) {
24657 case DDI_MODEL_NONE
:
24658 if (ddi_copyin(arg
, &inresvs
,
24659 sizeof (mhioc_inresvs_t
), flag
) != 0) {
24662 if ((rval
= sd_persistent_reservation_in_read_resv(un
,
24663 &inresvs
, flag
)) != 0) {
24666 if (ddi_copyout(&inresvs
, arg
,
24667 sizeof (mhioc_inresvs_t
), flag
) != 0) {
24673 #else /* ! _MULTI_DATAMODEL */
24675 if (ddi_copyin(arg
, &inresvs
, sizeof (mhioc_inresvs_t
), flag
) != 0) {
24678 rval
= sd_persistent_reservation_in_read_resv(un
, &inresvs
, flag
);
24682 if (ddi_copyout(&inresvs
, arg
, sizeof (mhioc_inresvs_t
), flag
)) {
24686 #endif /* ! _MULTI_DATAMODEL */
24693 * The following routines support the clustering functionality described below
24694 * and implement lost reservation reclaim functionality.
24698 * The clustering code uses two different, independent forms of SCSI
24699 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3
24700 * Persistent Group Reservations. For any particular disk, it will use either
24701 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
24704 * The cluster software takes ownership of a multi-hosted disk by issuing the
24705 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
24706 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
24707 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl
24708 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the
24709 * driver. The meaning of failfast is that if the driver (on this host) ever
24710 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
24711 * it should immediately panic the host. The motivation for this ioctl is that
24712 * if this host does encounter reservation conflict, the underlying cause is
24713 * that some other host of the cluster has decided that this host is no longer
24714 * in the cluster and has seized control of the disks for itself. Since this
24715 * host is no longer in the cluster, it ought to panic itself. The
24716 * MHIOCENFAILFAST ioctl does two things:
24717 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24718 * error to panic the host
24719 * (b) it sets up a periodic timer to test whether this host still has
24720 * "access" (in that no other host has reserved the device): if the
24721 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24722 * purpose of that periodic timer is to handle scenarios where the host is
24723 * otherwise temporarily quiescent, temporarily doing no real i/o.
24724 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24725 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24726 * the device itself.
24729 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24730 * facility is supported through the shared multihost disk ioctls
24731 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24732 * MHIOCGRP_PREEMPTANDABORT)
24734 * Reservation Reclaim:
24735 * --------------------
24736 * To support the lost reservation reclaim operations this driver creates a
24737 * single thread to handle reinstating reservations on all devices that have
24738 * lost reservations sd_resv_reclaim_requests are logged for all devices that
24739 * have LOST RESERVATIONS when the scsi watch facility callsback sd_mhd_watch_cb
24740 * and the reservation reclaim thread loops through the requests to regain the
24741 * lost reservations.
24745 * Function: sd_check_mhd()
24747 * Description: This function sets up and submits a scsi watch request or
24748 * terminates an existing watch request. This routine is used in
24749 * support of reservation reclaim.
24751 * Arguments: dev - the device 'dev_t' is used for context to discriminate
24752 * among multiple watches that share the callback function
24753 * interval - the number of microseconds specifying the watch
24754 * interval for issuing TEST UNIT READY commands. If
24755 * set to 0 the watch should be terminated. If the
24756 * interval is set to 0 and if the device is required
24757 * to hold reservation while disabling failfast, the
24758 * watch is restarted with an interval of
24759 * reinstate_resv_delay.
24761 * Return Code: 0 - Successful submit/terminate of scsi watch request
24762 * ENXIO - Indicates an invalid device was specified
24763 * EAGAIN - Unable to submit the scsi watch request
24767 sd_check_mhd(dev_t dev
, int interval
)
24772 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24776 /* is this a watch termination request? */
24777 if (interval
== 0) {
24778 mutex_enter(SD_MUTEX(un
));
24779 /* if there is an existing watch task then terminate it */
24780 if (un
->un_mhd_token
) {
24781 token
= un
->un_mhd_token
;
24782 un
->un_mhd_token
= NULL
;
24783 mutex_exit(SD_MUTEX(un
));
24784 (void) scsi_watch_request_terminate(token
,
24785 SCSI_WATCH_TERMINATE_ALL_WAIT
);
24786 mutex_enter(SD_MUTEX(un
));
24788 mutex_exit(SD_MUTEX(un
));
24790 * Note: If we return here we don't check for the
24791 * failfast case. This is the original legacy
24792 * implementation but perhaps we should be checking
24793 * the failfast case.
24798 * If the device is required to hold reservation while
24799 * disabling failfast, we need to restart the scsi_watch
24800 * routine with an interval of reinstate_resv_delay.
24802 if (un
->un_resvd_status
& SD_RESERVE
) {
24803 interval
= sd_reinstate_resv_delay
/1000;
24805 /* no failfast so bail */
24806 mutex_exit(SD_MUTEX(un
));
24809 mutex_exit(SD_MUTEX(un
));
24813 * adjust minimum time interval to 1 second,
24814 * and convert from msecs to usecs
24816 if (interval
> 0 && interval
< 1000) {
24822 * submit the request to the scsi_watch service
24824 token
= scsi_watch_request_submit(SD_SCSI_DEVP(un
), interval
,
24825 SENSE_LENGTH
, sd_mhd_watch_cb
, (caddr_t
)dev
);
24826 if (token
== NULL
) {
24831 * save token for termination later on
24833 mutex_enter(SD_MUTEX(un
));
24834 un
->un_mhd_token
= token
;
24835 mutex_exit(SD_MUTEX(un
));
24841 * Function: sd_mhd_watch_cb()
24843 * Description: This function is the call back function used by the scsi watch
24844 * facility. The scsi watch facility sends the "Test Unit Ready"
24845 * and processes the status. If applicable (i.e. a "Unit Attention"
24846 * status and automatic "Request Sense" not used) the scsi watch
24847 * facility will send a "Request Sense" and retrieve the sense data
24848 * to be passed to this callback function. In either case the
24849 * automatic "Request Sense" or the facility submitting one, this
24850 * callback is passed the status and sense data.
24852 * Arguments: arg - the device 'dev_t' is used for context to discriminate
24853 * among multiple watches that share this callback function
24854 * resultp - scsi watch facility result packet containing scsi
24855 * packet, status byte and sense data
24857 * Return Code: 0 - continue the watch task
24858 * non-zero - terminate the watch task
24862 sd_mhd_watch_cb(caddr_t arg
, struct scsi_watch_result
*resultp
)
24865 struct scsi_status
*statusp
;
24867 struct scsi_pkt
*pkt
;
24868 uchar_t actual_sense_length
;
24869 dev_t dev
= (dev_t
)arg
;
24871 ASSERT(resultp
!= NULL
);
24872 statusp
= resultp
->statusp
;
24873 sensep
= (uint8_t *)resultp
->sensep
;
24874 pkt
= resultp
->pkt
;
24875 actual_sense_length
= resultp
->actual_sense_length
;
24877 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
24881 SD_TRACE(SD_LOG_IOCTL_MHD
, un
,
24882 "sd_mhd_watch_cb: reason '%s', status '%s'\n",
24883 scsi_rname(pkt
->pkt_reason
), sd_sname(*((unsigned char *)statusp
)));
24885 /* Begin processing of the status and/or sense data */
24886 if (pkt
->pkt_reason
!= CMD_CMPLT
) {
24887 /* Handle the incomplete packet */
24888 sd_mhd_watch_incomplete(un
, pkt
);
24890 } else if (*((unsigned char *)statusp
) != STATUS_GOOD
) {
24891 if (*((unsigned char *)statusp
)
24892 == STATUS_RESERVATION_CONFLICT
) {
24894 * Handle a reservation conflict by panicking if
24895 * configured for failfast or by logging the conflict
24896 * and updating the reservation status
24898 mutex_enter(SD_MUTEX(un
));
24899 if ((un
->un_resvd_status
& SD_FAILFAST
) &&
24900 (sd_failfast_enable
)) {
24901 sd_panic_for_res_conflict(un
);
24904 SD_INFO(SD_LOG_IOCTL_MHD
, un
,
24905 "sd_mhd_watch_cb: Reservation Conflict\n");
24906 un
->un_resvd_status
|= SD_RESERVATION_CONFLICT
;
24907 mutex_exit(SD_MUTEX(un
));
24911 if (sensep
!= NULL
) {
24912 if (actual_sense_length
>= (SENSE_LENGTH
- 2)) {
24913 mutex_enter(SD_MUTEX(un
));
24914 if ((scsi_sense_asc(sensep
) ==
24915 SD_SCSI_RESET_SENSE_CODE
) &&
24916 (un
->un_resvd_status
& SD_RESERVE
)) {
24918 * The additional sense code indicates a power
24919 * on or bus device reset has occurred; update
24920 * the reservation status.
24922 un
->un_resvd_status
|=
24923 (SD_LOST_RESERVE
| SD_WANT_RESERVE
);
24924 SD_INFO(SD_LOG_IOCTL_MHD
, un
,
24925 "sd_mhd_watch_cb: Lost Reservation\n");
24931 mutex_enter(SD_MUTEX(un
));
24934 if ((un
->un_resvd_status
& SD_RESERVE
) &&
24935 (un
->un_resvd_status
& SD_LOST_RESERVE
)) {
24936 if (un
->un_resvd_status
& SD_WANT_RESERVE
) {
24938 * A reset occurred in between the last probe and this
24939 * one so if a timeout is pending cancel it.
24941 if (un
->un_resvd_timeid
) {
24942 timeout_id_t temp_id
= un
->un_resvd_timeid
;
24943 un
->un_resvd_timeid
= NULL
;
24944 mutex_exit(SD_MUTEX(un
));
24945 (void) untimeout(temp_id
);
24946 mutex_enter(SD_MUTEX(un
));
24948 un
->un_resvd_status
&= ~SD_WANT_RESERVE
;
24950 if (un
->un_resvd_timeid
== 0) {
24951 /* Schedule a timeout to handle the lost reservation */
24952 un
->un_resvd_timeid
= timeout(sd_mhd_resvd_recover
,
24954 drv_usectohz(sd_reinstate_resv_delay
));
24957 mutex_exit(SD_MUTEX(un
));
24963 * Function: sd_mhd_watch_incomplete()
24965 * Description: This function is used to find out why a scsi pkt sent by the
24966 * scsi watch facility was not completed. Under some scenarios this
24967 * routine will return. Otherwise it will send a bus reset to see
24968 * if the drive is still online.
24970 * Arguments: un - driver soft state (unit) structure
24971 * pkt - incomplete scsi pkt
24975 sd_mhd_watch_incomplete(struct sd_lun
*un
, struct scsi_pkt
*pkt
)
24980 ASSERT(pkt
!= NULL
);
24981 ASSERT(un
!= NULL
);
24982 be_chatty
= (!(pkt
->pkt_flags
& FLAG_SILENT
));
24983 perr
= (pkt
->pkt_statistics
& STAT_PERR
);
24985 mutex_enter(SD_MUTEX(un
));
24986 if (un
->un_state
== SD_STATE_DUMPING
) {
24987 mutex_exit(SD_MUTEX(un
));
24991 switch (pkt
->pkt_reason
) {
24992 case CMD_UNX_BUS_FREE
:
24994 * If we had a parity error that caused the target to drop BSY*,
24995 * don't be chatty about it.
24997 if (perr
&& be_chatty
) {
25001 case CMD_TAG_REJECT
:
25003 * The SCSI-2 spec states that a tag reject will be sent by the
25004 * target if tagged queuing is not supported. A tag reject may
25005 * also be sent during certain initialization periods or to
25006 * control internal resources. For the latter case the target
25007 * may also return Queue Full.
25009 * If this driver receives a tag reject from a target that is
25010 * going through an init period or controlling internal
25011 * resources tagged queuing will be disabled. This is a less
25012 * than optimal behavior but the driver is unable to determine
25013 * the target state and assumes tagged queueing is not supported
25015 pkt
->pkt_flags
= 0;
25016 un
->un_tagflags
= 0;
25018 if (un
->un_f_opt_queueing
== TRUE
) {
25019 un
->un_throttle
= min(un
->un_throttle
, 3);
25021 un
->un_throttle
= 1;
25023 mutex_exit(SD_MUTEX(un
));
25024 (void) scsi_ifsetcap(SD_ADDRESS(un
), "tagged-qing", 0, 1);
25025 mutex_enter(SD_MUTEX(un
));
25027 case CMD_INCOMPLETE
:
25029 * The transport stopped with an abnormal state, fallthrough and
25030 * reset the target and/or bus unless selection did not complete
25031 * (indicated by STATE_GOT_BUS) in which case we don't want to
25032 * go through a target/bus reset
25034 if (pkt
->pkt_state
== STATE_GOT_BUS
) {
25042 * The lun may still be running the command, so a lun reset
25043 * should be attempted. If the lun reset fails or cannot be
25044 * issued, than try a target reset. Lastly try a bus reset.
25046 if ((pkt
->pkt_statistics
&
25047 (STAT_BUS_RESET
|STAT_DEV_RESET
|STAT_ABORTED
)) == 0) {
25048 int reset_retval
= 0;
25049 mutex_exit(SD_MUTEX(un
));
25050 if (un
->un_f_allow_bus_device_reset
== TRUE
) {
25051 if (un
->un_f_lun_reset_enabled
== TRUE
) {
25053 scsi_reset(SD_ADDRESS(un
),
25056 if (reset_retval
== 0) {
25058 scsi_reset(SD_ADDRESS(un
),
25062 if (reset_retval
== 0) {
25063 (void) scsi_reset(SD_ADDRESS(un
), RESET_ALL
);
25065 mutex_enter(SD_MUTEX(un
));
25070 /* A device/bus reset has occurred; update the reservation status. */
25071 if ((pkt
->pkt_reason
== CMD_RESET
) || (pkt
->pkt_statistics
&
25072 (STAT_BUS_RESET
| STAT_DEV_RESET
))) {
25073 if ((un
->un_resvd_status
& SD_RESERVE
) == SD_RESERVE
) {
25074 un
->un_resvd_status
|=
25075 (SD_LOST_RESERVE
| SD_WANT_RESERVE
);
25076 SD_INFO(SD_LOG_IOCTL_MHD
, un
,
25077 "sd_mhd_watch_incomplete: Lost Reservation\n");
25082 * The disk has been turned off; Update the device state.
25084 * Note: Should we be offlining the disk here?
25086 if (pkt
->pkt_state
== STATE_GOT_BUS
) {
25087 SD_INFO(SD_LOG_IOCTL_MHD
, un
, "sd_mhd_watch_incomplete: "
25088 "Disk not responding to selection\n");
25089 if (un
->un_state
!= SD_STATE_OFFLINE
) {
25090 New_state(un
, SD_STATE_OFFLINE
);
25092 } else if (be_chatty
) {
25094 * suppress messages if they are all the same pkt reason;
25095 * with TQ, many (up to 256) are returned with the same
25098 if (pkt
->pkt_reason
!= un
->un_last_pkt_reason
) {
25099 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
25100 "sd_mhd_watch_incomplete: "
25101 "SCSI transport failed: reason '%s'\n",
25102 scsi_rname(pkt
->pkt_reason
));
25105 un
->un_last_pkt_reason
= pkt
->pkt_reason
;
25106 mutex_exit(SD_MUTEX(un
));
25111 * Function: sd_sname()
25113 * Description: This is a simple little routine to return a string containing
25114 * a printable description of command status byte for use in
25117 * Arguments: status - pointer to a status byte
25119 * Return Code: char * - string containing status description.
25123 sd_sname(uchar_t status
)
25125 switch (status
& STATUS_MASK
) {
25127 return ("good status");
25129 return ("check condition");
25131 return ("condition met");
25134 case STATUS_INTERMEDIATE
:
25135 return ("intermediate");
25136 case STATUS_INTERMEDIATE_MET
:
25137 return ("intermediate - condition met");
25138 case STATUS_RESERVATION_CONFLICT
:
25139 return ("reservation_conflict");
25140 case STATUS_TERMINATED
:
25141 return ("command terminated");
25143 return ("queue full");
25145 return ("<unknown status>");
25151 * Function: sd_mhd_resvd_recover()
25153 * Description: This function adds a reservation entry to the
25154 * sd_resv_reclaim_request list and signals the reservation
25155 * reclaim thread that there is work pending. If the reservation
25156 * reclaim thread has not been previously created this function
25157 * will kick it off.
25159 * Arguments: arg - the device 'dev_t' is used for context to discriminate
25160 * among multiple watches that share this callback function
25162 * Context: This routine is called by timeout() and is run in interrupt
25163 * context. It must not sleep or call other functions which may
25168 sd_mhd_resvd_recover(void *arg
)
25170 dev_t dev
= (dev_t
)arg
;
25172 struct sd_thr_request
*sd_treq
= NULL
;
25173 struct sd_thr_request
*sd_cur
= NULL
;
25174 struct sd_thr_request
*sd_prev
= NULL
;
25175 int already_there
= 0;
25177 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
25181 mutex_enter(SD_MUTEX(un
));
25182 un
->un_resvd_timeid
= NULL
;
25183 if (un
->un_resvd_status
& SD_WANT_RESERVE
) {
25185 * There was a reset so don't issue the reserve, allow the
25186 * sd_mhd_watch_cb callback function to notice this and
25187 * reschedule the timeout for reservation.
25189 mutex_exit(SD_MUTEX(un
));
25192 mutex_exit(SD_MUTEX(un
));
25195 * Add this device to the sd_resv_reclaim_request list and the
25196 * sd_resv_reclaim_thread should take care of the rest.
25198 * Note: We can't sleep in this context so if the memory allocation
25199 * fails allow the sd_mhd_watch_cb callback function to notice this and
25200 * reschedule the timeout for reservation. (4378460)
25202 sd_treq
= (struct sd_thr_request
*)
25203 kmem_zalloc(sizeof (struct sd_thr_request
), KM_NOSLEEP
);
25204 if (sd_treq
== NULL
) {
25208 sd_treq
->sd_thr_req_next
= NULL
;
25209 sd_treq
->dev
= dev
;
25210 mutex_enter(&sd_tr
.srq_resv_reclaim_mutex
);
25211 if (sd_tr
.srq_thr_req_head
== NULL
) {
25212 sd_tr
.srq_thr_req_head
= sd_treq
;
25214 sd_cur
= sd_prev
= sd_tr
.srq_thr_req_head
;
25215 for (; sd_cur
!= NULL
; sd_cur
= sd_cur
->sd_thr_req_next
) {
25216 if (sd_cur
->dev
== dev
) {
25218 * already in Queue so don't log
25219 * another request for the device
25226 if (!already_there
) {
25227 SD_INFO(SD_LOG_IOCTL_MHD
, un
, "sd_mhd_resvd_recover: "
25228 "logging request for %lx\n", dev
);
25229 sd_prev
->sd_thr_req_next
= sd_treq
;
25231 kmem_free(sd_treq
, sizeof (struct sd_thr_request
));
25236 * Create a kernel thread to do the reservation reclaim and free up this
25237 * thread. We cannot block this thread while we go away to do the
25238 * reservation reclaim
25240 if (sd_tr
.srq_resv_reclaim_thread
== NULL
)
25241 sd_tr
.srq_resv_reclaim_thread
= thread_create(NULL
, 0,
25242 sd_resv_reclaim_thread
, NULL
,
25243 0, &p0
, TS_RUN
, v
.v_maxsyspri
- 2);
25245 /* Tell the reservation reclaim thread that it has work to do */
25246 cv_signal(&sd_tr
.srq_resv_reclaim_cv
);
25247 mutex_exit(&sd_tr
.srq_resv_reclaim_mutex
);
25251 * Function: sd_resv_reclaim_thread()
25253 * Description: This function implements the reservation reclaim operations
25255 * Arguments: arg - the device 'dev_t' is used for context to discriminate
25256 * among multiple watches that share this callback function
25260 sd_resv_reclaim_thread()
25263 struct sd_thr_request
*sd_mhreq
;
25265 /* Wait for work */
25266 mutex_enter(&sd_tr
.srq_resv_reclaim_mutex
);
25267 if (sd_tr
.srq_thr_req_head
== NULL
) {
25268 cv_wait(&sd_tr
.srq_resv_reclaim_cv
,
25269 &sd_tr
.srq_resv_reclaim_mutex
);
25272 /* Loop while we have work */
25273 while ((sd_tr
.srq_thr_cur_req
= sd_tr
.srq_thr_req_head
) != NULL
) {
25274 un
= ddi_get_soft_state(sd_state
,
25275 SDUNIT(sd_tr
.srq_thr_cur_req
->dev
));
25278 * softstate structure is NULL so just
25279 * dequeue the request and continue
25281 sd_tr
.srq_thr_req_head
=
25282 sd_tr
.srq_thr_cur_req
->sd_thr_req_next
;
25283 kmem_free(sd_tr
.srq_thr_cur_req
,
25284 sizeof (struct sd_thr_request
));
25288 /* dequeue the request */
25289 sd_mhreq
= sd_tr
.srq_thr_cur_req
;
25290 sd_tr
.srq_thr_req_head
=
25291 sd_tr
.srq_thr_cur_req
->sd_thr_req_next
;
25292 mutex_exit(&sd_tr
.srq_resv_reclaim_mutex
);
25295 * Reclaim reservation only if SD_RESERVE is still set. There
25296 * may have been a call to MHIOCRELEASE before we got here.
25298 mutex_enter(SD_MUTEX(un
));
25299 if ((un
->un_resvd_status
& SD_RESERVE
) == SD_RESERVE
) {
25301 * Note: The SD_LOST_RESERVE flag is cleared before
25302 * reclaiming the reservation. If this is done after the
25303 * call to sd_reserve_release a reservation loss in the
25304 * window between pkt completion of reserve cmd and
25305 * mutex_enter below may not be recognized
25307 un
->un_resvd_status
&= ~SD_LOST_RESERVE
;
25308 mutex_exit(SD_MUTEX(un
));
25310 if (sd_reserve_release(sd_mhreq
->dev
,
25311 SD_RESERVE
) == 0) {
25312 mutex_enter(SD_MUTEX(un
));
25313 un
->un_resvd_status
|= SD_RESERVE
;
25314 mutex_exit(SD_MUTEX(un
));
25315 SD_INFO(SD_LOG_IOCTL_MHD
, un
,
25316 "sd_resv_reclaim_thread: "
25317 "Reservation Recovered\n");
25319 mutex_enter(SD_MUTEX(un
));
25320 un
->un_resvd_status
|= SD_LOST_RESERVE
;
25321 mutex_exit(SD_MUTEX(un
));
25322 SD_INFO(SD_LOG_IOCTL_MHD
, un
,
25323 "sd_resv_reclaim_thread: Failed "
25324 "Reservation Recovery\n");
25327 mutex_exit(SD_MUTEX(un
));
25329 mutex_enter(&sd_tr
.srq_resv_reclaim_mutex
);
25330 ASSERT(sd_mhreq
== sd_tr
.srq_thr_cur_req
);
25331 kmem_free(sd_mhreq
, sizeof (struct sd_thr_request
));
25332 sd_mhreq
= sd_tr
.srq_thr_cur_req
= NULL
;
25334 * wakeup the destroy thread if anyone is waiting on
25337 cv_signal(&sd_tr
.srq_inprocess_cv
);
25338 SD_TRACE(SD_LOG_IOCTL_MHD
, un
,
25339 "sd_resv_reclaim_thread: cv_signalling current request \n");
25343 * cleanup the sd_tr structure now that this thread will not exist
25345 ASSERT(sd_tr
.srq_thr_req_head
== NULL
);
25346 ASSERT(sd_tr
.srq_thr_cur_req
== NULL
);
25347 sd_tr
.srq_resv_reclaim_thread
= NULL
;
25348 mutex_exit(&sd_tr
.srq_resv_reclaim_mutex
);
25354 * Function: sd_rmv_resv_reclaim_req()
25356 * Description: This function removes any pending reservation reclaim requests
25357 * for the specified device.
25359 * Arguments: dev - the device 'dev_t'
25363 sd_rmv_resv_reclaim_req(dev_t dev
)
25365 struct sd_thr_request
*sd_mhreq
;
25366 struct sd_thr_request
*sd_prev
;
25368 /* Remove a reservation reclaim request from the list */
25369 mutex_enter(&sd_tr
.srq_resv_reclaim_mutex
);
25370 if (sd_tr
.srq_thr_cur_req
&& sd_tr
.srq_thr_cur_req
->dev
== dev
) {
25372 * We are attempting to reinstate reservation for
25373 * this device. We wait for sd_reserve_release()
25374 * to return before we return.
25376 cv_wait(&sd_tr
.srq_inprocess_cv
,
25377 &sd_tr
.srq_resv_reclaim_mutex
);
25379 sd_prev
= sd_mhreq
= sd_tr
.srq_thr_req_head
;
25380 if (sd_mhreq
&& sd_mhreq
->dev
== dev
) {
25381 sd_tr
.srq_thr_req_head
= sd_mhreq
->sd_thr_req_next
;
25382 kmem_free(sd_mhreq
, sizeof (struct sd_thr_request
));
25383 mutex_exit(&sd_tr
.srq_resv_reclaim_mutex
);
25386 for (; sd_mhreq
!= NULL
; sd_mhreq
= sd_mhreq
->sd_thr_req_next
) {
25387 if (sd_mhreq
&& sd_mhreq
->dev
== dev
) {
25390 sd_prev
= sd_mhreq
;
25392 if (sd_mhreq
!= NULL
) {
25393 sd_prev
->sd_thr_req_next
= sd_mhreq
->sd_thr_req_next
;
25394 kmem_free(sd_mhreq
, sizeof (struct sd_thr_request
));
25397 mutex_exit(&sd_tr
.srq_resv_reclaim_mutex
);
25402 * Function: sd_mhd_reset_notify_cb()
25404 * Description: This is a call back function for scsi_reset_notify. This
25405 * function updates the softstate reserved status and logs the
25406 * reset. The driver scsi watch facility callback function
25407 * (sd_mhd_watch_cb) and reservation reclaim thread functionality
25408 * will reclaim the reservation.
25410 * Arguments: arg - driver soft state (unit) structure
25414 sd_mhd_reset_notify_cb(caddr_t arg
)
25416 struct sd_lun
*un
= (struct sd_lun
*)arg
;
25418 mutex_enter(SD_MUTEX(un
));
25419 if ((un
->un_resvd_status
& SD_RESERVE
) == SD_RESERVE
) {
25420 un
->un_resvd_status
|= (SD_LOST_RESERVE
| SD_WANT_RESERVE
);
25421 SD_INFO(SD_LOG_IOCTL_MHD
, un
,
25422 "sd_mhd_reset_notify_cb: Lost Reservation\n");
25424 mutex_exit(SD_MUTEX(un
));
25429 * Function: sd_take_ownership()
25431 * Description: This routine implements an algorithm to achieve a stable
25432 * reservation on disks which don't implement priority reserve,
25433 * and makes sure that other host lose re-reservation attempts.
25434 * This algorithm contains of a loop that keeps issuing the RESERVE
25435 * for some period of time (min_ownership_delay, default 6 seconds)
25436 * During that loop, it looks to see if there has been a bus device
25437 * reset or bus reset (both of which cause an existing reservation
25438 * to be lost). If the reservation is lost issue RESERVE until a
25439 * period of min_ownership_delay with no resets has gone by, or
25440 * until max_ownership_delay has expired. This loop ensures that
25441 * the host really did manage to reserve the device, in spite of
25442 * resets. The looping for min_ownership_delay (default six
25443 * seconds) is important to early generation clustering products,
25444 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25445 * MHIOCENFAILFAST periodic timer of two seconds. By having
25446 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25447 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
25448 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
25449 * have already noticed, via the MHIOCENFAILFAST polling, that it
25450 * no longer "owns" the disk and will have panicked itself. Thus,
25451 * the host issuing the MHIOCTKOWN is assured (with timing
25452 * dependencies) that by the time it actually starts to use the
25453 * disk for real work, the old owner is no longer accessing it.
25455 * min_ownership_delay is the minimum amount of time for which the
25456 * disk must be reserved continuously devoid of resets before the
25457 * MHIOCTKOWN ioctl will return success.
25459 * max_ownership_delay indicates the amount of time by which the
25460 * take ownership should succeed or timeout with an error.
25462 * Arguments: dev - the device 'dev_t'
25463 * *p - struct containing timing info.
25465 * Return Code: 0 for success or error code
25469 sd_take_ownership(dev_t dev
, struct mhioctkown
*p
)
25474 int reservation_count
= 0;
25475 int min_ownership_delay
= 6000000; /* in usec */
25476 int max_ownership_delay
= 30000000; /* in usec */
25477 clock_t start_time
; /* starting time of this algorithm */
25478 clock_t end_time
; /* time limit for giving up */
25479 clock_t ownership_time
; /* time limit for stable ownership */
25480 clock_t current_time
;
25481 clock_t previous_current_time
;
25483 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
25488 * Attempt a device reservation. A priority reservation is requested.
25490 if ((rval
= sd_reserve_release(dev
, SD_PRIORITY_RESERVE
))
25492 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
25493 "sd_take_ownership: return(1)=%d\n", rval
);
25497 /* Update the softstate reserved status to indicate the reservation */
25498 mutex_enter(SD_MUTEX(un
));
25499 un
->un_resvd_status
|= SD_RESERVE
;
25500 un
->un_resvd_status
&=
25501 ~(SD_LOST_RESERVE
| SD_WANT_RESERVE
| SD_RESERVATION_CONFLICT
);
25502 mutex_exit(SD_MUTEX(un
));
25505 if (p
->min_ownership_delay
!= 0) {
25506 min_ownership_delay
= p
->min_ownership_delay
* 1000;
25508 if (p
->max_ownership_delay
!= 0) {
25509 max_ownership_delay
= p
->max_ownership_delay
* 1000;
25512 SD_INFO(SD_LOG_IOCTL_MHD
, un
,
25513 "sd_take_ownership: min, max delays: %d, %d\n",
25514 min_ownership_delay
, max_ownership_delay
);
25516 start_time
= ddi_get_lbolt();
25517 current_time
= start_time
;
25518 ownership_time
= current_time
+ drv_usectohz(min_ownership_delay
);
25519 end_time
= start_time
+ drv_usectohz(max_ownership_delay
);
25521 while (current_time
- end_time
< 0) {
25522 delay(drv_usectohz(500000));
25524 if ((err
= sd_reserve_release(dev
, SD_RESERVE
)) != 0) {
25525 if ((sd_reserve_release(dev
, SD_RESERVE
)) != 0) {
25526 mutex_enter(SD_MUTEX(un
));
25527 rval
= (un
->un_resvd_status
&
25528 SD_RESERVATION_CONFLICT
) ? EACCES
: EIO
;
25529 mutex_exit(SD_MUTEX(un
));
25533 previous_current_time
= current_time
;
25534 current_time
= ddi_get_lbolt();
25535 mutex_enter(SD_MUTEX(un
));
25536 if (err
|| (un
->un_resvd_status
& SD_LOST_RESERVE
)) {
25537 ownership_time
= ddi_get_lbolt() +
25538 drv_usectohz(min_ownership_delay
);
25539 reservation_count
= 0;
25541 reservation_count
++;
25543 un
->un_resvd_status
|= SD_RESERVE
;
25544 un
->un_resvd_status
&= ~(SD_LOST_RESERVE
| SD_WANT_RESERVE
);
25545 mutex_exit(SD_MUTEX(un
));
25547 SD_INFO(SD_LOG_IOCTL_MHD
, un
,
25548 "sd_take_ownership: ticks for loop iteration=%ld, "
25549 "reservation=%s\n", (current_time
- previous_current_time
),
25550 reservation_count
? "ok" : "reclaimed");
25552 if (current_time
- ownership_time
>= 0 &&
25553 reservation_count
>= 4) {
25554 rval
= 0; /* Achieved a stable ownership */
25557 if (current_time
- end_time
>= 0) {
25558 rval
= EACCES
; /* No ownership in max possible time */
25562 SD_TRACE(SD_LOG_IOCTL_MHD
, un
,
25563 "sd_take_ownership: return(2)=%d\n", rval
);
25569 * Function: sd_reserve_release()
25571 * Description: This function builds and sends scsi RESERVE, RELEASE, and
25572 * PRIORITY RESERVE commands based on a user specified command type
25574 * Arguments: dev - the device 'dev_t'
25575 * cmd - user specified command type; one of SD_PRIORITY_RESERVE,
25576 * SD_RESERVE, SD_RELEASE
25578 * Return Code: 0 or Error Code
25582 sd_reserve_release(dev_t dev
, int cmd
)
25584 struct uscsi_cmd
*com
= NULL
;
25585 struct sd_lun
*un
= NULL
;
25586 char cdb
[CDB_GROUP0
];
25589 ASSERT((cmd
== SD_RELEASE
) || (cmd
== SD_RESERVE
) ||
25590 (cmd
== SD_PRIORITY_RESERVE
));
25592 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
25596 /* instantiate and initialize the command and cdb */
25597 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
25598 bzero(cdb
, CDB_GROUP0
);
25599 com
->uscsi_flags
= USCSI_SILENT
;
25600 com
->uscsi_timeout
= un
->un_reserve_release_time
;
25601 com
->uscsi_cdblen
= CDB_GROUP0
;
25602 com
->uscsi_cdb
= cdb
;
25603 if (cmd
== SD_RELEASE
) {
25604 cdb
[0] = SCMD_RELEASE
;
25606 cdb
[0] = SCMD_RESERVE
;
25609 /* Send the command. */
25610 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
25614 * "break" a reservation that is held by another host, by issuing a
25615 * reset if priority reserve is desired, and we could not get the
25618 if ((cmd
== SD_PRIORITY_RESERVE
) &&
25619 (rval
!= 0) && (com
->uscsi_status
== STATUS_RESERVATION_CONFLICT
)) {
25621 * First try to reset the LUN. If we cannot, then try a target
25622 * reset, followed by a bus reset if the target reset fails.
25624 int reset_retval
= 0;
25625 if (un
->un_f_lun_reset_enabled
== TRUE
) {
25626 reset_retval
= scsi_reset(SD_ADDRESS(un
), RESET_LUN
);
25628 if (reset_retval
== 0) {
25629 /* The LUN reset either failed or was not issued */
25630 reset_retval
= scsi_reset(SD_ADDRESS(un
), RESET_TARGET
);
25632 if ((reset_retval
== 0) &&
25633 (scsi_reset(SD_ADDRESS(un
), RESET_ALL
) == 0)) {
25635 kmem_free(com
, sizeof (*com
));
25639 bzero(com
, sizeof (struct uscsi_cmd
));
25640 com
->uscsi_flags
= USCSI_SILENT
;
25641 com
->uscsi_cdb
= cdb
;
25642 com
->uscsi_cdblen
= CDB_GROUP0
;
25643 com
->uscsi_timeout
= 5;
25646 * Reissue the last reserve command, this time without request
25647 * sense. Assume that it is just a regular reserve command.
25649 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
25653 /* Return an error if still getting a reservation conflict. */
25654 if ((rval
!= 0) && (com
->uscsi_status
== STATUS_RESERVATION_CONFLICT
)) {
25658 kmem_free(com
, sizeof (*com
));
25663 #define SD_NDUMP_RETRIES 12
25665 * System Crash Dump routine
25669 sddump(dev_t dev
, caddr_t addr
, daddr_t blkno
, int nblk
)
25676 struct scsi_pkt
*wr_pktp
;
25679 daddr_t tgt_byte_offset
; /* rmw - byte offset for target */
25680 daddr_t tgt_blkno
; /* rmw - blkno for target */
25681 size_t tgt_byte_count
; /* rmw - # of bytes to xfer */
25682 size_t tgt_nblk
; /* rmw - # of tgt blks to xfer */
25683 size_t io_start_offset
;
25684 int doing_rmw
= FALSE
;
25688 diskaddr_t nblks
= 0;
25689 diskaddr_t start_block
;
25691 instance
= SDUNIT(dev
);
25692 if (((un
= ddi_get_soft_state(sd_state
, instance
)) == NULL
) ||
25693 !SD_IS_VALID_LABEL(un
) || ISCD(un
)) {
25697 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un
))
25699 SD_TRACE(SD_LOG_DUMP
, un
, "sddump: entry\n");
25701 partition
= SDPART(dev
);
25702 SD_INFO(SD_LOG_DUMP
, un
, "sddump: partition = %d\n", partition
);
25704 if (!(NOT_DEVBSIZE(un
))) {
25708 blknomask
= (un
->un_tgt_blocksize
/ DEV_BSIZE
) - 1;
25709 secmask
= un
->un_tgt_blocksize
- 1;
25711 if (blkno
& blknomask
) {
25712 SD_TRACE(SD_LOG_DUMP
, un
,
25713 "sddump: dump start block not modulo %d\n",
25714 un
->un_tgt_blocksize
);
25718 if ((nblk
* DEV_BSIZE
) & secmask
) {
25719 SD_TRACE(SD_LOG_DUMP
, un
,
25720 "sddump: dump length not modulo %d\n",
25721 un
->un_tgt_blocksize
);
25727 /* Validate blocks to dump at against partition size. */
25729 (void) cmlb_partinfo(un
->un_cmlbhandle
, partition
,
25730 &nblks
, &start_block
, NULL
, NULL
, (void *)SD_PATH_DIRECT
);
25732 if (NOT_DEVBSIZE(un
)) {
25733 if ((blkno
+ nblk
) > nblks
) {
25734 SD_TRACE(SD_LOG_DUMP
, un
,
25735 "sddump: dump range larger than partition: "
25736 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25737 blkno
, nblk
, nblks
);
25741 if (((blkno
/ (un
->un_tgt_blocksize
/ DEV_BSIZE
)) +
25742 (nblk
/ (un
->un_tgt_blocksize
/ DEV_BSIZE
))) > nblks
) {
25743 SD_TRACE(SD_LOG_DUMP
, un
,
25744 "sddump: dump range larger than partition: "
25745 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25746 blkno
, nblk
, nblks
);
25751 mutex_enter(&un
->un_pm_mutex
);
25752 if (SD_DEVICE_IS_IN_LOW_POWER(un
)) {
25753 struct scsi_pkt
*start_pktp
;
25755 mutex_exit(&un
->un_pm_mutex
);
25758 * use pm framework to power on HBA 1st
25760 (void) pm_raise_power(SD_DEVINFO(un
), 0,
25761 SD_PM_STATE_ACTIVE(un
));
25764 * Dump no long uses sdpower to power on a device, it's
25765 * in-line here so it can be done in polled mode.
25768 SD_INFO(SD_LOG_DUMP
, un
, "sddump: starting device\n");
25770 start_pktp
= scsi_init_pkt(SD_ADDRESS(un
), NULL
, NULL
,
25771 CDB_GROUP0
, un
->un_status_len
, 0, 0, NULL_FUNC
, NULL
);
25773 if (start_pktp
== NULL
) {
25774 /* We were not given a SCSI packet, fail. */
25777 bzero(start_pktp
->pkt_cdbp
, CDB_GROUP0
);
25778 start_pktp
->pkt_cdbp
[0] = SCMD_START_STOP
;
25779 start_pktp
->pkt_cdbp
[4] = SD_TARGET_START
;
25780 start_pktp
->pkt_flags
= FLAG_NOINTR
;
25782 mutex_enter(SD_MUTEX(un
));
25783 SD_FILL_SCSI1_LUN(un
, start_pktp
);
25784 mutex_exit(SD_MUTEX(un
));
25786 * Scsi_poll returns 0 (success) if the command completes and
25787 * the status block is STATUS_GOOD.
25789 if (sd_scsi_poll(un
, start_pktp
) != 0) {
25790 scsi_destroy_pkt(start_pktp
);
25793 scsi_destroy_pkt(start_pktp
);
25794 (void) sd_pm_state_change(un
, SD_PM_STATE_ACTIVE(un
),
25795 SD_PM_STATE_CHANGE
);
25797 mutex_exit(&un
->un_pm_mutex
);
25800 mutex_enter(SD_MUTEX(un
));
25801 un
->un_throttle
= 0;
25804 * The first time through, reset the specific target device.
25805 * However, when cpr calls sddump we know that sd is in a
25806 * a good state so no bus reset is required.
25807 * Clear sense data via Request Sense cmd.
25808 * In sddump we don't care about allow_bus_device_reset anymore
25811 if ((un
->un_state
!= SD_STATE_SUSPENDED
) &&
25812 (un
->un_state
!= SD_STATE_DUMPING
)) {
25814 New_state(un
, SD_STATE_DUMPING
);
25816 if (un
->un_f_is_fibre
== FALSE
) {
25817 mutex_exit(SD_MUTEX(un
));
25819 * Attempt a bus reset for parallel scsi.
25821 * Note: A bus reset is required because on some host
25822 * systems (i.e. E420R) a bus device reset is
25823 * insufficient to reset the state of the target.
25825 * Note: Don't issue the reset for fibre-channel,
25826 * because this tends to hang the bus (loop) for
25827 * too long while everyone is logging out and in
25828 * and the deadman timer for dumping will fire
25829 * before the dump is complete.
25831 if (scsi_reset(SD_ADDRESS(un
), RESET_ALL
) == 0) {
25832 mutex_enter(SD_MUTEX(un
));
25834 mutex_exit(SD_MUTEX(un
));
25838 /* Delay to give the device some recovery time. */
25839 drv_usecwait(10000);
25841 if (sd_send_polled_RQS(un
) == SD_FAILURE
) {
25842 SD_INFO(SD_LOG_DUMP
, un
,
25843 "sddump: sd_send_polled_RQS failed\n");
25845 mutex_enter(SD_MUTEX(un
));
25850 * Convert the partition-relative block number to a
25851 * disk physical block number.
25853 if (NOT_DEVBSIZE(un
)) {
25854 blkno
+= start_block
;
25856 blkno
= blkno
/ (un
->un_tgt_blocksize
/ DEV_BSIZE
);
25857 blkno
+= start_block
;
25860 SD_INFO(SD_LOG_DUMP
, un
, "sddump: disk blkno = 0x%x\n", blkno
);
25864 * Check if the device has a non-512 block size.
25867 if (NOT_DEVBSIZE(un
)) {
25868 tgt_byte_offset
= blkno
* un
->un_sys_blocksize
;
25869 tgt_byte_count
= nblk
* un
->un_sys_blocksize
;
25870 if ((tgt_byte_offset
% un
->un_tgt_blocksize
) ||
25871 (tgt_byte_count
% un
->un_tgt_blocksize
)) {
25874 * Calculate the block number and number of block
25875 * in terms of the media block size.
25877 tgt_blkno
= tgt_byte_offset
/ un
->un_tgt_blocksize
;
25879 ((tgt_byte_offset
+ tgt_byte_count
+
25880 (un
->un_tgt_blocksize
- 1)) /
25881 un
->un_tgt_blocksize
) - tgt_blkno
;
25884 * Invoke the routine which is going to do read part
25885 * of read-modify-write.
25886 * Note that this routine returns a pointer to
25887 * a valid bp in wr_bp.
25889 err
= sddump_do_read_of_rmw(un
, tgt_blkno
, tgt_nblk
,
25892 mutex_exit(SD_MUTEX(un
));
25896 * Offset is being calculated as -
25897 * (original block # * system block size) -
25898 * (new block # * target block size)
25901 ((uint64_t)(blkno
* un
->un_sys_blocksize
)) -
25902 ((uint64_t)(tgt_blkno
* un
->un_tgt_blocksize
));
25904 ASSERT((io_start_offset
>= 0) &&
25905 (io_start_offset
< un
->un_tgt_blocksize
));
25907 * Do the modify portion of read modify write.
25909 bcopy(addr
, &wr_bp
->b_un
.b_addr
[io_start_offset
],
25910 (size_t)nblk
* un
->un_sys_blocksize
);
25913 tgt_blkno
= tgt_byte_offset
/ un
->un_tgt_blocksize
;
25914 tgt_nblk
= tgt_byte_count
/ un
->un_tgt_blocksize
;
25917 /* Convert blkno and nblk to target blocks */
25922 bzero(wr_bp
, sizeof (struct buf
));
25923 wr_bp
->b_flags
= B_BUSY
;
25924 wr_bp
->b_un
.b_addr
= addr
;
25925 wr_bp
->b_bcount
= nblk
<< DEV_BSHIFT
;
25926 wr_bp
->b_resid
= 0;
25929 mutex_exit(SD_MUTEX(un
));
25932 * Obtain a SCSI packet for the write command.
25933 * It should be safe to call the allocator here without
25934 * worrying about being locked for DVMA mapping because
25935 * the address we're passed is already a DVMA mapping
25937 * We are also not going to worry about semaphore ownership
25938 * in the dump buffer. Dumping is single threaded at present.
25943 dma_resid
= wr_bp
->b_bcount
;
25946 if (!(NOT_DEVBSIZE(un
))) {
25947 nblk
= nblk
/ (un
->un_tgt_blocksize
/ DEV_BSIZE
);
25950 while (dma_resid
!= 0) {
25952 for (i
= 0; i
< SD_NDUMP_RETRIES
; i
++) {
25953 wr_bp
->b_flags
&= ~B_ERROR
;
25955 if (un
->un_partial_dma_supported
== 1) {
25957 ((wr_bp
->b_bcount
- dma_resid
) /
25958 un
->un_tgt_blocksize
);
25959 nblk
= dma_resid
/ un
->un_tgt_blocksize
;
25963 * Partial DMA transfers after initial transfer
25965 rval
= sd_setup_next_rw_pkt(un
, wr_pktp
, wr_bp
,
25968 /* Initial transfer */
25969 rval
= sd_setup_rw_pkt(un
, &wr_pktp
, wr_bp
,
25970 un
->un_pkt_flags
, NULL_FUNC
, NULL
,
25974 rval
= sd_setup_rw_pkt(un
, &wr_pktp
, wr_bp
,
25975 0, NULL_FUNC
, NULL
, blkno
, nblk
);
25979 /* We were given a SCSI packet, continue. */
25984 if (wr_bp
->b_flags
& B_ERROR
) {
25985 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
25986 "no resources for dumping; "
25987 "error code: 0x%x, retrying",
25990 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
25991 "no resources for dumping; retrying");
25993 } else if (i
!= (SD_NDUMP_RETRIES
- 1)) {
25994 if (wr_bp
->b_flags
& B_ERROR
) {
25995 scsi_log(SD_DEVINFO(un
), sd_label
, CE_CONT
,
25996 "no resources for dumping; error code: "
25997 "0x%x, retrying\n", geterror(wr_bp
));
26000 if (wr_bp
->b_flags
& B_ERROR
) {
26001 scsi_log(SD_DEVINFO(un
), sd_label
, CE_CONT
,
26002 "no resources for dumping; "
26003 "error code: 0x%x, retries failed, "
26004 "giving up.\n", geterror(wr_bp
));
26006 scsi_log(SD_DEVINFO(un
), sd_label
, CE_CONT
,
26007 "no resources for dumping; "
26008 "retries failed, giving up.\n");
26010 mutex_enter(SD_MUTEX(un
));
26012 if (NOT_DEVBSIZE(un
) && (doing_rmw
== TRUE
)) {
26013 mutex_exit(SD_MUTEX(un
));
26014 scsi_free_consistent_buf(wr_bp
);
26016 mutex_exit(SD_MUTEX(un
));
26020 drv_usecwait(10000);
26023 if (un
->un_partial_dma_supported
== 1) {
26025 * save the resid from PARTIAL_DMA
26027 dma_resid
= wr_pktp
->pkt_resid
;
26028 if (dma_resid
!= 0)
26029 nblk
-= SD_BYTES2TGTBLOCKS(un
, dma_resid
);
26030 wr_pktp
->pkt_resid
= 0;
26035 /* SunBug 1222170 */
26036 wr_pktp
->pkt_flags
= FLAG_NOINTR
;
26039 for (i
= 0; i
< SD_NDUMP_RETRIES
; i
++) {
26042 * Scsi_poll returns 0 (success) if the command completes and
26043 * the status block is STATUS_GOOD. We should only check
26044 * errors if this condition is not true. Even then we should
26045 * send our own request sense packet only if we have a check
26046 * condition and auto request sense has not been performed by
26049 SD_TRACE(SD_LOG_DUMP
, un
, "sddump: sending write\n");
26051 if ((sd_scsi_poll(un
, wr_pktp
) == 0) &&
26052 (wr_pktp
->pkt_resid
== 0)) {
26058 * Check CMD_DEV_GONE 1st, give up if device is gone.
26060 if (wr_pktp
->pkt_reason
== CMD_DEV_GONE
) {
26061 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
26062 "Error while dumping state...Device is gone\n");
26066 if (SD_GET_PKT_STATUS(wr_pktp
) == STATUS_CHECK
) {
26067 SD_INFO(SD_LOG_DUMP
, un
,
26068 "sddump: write failed with CHECK, try # %d\n", i
);
26069 if (((wr_pktp
->pkt_state
& STATE_ARQ_DONE
) == 0)) {
26070 (void) sd_send_polled_RQS(un
);
26076 if (SD_GET_PKT_STATUS(wr_pktp
) == STATUS_BUSY
) {
26077 int reset_retval
= 0;
26079 SD_INFO(SD_LOG_DUMP
, un
,
26080 "sddump: write failed with BUSY, try # %d\n", i
);
26082 if (un
->un_f_lun_reset_enabled
== TRUE
) {
26083 reset_retval
= scsi_reset(SD_ADDRESS(un
),
26086 if (reset_retval
== 0) {
26087 (void) scsi_reset(SD_ADDRESS(un
), RESET_TARGET
);
26089 (void) sd_send_polled_RQS(un
);
26092 SD_INFO(SD_LOG_DUMP
, un
,
26093 "sddump: write failed with 0x%x, try # %d\n",
26094 SD_GET_PKT_STATUS(wr_pktp
), i
);
26095 mutex_enter(SD_MUTEX(un
));
26096 sd_reset_target(un
, wr_pktp
);
26097 mutex_exit(SD_MUTEX(un
));
26101 * If we are not getting anywhere with lun/target resets,
26102 * let's reset the bus.
26104 if (i
== SD_NDUMP_RETRIES
/2) {
26105 (void) scsi_reset(SD_ADDRESS(un
), RESET_ALL
);
26106 (void) sd_send_polled_RQS(un
);
26111 scsi_destroy_pkt(wr_pktp
);
26112 mutex_enter(SD_MUTEX(un
));
26113 if ((NOT_DEVBSIZE(un
)) && (doing_rmw
== TRUE
)) {
26114 mutex_exit(SD_MUTEX(un
));
26115 scsi_free_consistent_buf(wr_bp
);
26117 mutex_exit(SD_MUTEX(un
));
26119 SD_TRACE(SD_LOG_DUMP
, un
, "sddump: exit: err = %d\n", err
);
26124 * Function: sd_scsi_poll()
26126 * Description: This is a wrapper for the scsi_poll call.
26128 * Arguments: sd_lun - The unit structure
26129 * scsi_pkt - The scsi packet being sent to the device.
26131 * Return Code: 0 - Command completed successfully with good status
26132 * -1 - Command failed. This could indicate a check condition
26133 * or other status value requiring recovery action.
26135 * NOTE: This code is only called off sddump().
26139 sd_scsi_poll(struct sd_lun
*un
, struct scsi_pkt
*pktp
)
26143 ASSERT(un
!= NULL
);
26144 ASSERT(!mutex_owned(SD_MUTEX(un
)));
26145 ASSERT(pktp
!= NULL
);
26147 status
= SD_SUCCESS
;
26149 if (scsi_ifgetcap(&pktp
->pkt_address
, "tagged-qing", 1) == 1) {
26150 pktp
->pkt_flags
|= un
->un_tagflags
;
26151 pktp
->pkt_flags
&= ~FLAG_NODISCON
;
26154 status
= sd_ddi_scsi_poll(pktp
);
26156 * Scsi_poll returns 0 (success) if the command completes and the
26157 * status block is STATUS_GOOD. We should only check errors if this
26158 * condition is not true. Even then we should send our own request
26159 * sense packet only if we have a check condition and auto
26160 * request sense has not been performed by the hba.
26161 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
26163 if ((status
!= SD_SUCCESS
) &&
26164 (SD_GET_PKT_STATUS(pktp
) == STATUS_CHECK
) &&
26165 (pktp
->pkt_state
& STATE_ARQ_DONE
) == 0 &&
26166 (pktp
->pkt_reason
!= CMD_DEV_GONE
))
26167 (void) sd_send_polled_RQS(un
);
26173 * Function: sd_send_polled_RQS()
26175 * Description: This sends the request sense command to a device.
26177 * Arguments: sd_lun - The unit structure
26179 * Return Code: 0 - Command completed successfully with good status
26180 * -1 - Command failed.
26185 sd_send_polled_RQS(struct sd_lun
*un
)
26188 struct scsi_pkt
*rqs_pktp
;
26189 struct buf
*rqs_bp
;
26191 ASSERT(un
!= NULL
);
26192 ASSERT(!mutex_owned(SD_MUTEX(un
)));
26194 ret_val
= SD_SUCCESS
;
26196 rqs_pktp
= un
->un_rqs_pktp
;
26197 rqs_bp
= un
->un_rqs_bp
;
26199 mutex_enter(SD_MUTEX(un
));
26201 if (un
->un_sense_isbusy
) {
26202 ret_val
= SD_FAILURE
;
26203 mutex_exit(SD_MUTEX(un
));
26208 * If the request sense buffer (and packet) is not in use,
26209 * let's set the un_sense_isbusy and send our packet
26211 un
->un_sense_isbusy
= 1;
26212 rqs_pktp
->pkt_resid
= 0;
26213 rqs_pktp
->pkt_reason
= 0;
26214 rqs_pktp
->pkt_flags
|= FLAG_NOINTR
;
26215 bzero(rqs_bp
->b_un
.b_addr
, SENSE_LENGTH
);
26217 mutex_exit(SD_MUTEX(un
));
26219 SD_INFO(SD_LOG_COMMON
, un
, "sd_send_polled_RQS: req sense buf at"
26220 " 0x%p\n", rqs_bp
->b_un
.b_addr
);
26223 * Can't send this to sd_scsi_poll, we wrap ourselves around the
26224 * axle - it has a call into us!
26226 if ((ret_val
= sd_ddi_scsi_poll(rqs_pktp
)) != 0) {
26227 SD_INFO(SD_LOG_COMMON
, un
,
26228 "sd_send_polled_RQS: RQS failed\n");
26231 SD_DUMP_MEMORY(un
, SD_LOG_COMMON
, "sd_send_polled_RQS:",
26232 (uchar_t
*)rqs_bp
->b_un
.b_addr
, SENSE_LENGTH
, SD_LOG_HEX
);
26234 mutex_enter(SD_MUTEX(un
));
26235 un
->un_sense_isbusy
= 0;
26236 mutex_exit(SD_MUTEX(un
));
26242 * Defines needed for localized version of the scsi_poll routine.
26244 #define CSEC 10000 /* usecs */
26245 #define SEC_TO_CSEC (1000000/CSEC)
26248 * Function: sd_ddi_scsi_poll()
26250 * Description: Localized version of the scsi_poll routine. The purpose is to
26251 * send a scsi_pkt to a device as a polled command. This version
26252 * is to ensure more robust handling of transport errors.
26253 * Specifically this routine cures not ready, coming ready
26254 * transition for power up and reset of sonoma's. This can take
26255 * up to 45 seconds for power-on and 20 seconds for reset of a
26258 * Arguments: scsi_pkt - The scsi_pkt being sent to a device
26260 * Return Code: 0 - Command completed successfully with good status
26261 * -1 - Command failed.
26263 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
26264 * be fixed (removing this code), we need to determine how to handle the
26265 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
26267 * NOTE: This code is only called off sddump().
26270 sd_ddi_scsi_poll(struct scsi_pkt
*pkt
)
26281 struct scsi_arq_status
*arqstat
;
26282 extern int do_polled_io
;
26284 ASSERT(pkt
->pkt_scbp
);
26289 savef
= pkt
->pkt_flags
;
26290 savec
= pkt
->pkt_comp
;
26291 savet
= pkt
->pkt_time
;
26293 pkt
->pkt_flags
|= FLAG_NOINTR
;
26296 * XXX there is nothing in the SCSA spec that states that we should not
26297 * do a callback for polled cmds; however, removing this will break sd
26298 * and probably other target drivers
26300 pkt
->pkt_comp
= NULL
;
26303 * we don't like a polled command without timeout.
26304 * 60 seconds seems long enough.
26306 if (pkt
->pkt_time
== 0)
26307 pkt
->pkt_time
= SCSI_POLL_TIMEOUT
;
26312 * We do some error recovery for various errors. Tran_busy,
26313 * queue full, and non-dispatched commands are retried every 10 msec.
26314 * as they are typically transient failures. Busy status and Not
26315 * Ready are retried every second as this status takes a while to
26318 timeout
= pkt
->pkt_time
* SEC_TO_CSEC
;
26320 for (busy_count
= 0; busy_count
< timeout
; busy_count
++) {
26322 * Initialize pkt status variables.
26324 *pkt
->pkt_scbp
= pkt
->pkt_reason
= pkt
->pkt_state
= 0;
26326 if ((rc
= scsi_transport(pkt
)) != TRAN_ACCEPT
) {
26327 if (rc
!= TRAN_BUSY
) {
26328 /* Transport failed - give up. */
26331 /* Transport busy - try again. */
26332 poll_delay
= 1 * CSEC
; /* 10 msec. */
26336 * Transport accepted - check pkt status.
26338 rc
= (*pkt
->pkt_scbp
) & STATUS_MASK
;
26339 if ((pkt
->pkt_reason
== CMD_CMPLT
) &&
26340 (rc
== STATUS_CHECK
) &&
26341 (pkt
->pkt_state
& STATE_ARQ_DONE
)) {
26343 (struct scsi_arq_status
*)(pkt
->pkt_scbp
);
26344 sensep
= (uint8_t *)&arqstat
->sts_sensedata
;
26349 if ((pkt
->pkt_reason
== CMD_CMPLT
) &&
26350 (rc
== STATUS_GOOD
)) {
26351 /* No error - we're done */
26355 } else if (pkt
->pkt_reason
== CMD_DEV_GONE
) {
26356 /* Lost connection - give up */
26359 } else if ((pkt
->pkt_reason
== CMD_INCOMPLETE
) &&
26360 (pkt
->pkt_state
== 0)) {
26361 /* Pkt not dispatched - try again. */
26362 poll_delay
= 1 * CSEC
; /* 10 msec. */
26364 } else if ((pkt
->pkt_reason
== CMD_CMPLT
) &&
26365 (rc
== STATUS_QFULL
)) {
26366 /* Queue full - try again. */
26367 poll_delay
= 1 * CSEC
; /* 10 msec. */
26369 } else if ((pkt
->pkt_reason
== CMD_CMPLT
) &&
26370 (rc
== STATUS_BUSY
)) {
26371 /* Busy - try again. */
26372 poll_delay
= 100 * CSEC
; /* 1 sec. */
26373 busy_count
+= (SEC_TO_CSEC
- 1);
26375 } else if ((sensep
!= NULL
) &&
26376 (scsi_sense_key(sensep
) == KEY_UNIT_ATTENTION
)) {
26378 * Unit Attention - try again.
26379 * Pretend it took 1 sec.
26380 * NOTE: 'continue' avoids poll_delay
26382 busy_count
+= (SEC_TO_CSEC
- 1);
26385 } else if ((sensep
!= NULL
) &&
26386 (scsi_sense_key(sensep
) == KEY_NOT_READY
) &&
26387 (scsi_sense_asc(sensep
) == 0x04) &&
26388 (scsi_sense_ascq(sensep
) == 0x01)) {
26390 * Not ready -> ready - try again.
26391 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
26392 * ...same as STATUS_BUSY
26394 poll_delay
= 100 * CSEC
; /* 1 sec. */
26395 busy_count
+= (SEC_TO_CSEC
- 1);
26398 /* BAD status - give up. */
26403 if (((curthread
->t_flag
& T_INTR_THREAD
) == 0) &&
26405 delay(drv_usectohz(poll_delay
));
26407 /* we busy wait during cpr_dump or interrupt threads */
26408 drv_usecwait(poll_delay
);
26412 pkt
->pkt_flags
= savef
;
26413 pkt
->pkt_comp
= savec
;
26414 pkt
->pkt_time
= savet
;
26416 /* return on error */
26421 * This is not a performance critical code path.
26423 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
26424 * issues associated with looking at DMA memory prior to
26425 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
26427 scsi_sync_pkt(pkt
);
26434 * Function: sd_persistent_reservation_in_read_keys
26436 * Description: This routine is the driver entry point for handling CD-ROM
26437 * multi-host persistent reservation requests (MHIOCGRP_INKEYS)
26438 * by sending the SCSI-3 PRIN commands to the device.
26439 * Processes the read keys command response by copying the
26440 * reservation key information into the user provided buffer.
26441 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
26443 * Arguments: un - Pointer to soft state struct for the target.
26444 * usrp - user provided pointer to multihost Persistent In Read
26445 * Keys structure (mhioc_inkeys_t)
26446 * flag - this argument is a pass through to ddi_copyxxx()
26447 * directly from the mode argument of ioctl().
26449 * Return Code: 0 - Success
26452 * errno return code from sd_send_scsi_cmd()
26454 * Context: Can sleep. Does not return until command is completed.
26458 sd_persistent_reservation_in_read_keys(struct sd_lun
*un
,
26459 mhioc_inkeys_t
*usrp
, int flag
)
26461 #ifdef _MULTI_DATAMODEL
26462 struct mhioc_key_list32 li32
;
26464 sd_prin_readkeys_t
*in
;
26465 mhioc_inkeys_t
*ptr
;
26466 mhioc_key_list_t li
;
26467 uchar_t
*data_bufp
;
26473 if ((ptr
= (mhioc_inkeys_t
*)usrp
) == NULL
) {
26476 bzero(&li
, sizeof (mhioc_key_list_t
));
26478 ssc
= sd_ssc_init(un
);
26481 * Get the listsize from user
26483 #ifdef _MULTI_DATAMODEL
26485 switch (ddi_model_convert_from(flag
& FMODELS
)) {
26486 case DDI_MODEL_ILP32
:
26487 copysz
= sizeof (struct mhioc_key_list32
);
26488 if (ddi_copyin(ptr
->li
, &li32
, copysz
, flag
)) {
26489 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26490 "sd_persistent_reservation_in_read_keys: "
26491 "failed ddi_copyin: mhioc_key_list32_t\n");
26495 li
.listsize
= li32
.listsize
;
26496 li
.list
= (mhioc_resv_key_t
*)(uintptr_t)li32
.list
;
26499 case DDI_MODEL_NONE
:
26500 copysz
= sizeof (mhioc_key_list_t
);
26501 if (ddi_copyin(ptr
->li
, &li
, copysz
, flag
)) {
26502 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26503 "sd_persistent_reservation_in_read_keys: "
26504 "failed ddi_copyin: mhioc_key_list_t\n");
26511 #else /* ! _MULTI_DATAMODEL */
26512 copysz
= sizeof (mhioc_key_list_t
);
26513 if (ddi_copyin(ptr
->li
, &li
, copysz
, flag
)) {
26514 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26515 "sd_persistent_reservation_in_read_keys: "
26516 "failed ddi_copyin: mhioc_key_list_t\n");
26522 data_len
= li
.listsize
* MHIOC_RESV_KEY_SIZE
;
26523 data_len
+= (sizeof (sd_prin_readkeys_t
) - sizeof (caddr_t
));
26524 data_bufp
= kmem_zalloc(data_len
, KM_SLEEP
);
26526 rval
= sd_send_scsi_PERSISTENT_RESERVE_IN(ssc
, SD_READ_KEYS
,
26527 data_len
, data_bufp
);
26530 sd_ssc_assessment(ssc
, SD_FMT_IGNORE_COMPROMISE
);
26532 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
26535 in
= (sd_prin_readkeys_t
*)data_bufp
;
26536 ptr
->generation
= BE_32(in
->generation
);
26537 li
.listlen
= BE_32(in
->len
) / MHIOC_RESV_KEY_SIZE
;
26540 * Return the min(listsize, listlen) keys
26542 #ifdef _MULTI_DATAMODEL
26544 switch (ddi_model_convert_from(flag
& FMODELS
)) {
26545 case DDI_MODEL_ILP32
:
26546 li32
.listlen
= li
.listlen
;
26547 if (ddi_copyout(&li32
, ptr
->li
, copysz
, flag
)) {
26548 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26549 "sd_persistent_reservation_in_read_keys: "
26550 "failed ddi_copyout: mhioc_key_list32_t\n");
26556 case DDI_MODEL_NONE
:
26557 if (ddi_copyout(&li
, ptr
->li
, copysz
, flag
)) {
26558 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26559 "sd_persistent_reservation_in_read_keys: "
26560 "failed ddi_copyout: mhioc_key_list_t\n");
26567 #else /* ! _MULTI_DATAMODEL */
26569 if (ddi_copyout(&li
, ptr
->li
, copysz
, flag
)) {
26570 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26571 "sd_persistent_reservation_in_read_keys: "
26572 "failed ddi_copyout: mhioc_key_list_t\n");
26577 #endif /* _MULTI_DATAMODEL */
26579 copysz
= min(li
.listlen
* MHIOC_RESV_KEY_SIZE
,
26580 li
.listsize
* MHIOC_RESV_KEY_SIZE
);
26581 if (ddi_copyout(&in
->keylist
, li
.list
, copysz
, flag
)) {
26582 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26583 "sd_persistent_reservation_in_read_keys: "
26584 "failed ddi_copyout: keylist\n");
26589 kmem_free(data_bufp
, data_len
);
26595 * Function: sd_persistent_reservation_in_read_resv
26597 * Description: This routine is the driver entry point for handling CD-ROM
26598 * multi-host persistent reservation requests (MHIOCGRP_INRESV)
26599 * by sending the SCSI-3 PRIN commands to the device.
26600 * Process the read persistent reservations command response by
26601 * copying the reservation information into the user provided
26602 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
26604 * Arguments: un - Pointer to soft state struct for the target.
26605 * usrp - user provided pointer to multihost Persistent In Read
26606 * Keys structure (mhioc_inkeys_t)
26607 * flag - this argument is a pass through to ddi_copyxxx()
26608 * directly from the mode argument of ioctl().
26610 * Return Code: 0 - Success
26613 * errno return code from sd_send_scsi_cmd()
26615 * Context: Can sleep. Does not return until command is completed.
26619 sd_persistent_reservation_in_read_resv(struct sd_lun
*un
,
26620 mhioc_inresvs_t
*usrp
, int flag
)
26622 #ifdef _MULTI_DATAMODEL
26623 struct mhioc_resv_desc_list32 resvlist32
;
26625 sd_prin_readresv_t
*in
;
26626 mhioc_inresvs_t
*ptr
;
26627 sd_readresv_desc_t
*readresv_ptr
;
26628 mhioc_resv_desc_list_t resvlist
;
26629 mhioc_resv_desc_t resvdesc
;
26630 uchar_t
*data_bufp
= NULL
;
26635 mhioc_resv_desc_t
*bufp
;
26638 if ((ptr
= usrp
) == NULL
) {
26642 ssc
= sd_ssc_init(un
);
26645 * Get the listsize from user
26647 #ifdef _MULTI_DATAMODEL
26648 switch (ddi_model_convert_from(flag
& FMODELS
)) {
26649 case DDI_MODEL_ILP32
:
26650 copysz
= sizeof (struct mhioc_resv_desc_list32
);
26651 if (ddi_copyin(ptr
->li
, &resvlist32
, copysz
, flag
)) {
26652 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26653 "sd_persistent_reservation_in_read_resv: "
26654 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26658 resvlist
.listsize
= resvlist32
.listsize
;
26659 resvlist
.list
= (mhioc_resv_desc_t
*)(uintptr_t)resvlist32
.list
;
26662 case DDI_MODEL_NONE
:
26663 copysz
= sizeof (mhioc_resv_desc_list_t
);
26664 if (ddi_copyin(ptr
->li
, &resvlist
, copysz
, flag
)) {
26665 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26666 "sd_persistent_reservation_in_read_resv: "
26667 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26673 #else /* ! _MULTI_DATAMODEL */
26674 copysz
= sizeof (mhioc_resv_desc_list_t
);
26675 if (ddi_copyin(ptr
->li
, &resvlist
, copysz
, flag
)) {
26676 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26677 "sd_persistent_reservation_in_read_resv: "
26678 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26682 #endif /* ! _MULTI_DATAMODEL */
26684 data_len
= resvlist
.listsize
* SCSI3_RESV_DESC_LEN
;
26685 data_len
+= (sizeof (sd_prin_readresv_t
) - sizeof (caddr_t
));
26686 data_bufp
= kmem_zalloc(data_len
, KM_SLEEP
);
26688 rval
= sd_send_scsi_PERSISTENT_RESERVE_IN(ssc
, SD_READ_RESV
,
26689 data_len
, data_bufp
);
26692 sd_ssc_assessment(ssc
, SD_FMT_IGNORE_COMPROMISE
);
26694 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
26697 in
= (sd_prin_readresv_t
*)data_bufp
;
26698 ptr
->generation
= BE_32(in
->generation
);
26699 resvlist
.listlen
= BE_32(in
->len
) / SCSI3_RESV_DESC_LEN
;
26702 * Return the min(listsize, listlen( keys
26704 #ifdef _MULTI_DATAMODEL
26706 switch (ddi_model_convert_from(flag
& FMODELS
)) {
26707 case DDI_MODEL_ILP32
:
26708 resvlist32
.listlen
= resvlist
.listlen
;
26709 if (ddi_copyout(&resvlist32
, ptr
->li
, copysz
, flag
)) {
26710 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26711 "sd_persistent_reservation_in_read_resv: "
26712 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26718 case DDI_MODEL_NONE
:
26719 if (ddi_copyout(&resvlist
, ptr
->li
, copysz
, flag
)) {
26720 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26721 "sd_persistent_reservation_in_read_resv: "
26722 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26729 #else /* ! _MULTI_DATAMODEL */
26731 if (ddi_copyout(&resvlist
, ptr
->li
, copysz
, flag
)) {
26732 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26733 "sd_persistent_reservation_in_read_resv: "
26734 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26739 #endif /* ! _MULTI_DATAMODEL */
26741 readresv_ptr
= (sd_readresv_desc_t
*)&in
->readresv_desc
;
26742 bufp
= resvlist
.list
;
26743 copysz
= sizeof (mhioc_resv_desc_t
);
26744 for (i
= 0; i
< min(resvlist
.listlen
, resvlist
.listsize
);
26745 i
++, readresv_ptr
++, bufp
++) {
26747 bcopy(&readresv_ptr
->resvkey
, &resvdesc
.key
,
26748 MHIOC_RESV_KEY_SIZE
);
26749 resvdesc
.type
= readresv_ptr
->type
;
26750 resvdesc
.scope
= readresv_ptr
->scope
;
26751 resvdesc
.scope_specific_addr
=
26752 BE_32(readresv_ptr
->scope_specific_addr
);
26754 if (ddi_copyout(&resvdesc
, bufp
, copysz
, flag
)) {
26755 SD_ERROR(SD_LOG_IOCTL_MHD
, un
,
26756 "sd_persistent_reservation_in_read_resv: "
26757 "failed ddi_copyout: resvlist\n");
26764 /* only if data_bufp is allocated, we need to free it */
26766 kmem_free(data_bufp
, data_len
);
26773 * Function: sr_change_blkmode()
26775 * Description: This routine is the driver entry point for handling CD-ROM
26776 * block mode ioctl requests. Support for returning and changing
26777 * the current block size in use by the device is implemented. The
26778 * LBA size is changed via a MODE SELECT Block Descriptor.
26780 * This routine issues a mode sense with an allocation length of
26781 * 12 bytes for the mode page header and a single block descriptor.
26783 * Arguments: dev - the device 'dev_t'
26784 * cmd - the request type; one of CDROMGBLKMODE (get) or
26785 * CDROMSBLKMODE (set)
26786 * data - current block size or requested block size
26787 * flag - this argument is a pass through to ddi_copyxxx() directly
26788 * from the mode argument of ioctl().
26790 * Return Code: the code returned by sd_send_scsi_cmd()
26791 * EINVAL if invalid arguments are provided
26792 * EFAULT if ddi_copyxxx() fails
26793 * ENXIO if fail ddi_get_soft_state
26794 * EIO if invalid mode sense block descriptor length
26799 sr_change_blkmode(dev_t dev
, int cmd
, intptr_t data
, int flag
)
26801 struct sd_lun
*un
= NULL
;
26802 struct mode_header
*sense_mhp
, *select_mhp
;
26803 struct block_descriptor
*sense_desc
, *select_desc
;
26806 uchar_t
*sense
= NULL
;
26807 uchar_t
*select
= NULL
;
26810 ASSERT((cmd
== CDROMGBLKMODE
) || (cmd
== CDROMSBLKMODE
));
26812 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
26817 * The block length is changed via the Mode Select block descriptor, the
26818 * "Read/Write Error Recovery" mode page (0x1) contents are not actually
26819 * required as part of this routine. Therefore the mode sense allocation
26820 * length is specified to be the length of a mode page header and a
26821 * block descriptor.
26823 sense
= kmem_zalloc(BUFLEN_CHG_BLK_MODE
, KM_SLEEP
);
26825 ssc
= sd_ssc_init(un
);
26826 rval
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP0
, sense
,
26827 BUFLEN_CHG_BLK_MODE
, MODEPAGE_ERR_RECOV
, SD_PATH_STANDARD
);
26830 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
26831 "sr_change_blkmode: Mode Sense Failed\n");
26832 kmem_free(sense
, BUFLEN_CHG_BLK_MODE
);
26836 /* Check the block descriptor len to handle only 1 block descriptor */
26837 sense_mhp
= (struct mode_header
*)sense
;
26838 if ((sense_mhp
->bdesc_length
== 0) ||
26839 (sense_mhp
->bdesc_length
> MODE_BLK_DESC_LENGTH
)) {
26840 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
26841 "sr_change_blkmode: Mode Sense returned invalid block"
26842 " descriptor length\n");
26843 kmem_free(sense
, BUFLEN_CHG_BLK_MODE
);
26846 sense_desc
= (struct block_descriptor
*)(sense
+ MODE_HEADER_LENGTH
);
26847 current_bsize
= ((sense_desc
->blksize_hi
<< 16) |
26848 (sense_desc
->blksize_mid
<< 8) | sense_desc
->blksize_lo
);
26850 /* Process command */
26852 case CDROMGBLKMODE
:
26853 /* Return the block size obtained during the mode sense */
26854 if (ddi_copyout(¤t_bsize
, (void *)data
,
26855 sizeof (int), flag
) != 0)
26858 case CDROMSBLKMODE
:
26859 /* Validate the requested block size */
26861 case CDROM_BLK_512
:
26862 case CDROM_BLK_1024
:
26863 case CDROM_BLK_2048
:
26864 case CDROM_BLK_2056
:
26865 case CDROM_BLK_2336
:
26866 case CDROM_BLK_2340
:
26867 case CDROM_BLK_2352
:
26868 case CDROM_BLK_2368
:
26869 case CDROM_BLK_2448
:
26870 case CDROM_BLK_2646
:
26871 case CDROM_BLK_2647
:
26874 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
26875 "sr_change_blkmode: "
26876 "Block Size '%ld' Not Supported\n", data
);
26877 kmem_free(sense
, BUFLEN_CHG_BLK_MODE
);
26882 * The current block size matches the requested block size so
26883 * there is no need to send the mode select to change the size
26885 if (current_bsize
== data
) {
26889 /* Build the select data for the requested block size */
26890 select
= kmem_zalloc(BUFLEN_CHG_BLK_MODE
, KM_SLEEP
);
26891 select_mhp
= (struct mode_header
*)select
;
26893 (struct block_descriptor
*)(select
+ MODE_HEADER_LENGTH
);
26895 * The LBA size is changed via the block descriptor, so the
26896 * descriptor is built according to the user data
26898 select_mhp
->bdesc_length
= MODE_BLK_DESC_LENGTH
;
26899 select_desc
->blksize_hi
= (char)(((data
) & 0x00ff0000) >> 16);
26900 select_desc
->blksize_mid
= (char)(((data
) & 0x0000ff00) >> 8);
26901 select_desc
->blksize_lo
= (char)((data
) & 0x000000ff);
26903 /* Send the mode select for the requested block size */
26904 ssc
= sd_ssc_init(un
);
26905 rval
= sd_send_scsi_MODE_SELECT(ssc
, CDB_GROUP0
,
26906 select
, BUFLEN_CHG_BLK_MODE
, SD_DONTSAVE_PAGE
,
26910 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
26911 "sr_change_blkmode: Mode Select Failed\n");
26913 * The mode select failed for the requested block size,
26914 * so reset the data for the original block size and
26915 * send it to the target. The error is indicated by the
26916 * return value for the failed mode select.
26918 select_desc
->blksize_hi
= sense_desc
->blksize_hi
;
26919 select_desc
->blksize_mid
= sense_desc
->blksize_mid
;
26920 select_desc
->blksize_lo
= sense_desc
->blksize_lo
;
26921 ssc
= sd_ssc_init(un
);
26922 (void) sd_send_scsi_MODE_SELECT(ssc
, CDB_GROUP0
,
26923 select
, BUFLEN_CHG_BLK_MODE
, SD_DONTSAVE_PAGE
,
26927 ASSERT(!mutex_owned(SD_MUTEX(un
)));
26928 mutex_enter(SD_MUTEX(un
));
26929 sd_update_block_info(un
, (uint32_t)data
, 0);
26930 mutex_exit(SD_MUTEX(un
));
26934 /* should not reach here, but check anyway */
26935 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
26936 "sr_change_blkmode: Command '%x' Not Supported\n", cmd
);
26942 kmem_free(select
, BUFLEN_CHG_BLK_MODE
);
26945 kmem_free(sense
, BUFLEN_CHG_BLK_MODE
);
26952 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
26953 * implement driver support for getting and setting the CD speed. The command
26954 * set used will be based on the device type. If the device has not been
26955 * identified as MMC the Toshiba vendor specific mode page will be used. If
26956 * the device is MMC but does not support the Real Time Streaming feature
26957 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
26958 * be used to read the speed.
26962 * Function: sr_change_speed()
26964 * Description: This routine is the driver entry point for handling CD-ROM
26965 * drive speed ioctl requests for devices supporting the Toshiba
26966 * vendor specific drive speed mode page. Support for returning
26967 * and changing the current drive speed in use by the device is
26970 * Arguments: dev - the device 'dev_t'
26971 * cmd - the request type; one of CDROMGDRVSPEED (get) or
26972 * CDROMSDRVSPEED (set)
26973 * data - current drive speed or requested drive speed
26974 * flag - this argument is a pass through to ddi_copyxxx() directly
26975 * from the mode argument of ioctl().
26977 * Return Code: the code returned by sd_send_scsi_cmd()
26978 * EINVAL if invalid arguments are provided
26979 * EFAULT if ddi_copyxxx() fails
26980 * ENXIO if fail ddi_get_soft_state
26981 * EIO if invalid mode sense block descriptor length
26985 sr_change_speed(dev_t dev
, int cmd
, intptr_t data
, int flag
)
26987 struct sd_lun
*un
= NULL
;
26988 struct mode_header
*sense_mhp
, *select_mhp
;
26989 struct mode_speed
*sense_page
, *select_page
;
26993 uchar_t
*sense
= NULL
;
26994 uchar_t
*select
= NULL
;
26997 ASSERT((cmd
== CDROMGDRVSPEED
) || (cmd
== CDROMSDRVSPEED
));
26998 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
27003 * Note: The drive speed is being modified here according to a Toshiba
27004 * vendor specific mode page (0x31).
27006 sense
= kmem_zalloc(BUFLEN_MODE_CDROM_SPEED
, KM_SLEEP
);
27008 ssc
= sd_ssc_init(un
);
27009 rval
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP0
, sense
,
27010 BUFLEN_MODE_CDROM_SPEED
, CDROM_MODE_SPEED
,
27014 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27015 "sr_change_speed: Mode Sense Failed\n");
27016 kmem_free(sense
, BUFLEN_MODE_CDROM_SPEED
);
27019 sense_mhp
= (struct mode_header
*)sense
;
27021 /* Check the block descriptor len to handle only 1 block descriptor */
27022 bd_len
= sense_mhp
->bdesc_length
;
27023 if (bd_len
> MODE_BLK_DESC_LENGTH
) {
27024 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27025 "sr_change_speed: Mode Sense returned invalid block "
27026 "descriptor length\n");
27027 kmem_free(sense
, BUFLEN_MODE_CDROM_SPEED
);
27031 sense_page
= (struct mode_speed
*)
27032 (sense
+ MODE_HEADER_LENGTH
+ sense_mhp
->bdesc_length
);
27033 current_speed
= sense_page
->speed
;
27035 /* Process command */
27037 case CDROMGDRVSPEED
:
27038 /* Return the drive speed obtained during the mode sense */
27039 if (current_speed
== 0x2) {
27040 current_speed
= CDROM_TWELVE_SPEED
;
27042 if (ddi_copyout(¤t_speed
, (void *)data
,
27043 sizeof (int), flag
) != 0) {
27047 case CDROMSDRVSPEED
:
27048 /* Validate the requested drive speed */
27049 switch ((uchar_t
)data
) {
27050 case CDROM_TWELVE_SPEED
:
27053 case CDROM_NORMAL_SPEED
:
27054 case CDROM_DOUBLE_SPEED
:
27055 case CDROM_QUAD_SPEED
:
27056 case CDROM_MAXIMUM_SPEED
:
27059 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27060 "sr_change_speed: "
27061 "Drive Speed '%d' Not Supported\n", (uchar_t
)data
);
27062 kmem_free(sense
, BUFLEN_MODE_CDROM_SPEED
);
27067 * The current drive speed matches the requested drive speed so
27068 * there is no need to send the mode select to change the speed
27070 if (current_speed
== data
) {
27074 /* Build the select data for the requested drive speed */
27075 select
= kmem_zalloc(BUFLEN_MODE_CDROM_SPEED
, KM_SLEEP
);
27076 select_mhp
= (struct mode_header
*)select
;
27077 select_mhp
->bdesc_length
= 0;
27079 (struct mode_speed
*)(select
+ MODE_HEADER_LENGTH
);
27081 (struct mode_speed
*)(select
+ MODE_HEADER_LENGTH
);
27082 select_page
->mode_page
.code
= CDROM_MODE_SPEED
;
27083 select_page
->mode_page
.length
= 2;
27084 select_page
->speed
= (uchar_t
)data
;
27086 /* Send the mode select for the requested block size */
27087 ssc
= sd_ssc_init(un
);
27088 rval
= sd_send_scsi_MODE_SELECT(ssc
, CDB_GROUP0
, select
,
27089 MODEPAGE_CDROM_SPEED_LEN
+ MODE_HEADER_LENGTH
,
27090 SD_DONTSAVE_PAGE
, SD_PATH_STANDARD
);
27094 * The mode select failed for the requested drive speed,
27095 * so reset the data for the original drive speed and
27096 * send it to the target. The error is indicated by the
27097 * return value for the failed mode select.
27099 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27100 "sr_drive_speed: Mode Select Failed\n");
27101 select_page
->speed
= sense_page
->speed
;
27102 ssc
= sd_ssc_init(un
);
27103 (void) sd_send_scsi_MODE_SELECT(ssc
, CDB_GROUP0
, select
,
27104 MODEPAGE_CDROM_SPEED_LEN
+ MODE_HEADER_LENGTH
,
27105 SD_DONTSAVE_PAGE
, SD_PATH_STANDARD
);
27110 /* should not reach here, but check anyway */
27111 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27112 "sr_change_speed: Command '%x' Not Supported\n", cmd
);
27118 kmem_free(select
, BUFLEN_MODE_CDROM_SPEED
);
27121 kmem_free(sense
, BUFLEN_MODE_CDROM_SPEED
);
27129 * Function: sr_atapi_change_speed()
27131 * Description: This routine is the driver entry point for handling CD-ROM
27132 * drive speed ioctl requests for MMC devices that do not support
27133 * the Real Time Streaming feature (0x107).
27135 * Note: This routine will use the SET SPEED command which may not
27136 * be supported by all devices.
27138 * Arguments: dev- the device 'dev_t'
27139 * cmd- the request type; one of CDROMGDRVSPEED (get) or
27140 * CDROMSDRVSPEED (set)
27141 * data- current drive speed or requested drive speed
27142 * flag- this argument is a pass through to ddi_copyxxx() directly
27143 * from the mode argument of ioctl().
27145 * Return Code: the code returned by sd_send_scsi_cmd()
27146 * EINVAL if invalid arguments are provided
27147 * EFAULT if ddi_copyxxx() fails
27148 * ENXIO if fail ddi_get_soft_state
27149 * EIO if invalid mode sense block descriptor length
27153 sr_atapi_change_speed(dev_t dev
, int cmd
, intptr_t data
, int flag
)
27156 struct uscsi_cmd
*com
= NULL
;
27157 struct mode_header_grp2
*sense_mhp
;
27158 uchar_t
*sense_page
;
27159 uchar_t
*sense
= NULL
;
27160 char cdb
[CDB_GROUP5
];
27162 int current_speed
= 0;
27167 ASSERT((cmd
== CDROMGDRVSPEED
) || (cmd
== CDROMSDRVSPEED
));
27169 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
27173 sense
= kmem_zalloc(BUFLEN_MODE_CDROM_CAP
, KM_SLEEP
);
27175 ssc
= sd_ssc_init(un
);
27176 rval
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP1
, sense
,
27177 BUFLEN_MODE_CDROM_CAP
, MODEPAGE_CDROM_CAP
,
27181 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27182 "sr_atapi_change_speed: Mode Sense Failed\n");
27183 kmem_free(sense
, BUFLEN_MODE_CDROM_CAP
);
27187 /* Check the block descriptor len to handle only 1 block descriptor */
27188 sense_mhp
= (struct mode_header_grp2
*)sense
;
27189 bd_len
= (sense_mhp
->bdesc_length_hi
<< 8) | sense_mhp
->bdesc_length_lo
;
27190 if (bd_len
> MODE_BLK_DESC_LENGTH
) {
27191 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27192 "sr_atapi_change_speed: Mode Sense returned invalid "
27193 "block descriptor length\n");
27194 kmem_free(sense
, BUFLEN_MODE_CDROM_CAP
);
27198 /* Calculate the current and maximum drive speeds */
27199 sense_page
= (uchar_t
*)(sense
+ MODE_HEADER_LENGTH_GRP2
+ bd_len
);
27200 current_speed
= (sense_page
[14] << 8) | sense_page
[15];
27201 max_speed
= (sense_page
[8] << 8) | sense_page
[9];
27203 /* Process the command */
27205 case CDROMGDRVSPEED
:
27206 current_speed
/= SD_SPEED_1X
;
27207 if (ddi_copyout(¤t_speed
, (void *)data
,
27208 sizeof (int), flag
) != 0)
27211 case CDROMSDRVSPEED
:
27212 /* Convert the speed code to KB/sec */
27213 switch ((uchar_t
)data
) {
27214 case CDROM_NORMAL_SPEED
:
27215 current_speed
= SD_SPEED_1X
;
27217 case CDROM_DOUBLE_SPEED
:
27218 current_speed
= 2 * SD_SPEED_1X
;
27220 case CDROM_QUAD_SPEED
:
27221 current_speed
= 4 * SD_SPEED_1X
;
27223 case CDROM_TWELVE_SPEED
:
27224 current_speed
= 12 * SD_SPEED_1X
;
27226 case CDROM_MAXIMUM_SPEED
:
27227 current_speed
= 0xffff;
27230 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27231 "sr_atapi_change_speed: invalid drive speed %d\n",
27233 kmem_free(sense
, BUFLEN_MODE_CDROM_CAP
);
27237 /* Check the request against the drive's max speed. */
27238 if (current_speed
!= 0xffff) {
27239 if (current_speed
> max_speed
) {
27240 kmem_free(sense
, BUFLEN_MODE_CDROM_CAP
);
27246 * Build and send the SET SPEED command
27248 * Note: The SET SPEED (0xBB) command used in this routine is
27249 * obsolete per the SCSI MMC spec but still supported in the
27250 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI
27251 * therefore the command is still implemented in this routine.
27253 bzero(cdb
, sizeof (cdb
));
27254 cdb
[0] = (char)SCMD_SET_CDROM_SPEED
;
27255 cdb
[2] = (uchar_t
)(current_speed
>> 8);
27256 cdb
[3] = (uchar_t
)current_speed
;
27257 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
27258 com
->uscsi_cdb
= (caddr_t
)cdb
;
27259 com
->uscsi_cdblen
= CDB_GROUP5
;
27260 com
->uscsi_bufaddr
= NULL
;
27261 com
->uscsi_buflen
= 0;
27262 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
;
27263 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, 0, SD_PATH_STANDARD
);
27266 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27267 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd
);
27272 kmem_free(sense
, BUFLEN_MODE_CDROM_CAP
);
27275 kmem_free(com
, sizeof (*com
));
27282 * Function: sr_pause_resume()
27284 * Description: This routine is the driver entry point for handling CD-ROM
27285 * pause/resume ioctl requests. This only affects the audio play
27288 * Arguments: dev - the device 'dev_t'
27289 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
27290 * for setting the resume bit of the cdb.
27292 * Return Code: the code returned by sd_send_scsi_cmd()
27293 * EINVAL if invalid mode specified
27298 sr_pause_resume(dev_t dev
, int cmd
)
27301 struct uscsi_cmd
*com
;
27302 char cdb
[CDB_GROUP1
];
27305 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
27309 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
27310 bzero(cdb
, CDB_GROUP1
);
27311 cdb
[0] = SCMD_PAUSE_RESUME
;
27320 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
, "sr_pause_resume:"
27321 " Command '%x' Not Supported\n", cmd
);
27326 com
->uscsi_cdb
= cdb
;
27327 com
->uscsi_cdblen
= CDB_GROUP1
;
27328 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
;
27330 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27334 kmem_free(com
, sizeof (*com
));
27340 * Function: sr_play_msf()
27342 * Description: This routine is the driver entry point for handling CD-ROM
27343 * ioctl requests to output the audio signals at the specified
27344 * starting address and continue the audio play until the specified
27345 * ending address (CDROMPLAYMSF) The address is in Minute Second
27346 * Frame (MSF) format.
27348 * Arguments: dev - the device 'dev_t'
27349 * data - pointer to user provided audio msf structure,
27350 * specifying start/end addresses.
27351 * flag - this argument is a pass through to ddi_copyxxx()
27352 * directly from the mode argument of ioctl().
27354 * Return Code: the code returned by sd_send_scsi_cmd()
27355 * EFAULT if ddi_copyxxx() fails
27356 * ENXIO if fail ddi_get_soft_state
27357 * EINVAL if data pointer is NULL
27361 sr_play_msf(dev_t dev
, caddr_t data
, int flag
)
27364 struct uscsi_cmd
*com
;
27365 struct cdrom_msf msf_struct
;
27366 struct cdrom_msf
*msf
= &msf_struct
;
27367 char cdb
[CDB_GROUP1
];
27370 if (data
== NULL
) {
27374 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
27378 if (ddi_copyin(data
, msf
, sizeof (struct cdrom_msf
), flag
)) {
27382 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
27383 bzero(cdb
, CDB_GROUP1
);
27384 cdb
[0] = SCMD_PLAYAUDIO_MSF
;
27385 if (un
->un_f_cfg_playmsf_bcd
== TRUE
) {
27386 cdb
[3] = BYTE_TO_BCD(msf
->cdmsf_min0
);
27387 cdb
[4] = BYTE_TO_BCD(msf
->cdmsf_sec0
);
27388 cdb
[5] = BYTE_TO_BCD(msf
->cdmsf_frame0
);
27389 cdb
[6] = BYTE_TO_BCD(msf
->cdmsf_min1
);
27390 cdb
[7] = BYTE_TO_BCD(msf
->cdmsf_sec1
);
27391 cdb
[8] = BYTE_TO_BCD(msf
->cdmsf_frame1
);
27393 cdb
[3] = msf
->cdmsf_min0
;
27394 cdb
[4] = msf
->cdmsf_sec0
;
27395 cdb
[5] = msf
->cdmsf_frame0
;
27396 cdb
[6] = msf
->cdmsf_min1
;
27397 cdb
[7] = msf
->cdmsf_sec1
;
27398 cdb
[8] = msf
->cdmsf_frame1
;
27400 com
->uscsi_cdb
= cdb
;
27401 com
->uscsi_cdblen
= CDB_GROUP1
;
27402 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
;
27403 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27405 kmem_free(com
, sizeof (*com
));
27411 * Function: sr_play_trkind()
27413 * Description: This routine is the driver entry point for handling CD-ROM
27414 * ioctl requests to output the audio signals at the specified
27415 * starting address and continue the audio play until the specified
27416 * ending address (CDROMPLAYTRKIND). The address is in Track Index
27419 * Arguments: dev - the device 'dev_t'
27420 * data - pointer to user provided audio track/index structure,
27421 * specifying start/end addresses.
27422 * flag - this argument is a pass through to ddi_copyxxx()
27423 * directly from the mode argument of ioctl().
27425 * Return Code: the code returned by sd_send_scsi_cmd()
27426 * EFAULT if ddi_copyxxx() fails
27427 * ENXIO if fail ddi_get_soft_state
27428 * EINVAL if data pointer is NULL
27432 sr_play_trkind(dev_t dev
, caddr_t data
, int flag
)
27434 struct cdrom_ti ti_struct
;
27435 struct cdrom_ti
*ti
= &ti_struct
;
27436 struct uscsi_cmd
*com
= NULL
;
27437 char cdb
[CDB_GROUP1
];
27440 if (data
== NULL
) {
27444 if (ddi_copyin(data
, ti
, sizeof (struct cdrom_ti
), flag
)) {
27448 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
27449 bzero(cdb
, CDB_GROUP1
);
27450 cdb
[0] = SCMD_PLAYAUDIO_TI
;
27451 cdb
[4] = ti
->cdti_trk0
;
27452 cdb
[5] = ti
->cdti_ind0
;
27453 cdb
[7] = ti
->cdti_trk1
;
27454 cdb
[8] = ti
->cdti_ind1
;
27455 com
->uscsi_cdb
= cdb
;
27456 com
->uscsi_cdblen
= CDB_GROUP1
;
27457 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
;
27458 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27460 kmem_free(com
, sizeof (*com
));
27466 * Function: sr_read_all_subcodes()
27468 * Description: This routine is the driver entry point for handling CD-ROM
27469 * ioctl requests to return raw subcode data while the target is
27470 * playing audio (CDROMSUBCODE).
27472 * Arguments: dev - the device 'dev_t'
27473 * data - pointer to user provided cdrom subcode structure,
27474 * specifying the transfer length and address.
27475 * flag - this argument is a pass through to ddi_copyxxx()
27476 * directly from the mode argument of ioctl().
27478 * Return Code: the code returned by sd_send_scsi_cmd()
27479 * EFAULT if ddi_copyxxx() fails
27480 * ENXIO if fail ddi_get_soft_state
27481 * EINVAL if data pointer is NULL
27485 sr_read_all_subcodes(dev_t dev
, caddr_t data
, int flag
)
27487 struct sd_lun
*un
= NULL
;
27488 struct uscsi_cmd
*com
= NULL
;
27489 struct cdrom_subcode
*subcode
= NULL
;
27492 char cdb
[CDB_GROUP5
];
27494 #ifdef _MULTI_DATAMODEL
27495 /* To support ILP32 applications in an LP64 world */
27496 struct cdrom_subcode32 cdrom_subcode32
;
27497 struct cdrom_subcode32
*cdsc32
= &cdrom_subcode32
;
27499 if (data
== NULL
) {
27503 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
27507 subcode
= kmem_zalloc(sizeof (struct cdrom_subcode
), KM_SLEEP
);
27509 #ifdef _MULTI_DATAMODEL
27510 switch (ddi_model_convert_from(flag
& FMODELS
)) {
27511 case DDI_MODEL_ILP32
:
27512 if (ddi_copyin(data
, cdsc32
, sizeof (*cdsc32
), flag
)) {
27513 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27514 "sr_read_all_subcodes: ddi_copyin Failed\n");
27515 kmem_free(subcode
, sizeof (struct cdrom_subcode
));
27518 /* Convert the ILP32 uscsi data from the application to LP64 */
27519 cdrom_subcode32tocdrom_subcode(cdsc32
, subcode
);
27521 case DDI_MODEL_NONE
:
27522 if (ddi_copyin(data
, subcode
,
27523 sizeof (struct cdrom_subcode
), flag
)) {
27524 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27525 "sr_read_all_subcodes: ddi_copyin Failed\n");
27526 kmem_free(subcode
, sizeof (struct cdrom_subcode
));
27531 #else /* ! _MULTI_DATAMODEL */
27532 if (ddi_copyin(data
, subcode
, sizeof (struct cdrom_subcode
), flag
)) {
27533 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27534 "sr_read_all_subcodes: ddi_copyin Failed\n");
27535 kmem_free(subcode
, sizeof (struct cdrom_subcode
));
27538 #endif /* _MULTI_DATAMODEL */
27541 * Since MMC-2 expects max 3 bytes for length, check if the
27542 * length input is greater than 3 bytes
27544 if ((subcode
->cdsc_length
& 0xFF000000) != 0) {
27545 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
27546 "sr_read_all_subcodes: "
27547 "cdrom transfer length too large: %d (limit %d)\n",
27548 subcode
->cdsc_length
, 0xFFFFFF);
27549 kmem_free(subcode
, sizeof (struct cdrom_subcode
));
27553 buflen
= CDROM_BLK_SUBCODE
* subcode
->cdsc_length
;
27554 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
27555 bzero(cdb
, CDB_GROUP5
);
27557 if (un
->un_f_mmc_cap
== TRUE
) {
27558 cdb
[0] = (char)SCMD_READ_CD
;
27559 cdb
[2] = (char)0xff;
27560 cdb
[3] = (char)0xff;
27561 cdb
[4] = (char)0xff;
27562 cdb
[5] = (char)0xff;
27563 cdb
[6] = (((subcode
->cdsc_length
) & 0x00ff0000) >> 16);
27564 cdb
[7] = (((subcode
->cdsc_length
) & 0x0000ff00) >> 8);
27565 cdb
[8] = ((subcode
->cdsc_length
) & 0x000000ff);
27569 * Note: A vendor specific command (0xDF) is being used her to
27570 * request a read of all subcodes.
27572 cdb
[0] = (char)SCMD_READ_ALL_SUBCODES
;
27573 cdb
[6] = (((subcode
->cdsc_length
) & 0xff000000) >> 24);
27574 cdb
[7] = (((subcode
->cdsc_length
) & 0x00ff0000) >> 16);
27575 cdb
[8] = (((subcode
->cdsc_length
) & 0x0000ff00) >> 8);
27576 cdb
[9] = ((subcode
->cdsc_length
) & 0x000000ff);
27578 com
->uscsi_cdb
= cdb
;
27579 com
->uscsi_cdblen
= CDB_GROUP5
;
27580 com
->uscsi_bufaddr
= (caddr_t
)subcode
->cdsc_addr
;
27581 com
->uscsi_buflen
= buflen
;
27582 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
|USCSI_READ
;
27583 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_USERSPACE
,
27585 kmem_free(subcode
, sizeof (struct cdrom_subcode
));
27586 kmem_free(com
, sizeof (*com
));
27592 * Function: sr_read_subchannel()
27594 * Description: This routine is the driver entry point for handling CD-ROM
27595 * ioctl requests to return the Q sub-channel data of the CD
27596 * current position block. (CDROMSUBCHNL) The data includes the
27597 * track number, index number, absolute CD-ROM address (LBA or MSF
27598 * format per the user) , track relative CD-ROM address (LBA or MSF
27599 * format per the user), control data and audio status.
27601 * Arguments: dev - the device 'dev_t'
27602 * data - pointer to user provided cdrom sub-channel structure
27603 * flag - this argument is a pass through to ddi_copyxxx()
27604 * directly from the mode argument of ioctl().
27606 * Return Code: the code returned by sd_send_scsi_cmd()
27607 * EFAULT if ddi_copyxxx() fails
27608 * ENXIO if fail ddi_get_soft_state
27609 * EINVAL if data pointer is NULL
27613 sr_read_subchannel(dev_t dev
, caddr_t data
, int flag
)
27616 struct uscsi_cmd
*com
;
27617 struct cdrom_subchnl subchanel
;
27618 struct cdrom_subchnl
*subchnl
= &subchanel
;
27619 char cdb
[CDB_GROUP1
];
27623 if (data
== NULL
) {
27627 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
27628 (un
->un_state
== SD_STATE_OFFLINE
)) {
27632 if (ddi_copyin(data
, subchnl
, sizeof (struct cdrom_subchnl
), flag
)) {
27636 buffer
= kmem_zalloc((size_t)16, KM_SLEEP
);
27637 bzero(cdb
, CDB_GROUP1
);
27638 cdb
[0] = SCMD_READ_SUBCHANNEL
;
27639 /* Set the MSF bit based on the user requested address format */
27640 cdb
[1] = (subchnl
->cdsc_format
& CDROM_LBA
) ? 0 : 0x02;
27642 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
27647 * Set byte 3 to specify the return data format. A value of 0x01
27648 * indicates that the CD-ROM current position should be returned.
27652 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
27653 com
->uscsi_cdb
= cdb
;
27654 com
->uscsi_cdblen
= CDB_GROUP1
;
27655 com
->uscsi_bufaddr
= buffer
;
27656 com
->uscsi_buflen
= 16;
27657 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
|USCSI_READ
;
27658 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27661 kmem_free(buffer
, 16);
27662 kmem_free(com
, sizeof (*com
));
27666 /* Process the returned Q sub-channel data */
27667 subchnl
->cdsc_audiostatus
= buffer
[1];
27668 subchnl
->cdsc_adr
= (buffer
[5] & 0xF0);
27669 subchnl
->cdsc_ctrl
= (buffer
[5] & 0x0F);
27670 subchnl
->cdsc_trk
= buffer
[6];
27671 subchnl
->cdsc_ind
= buffer
[7];
27672 if (subchnl
->cdsc_format
& CDROM_LBA
) {
27673 subchnl
->cdsc_absaddr
.lba
=
27674 ((uchar_t
)buffer
[8] << 24) + ((uchar_t
)buffer
[9] << 16) +
27675 ((uchar_t
)buffer
[10] << 8) + ((uchar_t
)buffer
[11]);
27676 subchnl
->cdsc_reladdr
.lba
=
27677 ((uchar_t
)buffer
[12] << 24) + ((uchar_t
)buffer
[13] << 16) +
27678 ((uchar_t
)buffer
[14] << 8) + ((uchar_t
)buffer
[15]);
27679 } else if (un
->un_f_cfg_readsub_bcd
== TRUE
) {
27680 subchnl
->cdsc_absaddr
.msf
.minute
= BCD_TO_BYTE(buffer
[9]);
27681 subchnl
->cdsc_absaddr
.msf
.second
= BCD_TO_BYTE(buffer
[10]);
27682 subchnl
->cdsc_absaddr
.msf
.frame
= BCD_TO_BYTE(buffer
[11]);
27683 subchnl
->cdsc_reladdr
.msf
.minute
= BCD_TO_BYTE(buffer
[13]);
27684 subchnl
->cdsc_reladdr
.msf
.second
= BCD_TO_BYTE(buffer
[14]);
27685 subchnl
->cdsc_reladdr
.msf
.frame
= BCD_TO_BYTE(buffer
[15]);
27687 subchnl
->cdsc_absaddr
.msf
.minute
= buffer
[9];
27688 subchnl
->cdsc_absaddr
.msf
.second
= buffer
[10];
27689 subchnl
->cdsc_absaddr
.msf
.frame
= buffer
[11];
27690 subchnl
->cdsc_reladdr
.msf
.minute
= buffer
[13];
27691 subchnl
->cdsc_reladdr
.msf
.second
= buffer
[14];
27692 subchnl
->cdsc_reladdr
.msf
.frame
= buffer
[15];
27694 kmem_free(buffer
, 16);
27695 kmem_free(com
, sizeof (*com
));
27696 if (ddi_copyout(subchnl
, data
, sizeof (struct cdrom_subchnl
), flag
)
27705 * Function: sr_read_tocentry()
27707 * Description: This routine is the driver entry point for handling CD-ROM
27708 * ioctl requests to read from the Table of Contents (TOC)
27709 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
27710 * fields, the starting address (LBA or MSF format per the user)
27711 * and the data mode if the user specified track is a data track.
27713 * Note: The READ HEADER (0x44) command used in this routine is
27714 * obsolete per the SCSI MMC spec but still supported in the
27715 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI
27716 * therefore the command is still implemented in this routine.
27718 * Arguments: dev - the device 'dev_t'
27719 * data - pointer to user provided toc entry structure,
27720 * specifying the track # and the address format
27722 * flag - this argument is a pass through to ddi_copyxxx()
27723 * directly from the mode argument of ioctl().
27725 * Return Code: the code returned by sd_send_scsi_cmd()
27726 * EFAULT if ddi_copyxxx() fails
27727 * ENXIO if fail ddi_get_soft_state
27728 * EINVAL if data pointer is NULL
27732 sr_read_tocentry(dev_t dev
, caddr_t data
, int flag
)
27734 struct sd_lun
*un
= NULL
;
27735 struct uscsi_cmd
*com
;
27736 struct cdrom_tocentry toc_entry
;
27737 struct cdrom_tocentry
*entry
= &toc_entry
;
27740 char cdb
[CDB_GROUP1
];
27742 if (data
== NULL
) {
27746 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
27747 (un
->un_state
== SD_STATE_OFFLINE
)) {
27751 if (ddi_copyin(data
, entry
, sizeof (struct cdrom_tocentry
), flag
)) {
27755 /* Validate the requested track and address format */
27756 if (!(entry
->cdte_format
& (CDROM_LBA
| CDROM_MSF
))) {
27760 if (entry
->cdte_track
== 0) {
27764 buffer
= kmem_zalloc((size_t)12, KM_SLEEP
);
27765 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
27766 bzero(cdb
, CDB_GROUP1
);
27768 cdb
[0] = SCMD_READ_TOC
;
27769 /* Set the MSF bit based on the user requested address format */
27770 cdb
[1] = ((entry
->cdte_format
& CDROM_LBA
) ? 0 : 2);
27771 if (un
->un_f_cfg_read_toc_trk_bcd
== TRUE
) {
27772 cdb
[6] = BYTE_TO_BCD(entry
->cdte_track
);
27774 cdb
[6] = entry
->cdte_track
;
27778 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27779 * (4 byte TOC response header + 8 byte track descriptor)
27782 com
->uscsi_cdb
= cdb
;
27783 com
->uscsi_cdblen
= CDB_GROUP1
;
27784 com
->uscsi_bufaddr
= buffer
;
27785 com
->uscsi_buflen
= 0x0C;
27786 com
->uscsi_flags
= (USCSI_DIAGNOSE
| USCSI_SILENT
| USCSI_READ
);
27787 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27790 kmem_free(buffer
, 12);
27791 kmem_free(com
, sizeof (*com
));
27795 /* Process the toc entry */
27796 entry
->cdte_adr
= (buffer
[5] & 0xF0) >> 4;
27797 entry
->cdte_ctrl
= (buffer
[5] & 0x0F);
27798 if (entry
->cdte_format
& CDROM_LBA
) {
27799 entry
->cdte_addr
.lba
=
27800 ((uchar_t
)buffer
[8] << 24) + ((uchar_t
)buffer
[9] << 16) +
27801 ((uchar_t
)buffer
[10] << 8) + ((uchar_t
)buffer
[11]);
27802 } else if (un
->un_f_cfg_read_toc_addr_bcd
== TRUE
) {
27803 entry
->cdte_addr
.msf
.minute
= BCD_TO_BYTE(buffer
[9]);
27804 entry
->cdte_addr
.msf
.second
= BCD_TO_BYTE(buffer
[10]);
27805 entry
->cdte_addr
.msf
.frame
= BCD_TO_BYTE(buffer
[11]);
27807 * Send a READ TOC command using the LBA address format to get
27808 * the LBA for the track requested so it can be used in the
27809 * READ HEADER request
27811 * Note: The MSF bit of the READ HEADER command specifies the
27812 * output format. The block address specified in that command
27813 * must be in LBA format.
27816 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27819 kmem_free(buffer
, 12);
27820 kmem_free(com
, sizeof (*com
));
27824 entry
->cdte_addr
.msf
.minute
= buffer
[9];
27825 entry
->cdte_addr
.msf
.second
= buffer
[10];
27826 entry
->cdte_addr
.msf
.frame
= buffer
[11];
27828 * Send a READ TOC command using the LBA address format to get
27829 * the LBA for the track requested so it can be used in the
27830 * READ HEADER request
27832 * Note: The MSF bit of the READ HEADER command specifies the
27833 * output format. The block address specified in that command
27834 * must be in LBA format.
27837 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27840 kmem_free(buffer
, 12);
27841 kmem_free(com
, sizeof (*com
));
27847 * Build and send the READ HEADER command to determine the data mode of
27848 * the user specified track.
27850 if ((entry
->cdte_ctrl
& CDROM_DATA_TRACK
) &&
27851 (entry
->cdte_track
!= CDROM_LEADOUT
)) {
27852 bzero(cdb
, CDB_GROUP1
);
27853 cdb
[0] = SCMD_READ_HEADER
;
27854 cdb
[2] = buffer
[8];
27855 cdb
[3] = buffer
[9];
27856 cdb
[4] = buffer
[10];
27857 cdb
[5] = buffer
[11];
27859 com
->uscsi_buflen
= 0x08;
27860 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27863 entry
->cdte_datamode
= buffer
[0];
27866 * READ HEADER command failed, since this is
27867 * obsoleted in one spec, its better to return
27868 * -1 for an invlid track so that we can still
27869 * receive the rest of the TOC data.
27871 entry
->cdte_datamode
= (uchar_t
)-1;
27874 entry
->cdte_datamode
= (uchar_t
)-1;
27877 kmem_free(buffer
, 12);
27878 kmem_free(com
, sizeof (*com
));
27879 if (ddi_copyout(entry
, data
, sizeof (struct cdrom_tocentry
), flag
) != 0)
27887 * Function: sr_read_tochdr()
27889 * Description: This routine is the driver entry point for handling CD-ROM
27890 * ioctl requests to read the Table of Contents (TOC) header
27891 * (CDROMREADTOCHDR). The TOC header consists of the disk starting
27892 * and ending track numbers
27894 * Arguments: dev - the device 'dev_t'
27895 * data - pointer to user provided toc header structure,
27896 * specifying the starting and ending track numbers.
27897 * flag - this argument is a pass through to ddi_copyxxx()
27898 * directly from the mode argument of ioctl().
27900 * Return Code: the code returned by sd_send_scsi_cmd()
27901 * EFAULT if ddi_copyxxx() fails
27902 * ENXIO if fail ddi_get_soft_state
27903 * EINVAL if data pointer is NULL
27907 sr_read_tochdr(dev_t dev
, caddr_t data
, int flag
)
27910 struct uscsi_cmd
*com
;
27911 struct cdrom_tochdr toc_header
;
27912 struct cdrom_tochdr
*hdr
= &toc_header
;
27913 char cdb
[CDB_GROUP1
];
27917 if (data
== NULL
) {
27921 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
27922 (un
->un_state
== SD_STATE_OFFLINE
)) {
27926 buffer
= kmem_zalloc(4, KM_SLEEP
);
27927 bzero(cdb
, CDB_GROUP1
);
27928 cdb
[0] = SCMD_READ_TOC
;
27930 * Specifying a track number of 0x00 in the READ TOC command indicates
27931 * that the TOC header should be returned
27935 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
27936 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
27939 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
27940 com
->uscsi_cdb
= cdb
;
27941 com
->uscsi_cdblen
= CDB_GROUP1
;
27942 com
->uscsi_bufaddr
= buffer
;
27943 com
->uscsi_buflen
= 0x04;
27944 com
->uscsi_timeout
= 300;
27945 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
|USCSI_READ
;
27947 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
27949 if (un
->un_f_cfg_read_toc_trk_bcd
== TRUE
) {
27950 hdr
->cdth_trk0
= BCD_TO_BYTE(buffer
[2]);
27951 hdr
->cdth_trk1
= BCD_TO_BYTE(buffer
[3]);
27953 hdr
->cdth_trk0
= buffer
[2];
27954 hdr
->cdth_trk1
= buffer
[3];
27956 kmem_free(buffer
, 4);
27957 kmem_free(com
, sizeof (*com
));
27958 if (ddi_copyout(hdr
, data
, sizeof (struct cdrom_tochdr
), flag
) != 0) {
27966 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
27967 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for
27968 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
27969 * digital audio and extended architecture digital audio. These modes are
27970 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
27973 * In addition to support for the various data formats these routines also
27974 * include support for devices that implement only the direct access READ
27975 * commands (0x08, 0x28), devices that implement the READ_CD commands
27976 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
27977 * READ CDXA commands (0xD8, 0xDB)
27981 * Function: sr_read_mode1()
27983 * Description: This routine is the driver entry point for handling CD-ROM
27984 * ioctl read mode1 requests (CDROMREADMODE1).
27986 * Arguments: dev - the device 'dev_t'
27987 * data - pointer to user provided cd read structure specifying
27988 * the lba buffer address and length.
27989 * flag - this argument is a pass through to ddi_copyxxx()
27990 * directly from the mode argument of ioctl().
27992 * Return Code: the code returned by sd_send_scsi_cmd()
27993 * EFAULT if ddi_copyxxx() fails
27994 * ENXIO if fail ddi_get_soft_state
27995 * EINVAL if data pointer is NULL
27999 sr_read_mode1(dev_t dev
, caddr_t data
, int flag
)
28002 struct cdrom_read mode1_struct
;
28003 struct cdrom_read
*mode1
= &mode1_struct
;
28007 #ifdef _MULTI_DATAMODEL
28008 /* To support ILP32 applications in an LP64 world */
28009 struct cdrom_read32 cdrom_read32
;
28010 struct cdrom_read32
*cdrd32
= &cdrom_read32
;
28011 #endif /* _MULTI_DATAMODEL */
28013 if (data
== NULL
) {
28017 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
28018 (un
->un_state
== SD_STATE_OFFLINE
)) {
28022 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
28023 "sd_read_mode1: entry: un:0x%p\n", un
);
28025 #ifdef _MULTI_DATAMODEL
28026 switch (ddi_model_convert_from(flag
& FMODELS
)) {
28027 case DDI_MODEL_ILP32
:
28028 if (ddi_copyin(data
, cdrd32
, sizeof (*cdrd32
), flag
) != 0) {
28031 /* Convert the ILP32 uscsi data from the application to LP64 */
28032 cdrom_read32tocdrom_read(cdrd32
, mode1
);
28034 case DDI_MODEL_NONE
:
28035 if (ddi_copyin(data
, mode1
, sizeof (struct cdrom_read
), flag
)) {
28039 #else /* ! _MULTI_DATAMODEL */
28040 if (ddi_copyin(data
, mode1
, sizeof (struct cdrom_read
), flag
)) {
28043 #endif /* _MULTI_DATAMODEL */
28045 ssc
= sd_ssc_init(un
);
28046 rval
= sd_send_scsi_READ(ssc
, mode1
->cdread_bufaddr
,
28047 mode1
->cdread_buflen
, mode1
->cdread_lba
, SD_PATH_STANDARD
);
28050 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
28051 "sd_read_mode1: exit: un:0x%p\n", un
);
28058 * Function: sr_read_cd_mode2()
28060 * Description: This routine is the driver entry point for handling CD-ROM
28061 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28062 * support the READ CD (0xBE) command or the 1st generation
28063 * READ CD (0xD4) command.
28065 * Arguments: dev - the device 'dev_t'
28066 * data - pointer to user provided cd read structure specifying
28067 * the lba buffer address and length.
28068 * flag - this argument is a pass through to ddi_copyxxx()
28069 * directly from the mode argument of ioctl().
28071 * Return Code: the code returned by sd_send_scsi_cmd()
28072 * EFAULT if ddi_copyxxx() fails
28073 * ENXIO if fail ddi_get_soft_state
28074 * EINVAL if data pointer is NULL
28078 sr_read_cd_mode2(dev_t dev
, caddr_t data
, int flag
)
28081 struct uscsi_cmd
*com
;
28082 struct cdrom_read mode2_struct
;
28083 struct cdrom_read
*mode2
= &mode2_struct
;
28084 uchar_t cdb
[CDB_GROUP5
];
28087 #ifdef _MULTI_DATAMODEL
28088 /* To support ILP32 applications in an LP64 world */
28089 struct cdrom_read32 cdrom_read32
;
28090 struct cdrom_read32
*cdrd32
= &cdrom_read32
;
28091 #endif /* _MULTI_DATAMODEL */
28093 if (data
== NULL
) {
28097 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
28098 (un
->un_state
== SD_STATE_OFFLINE
)) {
28102 #ifdef _MULTI_DATAMODEL
28103 switch (ddi_model_convert_from(flag
& FMODELS
)) {
28104 case DDI_MODEL_ILP32
:
28105 if (ddi_copyin(data
, cdrd32
, sizeof (*cdrd32
), flag
) != 0) {
28108 /* Convert the ILP32 uscsi data from the application to LP64 */
28109 cdrom_read32tocdrom_read(cdrd32
, mode2
);
28111 case DDI_MODEL_NONE
:
28112 if (ddi_copyin(data
, mode2
, sizeof (*mode2
), flag
) != 0) {
28118 #else /* ! _MULTI_DATAMODEL */
28119 if (ddi_copyin(data
, mode2
, sizeof (*mode2
), flag
) != 0) {
28122 #endif /* _MULTI_DATAMODEL */
28124 bzero(cdb
, sizeof (cdb
));
28125 if (un
->un_f_cfg_read_cd_xd4
== TRUE
) {
28126 /* Read command supported by 1st generation atapi drives */
28127 cdb
[0] = SCMD_READ_CDD4
;
28129 /* Universal CD Access Command */
28130 cdb
[0] = SCMD_READ_CD
;
28134 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book
28136 cdb
[1] = CDROM_SECTOR_TYPE_MODE2
;
28138 /* set the start address */
28139 cdb
[2] = (uchar_t
)((mode2
->cdread_lba
>> 24) & 0XFF);
28140 cdb
[3] = (uchar_t
)((mode2
->cdread_lba
>> 16) & 0XFF);
28141 cdb
[4] = (uchar_t
)((mode2
->cdread_lba
>> 8) & 0xFF);
28142 cdb
[5] = (uchar_t
)(mode2
->cdread_lba
& 0xFF);
28144 /* set the transfer length */
28145 nblocks
= mode2
->cdread_buflen
/ 2336;
28146 cdb
[6] = (uchar_t
)(nblocks
>> 16);
28147 cdb
[7] = (uchar_t
)(nblocks
>> 8);
28148 cdb
[8] = (uchar_t
)nblocks
;
28150 /* set the filter bits */
28151 cdb
[9] = CDROM_READ_CD_USERDATA
;
28153 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
28154 com
->uscsi_cdb
= (caddr_t
)cdb
;
28155 com
->uscsi_cdblen
= sizeof (cdb
);
28156 com
->uscsi_bufaddr
= mode2
->cdread_bufaddr
;
28157 com
->uscsi_buflen
= mode2
->cdread_buflen
;
28158 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
|USCSI_READ
;
28160 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_USERSPACE
,
28162 kmem_free(com
, sizeof (*com
));
28168 * Function: sr_read_mode2()
28170 * Description: This routine is the driver entry point for handling CD-ROM
28171 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28172 * do not support the READ CD (0xBE) command.
28174 * Arguments: dev - the device 'dev_t'
28175 * data - pointer to user provided cd read structure specifying
28176 * the lba buffer address and length.
28177 * flag - this argument is a pass through to ddi_copyxxx()
28178 * directly from the mode argument of ioctl().
28180 * Return Code: the code returned by sd_send_scsi_cmd()
28181 * EFAULT if ddi_copyxxx() fails
28182 * ENXIO if fail ddi_get_soft_state
28183 * EINVAL if data pointer is NULL
28184 * EIO if fail to reset block size
28185 * EAGAIN if commands are in progress in the driver
28189 sr_read_mode2(dev_t dev
, caddr_t data
, int flag
)
28192 struct cdrom_read mode2_struct
;
28193 struct cdrom_read
*mode2
= &mode2_struct
;
28195 uint32_t restore_blksize
;
28196 struct uscsi_cmd
*com
;
28197 uchar_t cdb
[CDB_GROUP0
];
28200 #ifdef _MULTI_DATAMODEL
28201 /* To support ILP32 applications in an LP64 world */
28202 struct cdrom_read32 cdrom_read32
;
28203 struct cdrom_read32
*cdrd32
= &cdrom_read32
;
28204 #endif /* _MULTI_DATAMODEL */
28206 if (data
== NULL
) {
28210 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
28211 (un
->un_state
== SD_STATE_OFFLINE
)) {
28216 * Because this routine will update the device and driver block size
28217 * being used we want to make sure there are no commands in progress.
28218 * If commands are in progress the user will have to try again.
28220 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
28221 * in sdioctl to protect commands from sdioctl through to the top of
28222 * sd_uscsi_strategy. See sdioctl for details.
28224 mutex_enter(SD_MUTEX(un
));
28225 if (un
->un_ncmds_in_driver
!= 1) {
28226 mutex_exit(SD_MUTEX(un
));
28229 mutex_exit(SD_MUTEX(un
));
28231 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
28232 "sd_read_mode2: entry: un:0x%p\n", un
);
28234 #ifdef _MULTI_DATAMODEL
28235 switch (ddi_model_convert_from(flag
& FMODELS
)) {
28236 case DDI_MODEL_ILP32
:
28237 if (ddi_copyin(data
, cdrd32
, sizeof (*cdrd32
), flag
) != 0) {
28240 /* Convert the ILP32 uscsi data from the application to LP64 */
28241 cdrom_read32tocdrom_read(cdrd32
, mode2
);
28243 case DDI_MODEL_NONE
:
28244 if (ddi_copyin(data
, mode2
, sizeof (*mode2
), flag
) != 0) {
28249 #else /* ! _MULTI_DATAMODEL */
28250 if (ddi_copyin(data
, mode2
, sizeof (*mode2
), flag
)) {
28253 #endif /* _MULTI_DATAMODEL */
28255 /* Store the current target block size for restoration later */
28256 restore_blksize
= un
->un_tgt_blocksize
;
28258 /* Change the device and soft state target block size to 2336 */
28259 if (sr_sector_mode(dev
, SD_MODE2_BLKSIZE
) != 0) {
28265 bzero(cdb
, sizeof (cdb
));
28267 /* set READ operation */
28268 cdb
[0] = SCMD_READ
;
28270 /* adjust lba for 2kbyte blocks from 512 byte blocks */
28271 mode2
->cdread_lba
>>= 2;
28273 /* set the start address */
28274 cdb
[1] = (uchar_t
)((mode2
->cdread_lba
>> 16) & 0X1F);
28275 cdb
[2] = (uchar_t
)((mode2
->cdread_lba
>> 8) & 0xFF);
28276 cdb
[3] = (uchar_t
)(mode2
->cdread_lba
& 0xFF);
28278 /* set the transfer length */
28279 nblocks
= mode2
->cdread_buflen
/ 2336;
28280 cdb
[4] = (uchar_t
)nblocks
& 0xFF;
28282 /* build command */
28283 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
28284 com
->uscsi_cdb
= (caddr_t
)cdb
;
28285 com
->uscsi_cdblen
= sizeof (cdb
);
28286 com
->uscsi_bufaddr
= mode2
->cdread_bufaddr
;
28287 com
->uscsi_buflen
= mode2
->cdread_buflen
;
28288 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
|USCSI_READ
;
28291 * Issue SCSI command with user space address for read buffer.
28293 * This sends the command through main channel in the driver.
28295 * Since this is accessed via an IOCTL call, we go through the
28296 * standard path, so that if the device was powered down, then
28297 * it would be 'awakened' to handle the command.
28299 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_USERSPACE
,
28302 kmem_free(com
, sizeof (*com
));
28304 /* Restore the device and soft state target block size */
28305 if (sr_sector_mode(dev
, restore_blksize
) != 0) {
28306 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
28307 "can't do switch back to mode 1\n");
28309 * If sd_send_scsi_READ succeeded we still need to report
28310 * an error because we failed to reset the block size
28318 SD_TRACE(SD_LOG_ATTACH_DETACH
, un
,
28319 "sd_read_mode2: exit: un:0x%p\n", un
);
28326 * Function: sr_sector_mode()
28328 * Description: This utility function is used by sr_read_mode2 to set the target
28329 * block size based on the user specified size. This is a legacy
28330 * implementation based upon a vendor specific mode page
28332 * Arguments: dev - the device 'dev_t'
28333 * data - flag indicating if block size is being set to 2336 or
28336 * Return Code: the code returned by sd_send_scsi_cmd()
28337 * EFAULT if ddi_copyxxx() fails
28338 * ENXIO if fail ddi_get_soft_state
28339 * EINVAL if data pointer is NULL
28343 sr_sector_mode(dev_t dev
, uint32_t blksize
)
28351 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
28352 (un
->un_state
== SD_STATE_OFFLINE
)) {
28356 sense
= kmem_zalloc(20, KM_SLEEP
);
28358 /* Note: This is a vendor specific mode page (0x81) */
28359 ssc
= sd_ssc_init(un
);
28360 rval
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP0
, sense
, 20, 0x81,
28364 SD_ERROR(SD_LOG_IOCTL_RMMEDIA
, un
,
28365 "sr_sector_mode: Mode Sense failed\n");
28366 kmem_free(sense
, 20);
28369 select
= kmem_zalloc(20, KM_SLEEP
);
28371 select
[10] = ((blksize
>> 8) & 0xff);
28372 select
[11] = (blksize
& 0xff);
28375 select
[14] = sense
[14];
28376 select
[15] = sense
[15];
28377 if (blksize
== SD_MODE2_BLKSIZE
) {
28378 select
[14] |= 0x01;
28381 ssc
= sd_ssc_init(un
);
28382 rval
= sd_send_scsi_MODE_SELECT(ssc
, CDB_GROUP0
, select
, 20,
28383 SD_DONTSAVE_PAGE
, SD_PATH_STANDARD
);
28386 SD_ERROR(SD_LOG_IOCTL_RMMEDIA
, un
,
28387 "sr_sector_mode: Mode Select failed\n");
28390 * Only update the softstate block size if we successfully
28391 * changed the device block mode.
28393 mutex_enter(SD_MUTEX(un
));
28394 sd_update_block_info(un
, blksize
, 0);
28395 mutex_exit(SD_MUTEX(un
));
28397 kmem_free(sense
, 20);
28398 kmem_free(select
, 20);
28404 * Function: sr_read_cdda()
28406 * Description: This routine is the driver entry point for handling CD-ROM
28407 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
28408 * the target supports CDDA these requests are handled via a vendor
28409 * specific command (0xD8) If the target does not support CDDA
28410 * these requests are handled via the READ CD command (0xBE).
28412 * Arguments: dev - the device 'dev_t'
28413 * data - pointer to user provided CD-DA structure specifying
28414 * the track starting address, transfer length, and
28416 * flag - this argument is a pass through to ddi_copyxxx()
28417 * directly from the mode argument of ioctl().
28419 * Return Code: the code returned by sd_send_scsi_cmd()
28420 * EFAULT if ddi_copyxxx() fails
28421 * ENXIO if fail ddi_get_soft_state
28422 * EINVAL if invalid arguments are provided
28427 sr_read_cdda(dev_t dev
, caddr_t data
, int flag
)
28430 struct uscsi_cmd
*com
;
28431 struct cdrom_cdda
*cdda
;
28434 char cdb
[CDB_GROUP5
];
28436 #ifdef _MULTI_DATAMODEL
28437 /* To support ILP32 applications in an LP64 world */
28438 struct cdrom_cdda32 cdrom_cdda32
;
28439 struct cdrom_cdda32
*cdda32
= &cdrom_cdda32
;
28440 #endif /* _MULTI_DATAMODEL */
28442 if (data
== NULL
) {
28446 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
28450 cdda
= kmem_zalloc(sizeof (struct cdrom_cdda
), KM_SLEEP
);
28452 #ifdef _MULTI_DATAMODEL
28453 switch (ddi_model_convert_from(flag
& FMODELS
)) {
28454 case DDI_MODEL_ILP32
:
28455 if (ddi_copyin(data
, cdda32
, sizeof (*cdda32
), flag
)) {
28456 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
28457 "sr_read_cdda: ddi_copyin Failed\n");
28458 kmem_free(cdda
, sizeof (struct cdrom_cdda
));
28461 /* Convert the ILP32 uscsi data from the application to LP64 */
28462 cdrom_cdda32tocdrom_cdda(cdda32
, cdda
);
28464 case DDI_MODEL_NONE
:
28465 if (ddi_copyin(data
, cdda
, sizeof (struct cdrom_cdda
), flag
)) {
28466 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
28467 "sr_read_cdda: ddi_copyin Failed\n");
28468 kmem_free(cdda
, sizeof (struct cdrom_cdda
));
28473 #else /* ! _MULTI_DATAMODEL */
28474 if (ddi_copyin(data
, cdda
, sizeof (struct cdrom_cdda
), flag
)) {
28475 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
28476 "sr_read_cdda: ddi_copyin Failed\n");
28477 kmem_free(cdda
, sizeof (struct cdrom_cdda
));
28480 #endif /* _MULTI_DATAMODEL */
28483 * Since MMC-2 expects max 3 bytes for length, check if the
28484 * length input is greater than 3 bytes
28486 if ((cdda
->cdda_length
& 0xFF000000) != 0) {
28487 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
, "sr_read_cdda: "
28488 "cdrom transfer length too large: %d (limit %d)\n",
28489 cdda
->cdda_length
, 0xFFFFFF);
28490 kmem_free(cdda
, sizeof (struct cdrom_cdda
));
28494 switch (cdda
->cdda_subcode
) {
28495 case CDROM_DA_NO_SUBCODE
:
28496 buflen
= CDROM_BLK_2352
* cdda
->cdda_length
;
28498 case CDROM_DA_SUBQ
:
28499 buflen
= CDROM_BLK_2368
* cdda
->cdda_length
;
28501 case CDROM_DA_ALL_SUBCODE
:
28502 buflen
= CDROM_BLK_2448
* cdda
->cdda_length
;
28504 case CDROM_DA_SUBCODE_ONLY
:
28505 buflen
= CDROM_BLK_SUBCODE
* cdda
->cdda_length
;
28508 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
28509 "sr_read_cdda: Subcode '0x%x' Not Supported\n",
28510 cdda
->cdda_subcode
);
28511 kmem_free(cdda
, sizeof (struct cdrom_cdda
));
28515 /* Build and send the command */
28516 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
28517 bzero(cdb
, CDB_GROUP5
);
28519 if (un
->un_f_cfg_cdda
== TRUE
) {
28520 cdb
[0] = (char)SCMD_READ_CD
;
28522 cdb
[2] = (((cdda
->cdda_addr
) & 0xff000000) >> 24);
28523 cdb
[3] = (((cdda
->cdda_addr
) & 0x00ff0000) >> 16);
28524 cdb
[4] = (((cdda
->cdda_addr
) & 0x0000ff00) >> 8);
28525 cdb
[5] = ((cdda
->cdda_addr
) & 0x000000ff);
28526 cdb
[6] = (((cdda
->cdda_length
) & 0x00ff0000) >> 16);
28527 cdb
[7] = (((cdda
->cdda_length
) & 0x0000ff00) >> 8);
28528 cdb
[8] = ((cdda
->cdda_length
) & 0x000000ff);
28530 switch (cdda
->cdda_subcode
) {
28531 case CDROM_DA_NO_SUBCODE
:
28534 case CDROM_DA_SUBQ
:
28537 case CDROM_DA_ALL_SUBCODE
:
28540 case CDROM_DA_SUBCODE_ONLY
:
28543 kmem_free(cdda
, sizeof (struct cdrom_cdda
));
28544 kmem_free(com
, sizeof (*com
));
28548 cdb
[0] = (char)SCMD_READ_CDDA
;
28549 cdb
[2] = (((cdda
->cdda_addr
) & 0xff000000) >> 24);
28550 cdb
[3] = (((cdda
->cdda_addr
) & 0x00ff0000) >> 16);
28551 cdb
[4] = (((cdda
->cdda_addr
) & 0x0000ff00) >> 8);
28552 cdb
[5] = ((cdda
->cdda_addr
) & 0x000000ff);
28553 cdb
[6] = (((cdda
->cdda_length
) & 0xff000000) >> 24);
28554 cdb
[7] = (((cdda
->cdda_length
) & 0x00ff0000) >> 16);
28555 cdb
[8] = (((cdda
->cdda_length
) & 0x0000ff00) >> 8);
28556 cdb
[9] = ((cdda
->cdda_length
) & 0x000000ff);
28557 cdb
[10] = cdda
->cdda_subcode
;
28560 com
->uscsi_cdb
= cdb
;
28561 com
->uscsi_cdblen
= CDB_GROUP5
;
28562 com
->uscsi_bufaddr
= (caddr_t
)cdda
->cdda_data
;
28563 com
->uscsi_buflen
= buflen
;
28564 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
|USCSI_READ
;
28566 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_USERSPACE
,
28569 kmem_free(cdda
, sizeof (struct cdrom_cdda
));
28570 kmem_free(com
, sizeof (*com
));
28576 * Function: sr_read_cdxa()
28578 * Description: This routine is the driver entry point for handling CD-ROM
28579 * ioctl requests to return CD-XA (Extended Architecture) data.
28582 * Arguments: dev - the device 'dev_t'
28583 * data - pointer to user provided CD-XA structure specifying
28584 * the data starting address, transfer length, and format
28585 * flag - this argument is a pass through to ddi_copyxxx()
28586 * directly from the mode argument of ioctl().
28588 * Return Code: the code returned by sd_send_scsi_cmd()
28589 * EFAULT if ddi_copyxxx() fails
28590 * ENXIO if fail ddi_get_soft_state
28591 * EINVAL if data pointer is NULL
28595 sr_read_cdxa(dev_t dev
, caddr_t data
, int flag
)
28598 struct uscsi_cmd
*com
;
28599 struct cdrom_cdxa
*cdxa
;
28602 char cdb
[CDB_GROUP5
];
28603 uchar_t read_flags
;
28605 #ifdef _MULTI_DATAMODEL
28606 /* To support ILP32 applications in an LP64 world */
28607 struct cdrom_cdxa32 cdrom_cdxa32
;
28608 struct cdrom_cdxa32
*cdxa32
= &cdrom_cdxa32
;
28609 #endif /* _MULTI_DATAMODEL */
28611 if (data
== NULL
) {
28615 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
28619 cdxa
= kmem_zalloc(sizeof (struct cdrom_cdxa
), KM_SLEEP
);
28621 #ifdef _MULTI_DATAMODEL
28622 switch (ddi_model_convert_from(flag
& FMODELS
)) {
28623 case DDI_MODEL_ILP32
:
28624 if (ddi_copyin(data
, cdxa32
, sizeof (*cdxa32
), flag
)) {
28625 kmem_free(cdxa
, sizeof (struct cdrom_cdxa
));
28629 * Convert the ILP32 uscsi data from the
28630 * application to LP64 for internal use.
28632 cdrom_cdxa32tocdrom_cdxa(cdxa32
, cdxa
);
28634 case DDI_MODEL_NONE
:
28635 if (ddi_copyin(data
, cdxa
, sizeof (struct cdrom_cdxa
), flag
)) {
28636 kmem_free(cdxa
, sizeof (struct cdrom_cdxa
));
28641 #else /* ! _MULTI_DATAMODEL */
28642 if (ddi_copyin(data
, cdxa
, sizeof (struct cdrom_cdxa
), flag
)) {
28643 kmem_free(cdxa
, sizeof (struct cdrom_cdxa
));
28646 #endif /* _MULTI_DATAMODEL */
28649 * Since MMC-2 expects max 3 bytes for length, check if the
28650 * length input is greater than 3 bytes
28652 if ((cdxa
->cdxa_length
& 0xFF000000) != 0) {
28653 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
, "sr_read_cdxa: "
28654 "cdrom transfer length too large: %d (limit %d)\n",
28655 cdxa
->cdxa_length
, 0xFFFFFF);
28656 kmem_free(cdxa
, sizeof (struct cdrom_cdxa
));
28660 switch (cdxa
->cdxa_format
) {
28661 case CDROM_XA_DATA
:
28662 buflen
= CDROM_BLK_2048
* cdxa
->cdxa_length
;
28665 case CDROM_XA_SECTOR_DATA
:
28666 buflen
= CDROM_BLK_2352
* cdxa
->cdxa_length
;
28669 case CDROM_XA_DATA_W_ERROR
:
28670 buflen
= CDROM_BLK_2646
* cdxa
->cdxa_length
;
28674 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
28675 "sr_read_cdxa: Format '0x%x' Not Supported\n",
28676 cdxa
->cdxa_format
);
28677 kmem_free(cdxa
, sizeof (struct cdrom_cdxa
));
28681 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
28682 bzero(cdb
, CDB_GROUP5
);
28683 if (un
->un_f_mmc_cap
== TRUE
) {
28684 cdb
[0] = (char)SCMD_READ_CD
;
28685 cdb
[2] = (((cdxa
->cdxa_addr
) & 0xff000000) >> 24);
28686 cdb
[3] = (((cdxa
->cdxa_addr
) & 0x00ff0000) >> 16);
28687 cdb
[4] = (((cdxa
->cdxa_addr
) & 0x0000ff00) >> 8);
28688 cdb
[5] = ((cdxa
->cdxa_addr
) & 0x000000ff);
28689 cdb
[6] = (((cdxa
->cdxa_length
) & 0x00ff0000) >> 16);
28690 cdb
[7] = (((cdxa
->cdxa_length
) & 0x0000ff00) >> 8);
28691 cdb
[8] = ((cdxa
->cdxa_length
) & 0x000000ff);
28692 cdb
[9] = (char)read_flags
;
28695 * Note: A vendor specific command (0xDB) is being used her to
28696 * request a read of all subcodes.
28698 cdb
[0] = (char)SCMD_READ_CDXA
;
28699 cdb
[2] = (((cdxa
->cdxa_addr
) & 0xff000000) >> 24);
28700 cdb
[3] = (((cdxa
->cdxa_addr
) & 0x00ff0000) >> 16);
28701 cdb
[4] = (((cdxa
->cdxa_addr
) & 0x0000ff00) >> 8);
28702 cdb
[5] = ((cdxa
->cdxa_addr
) & 0x000000ff);
28703 cdb
[6] = (((cdxa
->cdxa_length
) & 0xff000000) >> 24);
28704 cdb
[7] = (((cdxa
->cdxa_length
) & 0x00ff0000) >> 16);
28705 cdb
[8] = (((cdxa
->cdxa_length
) & 0x0000ff00) >> 8);
28706 cdb
[9] = ((cdxa
->cdxa_length
) & 0x000000ff);
28707 cdb
[10] = cdxa
->cdxa_format
;
28709 com
->uscsi_cdb
= cdb
;
28710 com
->uscsi_cdblen
= CDB_GROUP5
;
28711 com
->uscsi_bufaddr
= (caddr_t
)cdxa
->cdxa_data
;
28712 com
->uscsi_buflen
= buflen
;
28713 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
|USCSI_READ
;
28714 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_USERSPACE
,
28716 kmem_free(cdxa
, sizeof (struct cdrom_cdxa
));
28717 kmem_free(com
, sizeof (*com
));
28723 * Function: sr_eject()
28725 * Description: This routine is the driver entry point for handling CD-ROM
28726 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
28728 * Arguments: dev - the device 'dev_t'
28730 * Return Code: the code returned by sd_send_scsi_cmd()
28734 sr_eject(dev_t dev
)
28740 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
28741 (un
->un_state
== SD_STATE_OFFLINE
)) {
28746 * To prevent race conditions with the eject
28747 * command, keep track of an eject command as
28748 * it progresses. If we are already handling
28749 * an eject command in the driver for the given
28750 * unit and another request to eject is received
28751 * immediately return EAGAIN so we don't lose
28752 * the command if the current eject command fails.
28754 mutex_enter(SD_MUTEX(un
));
28755 if (un
->un_f_ejecting
== TRUE
) {
28756 mutex_exit(SD_MUTEX(un
));
28759 un
->un_f_ejecting
= TRUE
;
28760 mutex_exit(SD_MUTEX(un
));
28762 ssc
= sd_ssc_init(un
);
28763 rval
= sd_send_scsi_DOORLOCK(ssc
, SD_REMOVAL_ALLOW
,
28768 mutex_enter(SD_MUTEX(un
));
28769 un
->un_f_ejecting
= FALSE
;
28770 mutex_exit(SD_MUTEX(un
));
28774 ssc
= sd_ssc_init(un
);
28775 rval
= sd_send_scsi_START_STOP_UNIT(ssc
, SD_START_STOP
,
28776 SD_TARGET_EJECT
, SD_PATH_STANDARD
);
28780 mutex_enter(SD_MUTEX(un
));
28782 un
->un_mediastate
= DKIO_EJECTED
;
28783 un
->un_f_ejecting
= FALSE
;
28784 cv_broadcast(&un
->un_state_cv
);
28785 mutex_exit(SD_MUTEX(un
));
28787 mutex_enter(SD_MUTEX(un
));
28788 un
->un_f_ejecting
= FALSE
;
28789 mutex_exit(SD_MUTEX(un
));
28796 * Function: sr_ejected()
28798 * Description: This routine updates the soft state structure to invalidate the
28799 * geometry information after the media has been ejected or a
28800 * media eject has been detected.
28802 * Arguments: un - driver soft state (unit) structure
28806 sr_ejected(struct sd_lun
*un
)
28808 struct sd_errstats
*stp
;
28810 ASSERT(un
!= NULL
);
28811 ASSERT(mutex_owned(SD_MUTEX(un
)));
28813 un
->un_f_blockcount_is_valid
= FALSE
;
28814 un
->un_f_tgt_blocksize_is_valid
= FALSE
;
28815 mutex_exit(SD_MUTEX(un
));
28816 cmlb_invalidate(un
->un_cmlbhandle
, (void *)SD_PATH_DIRECT_PRIORITY
);
28817 mutex_enter(SD_MUTEX(un
));
28819 if (un
->un_errstats
!= NULL
) {
28820 stp
= (struct sd_errstats
*)un
->un_errstats
->ks_data
;
28821 stp
->sd_capacity
.value
.ui64
= 0;
28827 * Function: sr_check_wp()
28829 * Description: This routine checks the write protection of a removable
28830 * media disk and hotpluggable devices via the write protect bit of
28831 * the Mode Page Header device specific field. Some devices choke
28832 * on unsupported mode page. In order to workaround this issue,
28833 * this routine has been implemented to use 0x3f mode page(request
28834 * for all pages) for all device types.
28836 * Arguments: dev - the device 'dev_t'
28838 * Return Code: int indicating if the device is write protected (1) or not (0)
28840 * Context: Kernel thread.
28845 sr_check_wp(dev_t dev
)
28848 uchar_t device_specific
;
28856 * Note: The return codes for this routine should be reworked to
28857 * properly handle the case of a NULL softstate.
28859 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
) {
28863 if (un
->un_f_cfg_is_atapi
== TRUE
) {
28865 * The mode page contents are not required; set the allocation
28866 * length for the mode page header only
28868 hdrlen
= MODE_HEADER_LENGTH_GRP2
;
28869 sense
= kmem_zalloc(hdrlen
, KM_SLEEP
);
28870 ssc
= sd_ssc_init(un
);
28871 status
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP1
, sense
, hdrlen
,
28872 MODEPAGE_ALLPAGES
, SD_PATH_STANDARD
);
28877 ((struct mode_header_grp2
*)sense
)->device_specific
;
28879 hdrlen
= MODE_HEADER_LENGTH
;
28880 sense
= kmem_zalloc(hdrlen
, KM_SLEEP
);
28881 ssc
= sd_ssc_init(un
);
28882 status
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP0
, sense
, hdrlen
,
28883 MODEPAGE_ALLPAGES
, SD_PATH_STANDARD
);
28888 ((struct mode_header
*)sense
)->device_specific
;
28893 * Write protect mode sense failed; not all disks
28894 * understand this query. Return FALSE assuming that
28895 * these devices are not writable.
28897 if (device_specific
& WRITE_PROTECT
) {
28902 kmem_free(sense
, hdrlen
);
28907 * Function: sr_volume_ctrl()
28909 * Description: This routine is the driver entry point for handling CD-ROM
28910 * audio output volume ioctl requests. (CDROMVOLCTRL)
28912 * Arguments: dev - the device 'dev_t'
28913 * data - pointer to user audio volume control structure
28914 * flag - this argument is a pass through to ddi_copyxxx()
28915 * directly from the mode argument of ioctl().
28917 * Return Code: the code returned by sd_send_scsi_cmd()
28918 * EFAULT if ddi_copyxxx() fails
28919 * ENXIO if fail ddi_get_soft_state
28920 * EINVAL if data pointer is NULL
28925 sr_volume_ctrl(dev_t dev
, caddr_t data
, int flag
)
28928 struct cdrom_volctrl volume
;
28929 struct cdrom_volctrl
*vol
= &volume
;
28930 uchar_t
*sense_page
;
28931 uchar_t
*select_page
;
28939 if (data
== NULL
) {
28943 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
28944 (un
->un_state
== SD_STATE_OFFLINE
)) {
28948 if (ddi_copyin(data
, vol
, sizeof (struct cdrom_volctrl
), flag
)) {
28952 if ((un
->un_f_cfg_is_atapi
== TRUE
) || (un
->un_f_mmc_cap
== TRUE
)) {
28953 struct mode_header_grp2
*sense_mhp
;
28954 struct mode_header_grp2
*select_mhp
;
28957 sense_buflen
= MODE_PARAM_LENGTH_GRP2
+ MODEPAGE_AUDIO_CTRL_LEN
;
28958 select_buflen
= MODE_HEADER_LENGTH_GRP2
+
28959 MODEPAGE_AUDIO_CTRL_LEN
;
28960 sense
= kmem_zalloc(sense_buflen
, KM_SLEEP
);
28961 select
= kmem_zalloc(select_buflen
, KM_SLEEP
);
28962 ssc
= sd_ssc_init(un
);
28963 rval
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP1
, sense
,
28964 sense_buflen
, MODEPAGE_AUDIO_CTRL
,
28969 SD_ERROR(SD_LOG_IOCTL_RMMEDIA
, un
,
28970 "sr_volume_ctrl: Mode Sense Failed\n");
28971 kmem_free(sense
, sense_buflen
);
28972 kmem_free(select
, select_buflen
);
28975 sense_mhp
= (struct mode_header_grp2
*)sense
;
28976 select_mhp
= (struct mode_header_grp2
*)select
;
28977 bd_len
= (sense_mhp
->bdesc_length_hi
<< 8) |
28978 sense_mhp
->bdesc_length_lo
;
28979 if (bd_len
> MODE_BLK_DESC_LENGTH
) {
28980 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
28981 "sr_volume_ctrl: Mode Sense returned invalid "
28982 "block descriptor length\n");
28983 kmem_free(sense
, sense_buflen
);
28984 kmem_free(select
, select_buflen
);
28987 sense_page
= (uchar_t
*)
28988 (sense
+ MODE_HEADER_LENGTH_GRP2
+ bd_len
);
28989 select_page
= (uchar_t
*)(select
+ MODE_HEADER_LENGTH_GRP2
);
28990 select_mhp
->length_msb
= 0;
28991 select_mhp
->length_lsb
= 0;
28992 select_mhp
->bdesc_length_hi
= 0;
28993 select_mhp
->bdesc_length_lo
= 0;
28995 struct mode_header
*sense_mhp
, *select_mhp
;
28997 sense_buflen
= MODE_PARAM_LENGTH
+ MODEPAGE_AUDIO_CTRL_LEN
;
28998 select_buflen
= MODE_HEADER_LENGTH
+ MODEPAGE_AUDIO_CTRL_LEN
;
28999 sense
= kmem_zalloc(sense_buflen
, KM_SLEEP
);
29000 select
= kmem_zalloc(select_buflen
, KM_SLEEP
);
29001 ssc
= sd_ssc_init(un
);
29002 rval
= sd_send_scsi_MODE_SENSE(ssc
, CDB_GROUP0
, sense
,
29003 sense_buflen
, MODEPAGE_AUDIO_CTRL
,
29008 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
29009 "sr_volume_ctrl: Mode Sense Failed\n");
29010 kmem_free(sense
, sense_buflen
);
29011 kmem_free(select
, select_buflen
);
29014 sense_mhp
= (struct mode_header
*)sense
;
29015 select_mhp
= (struct mode_header
*)select
;
29016 if (sense_mhp
->bdesc_length
> MODE_BLK_DESC_LENGTH
) {
29017 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
29018 "sr_volume_ctrl: Mode Sense returned invalid "
29019 "block descriptor length\n");
29020 kmem_free(sense
, sense_buflen
);
29021 kmem_free(select
, select_buflen
);
29024 sense_page
= (uchar_t
*)
29025 (sense
+ MODE_HEADER_LENGTH
+ sense_mhp
->bdesc_length
);
29026 select_page
= (uchar_t
*)(select
+ MODE_HEADER_LENGTH
);
29027 select_mhp
->length
= 0;
29028 select_mhp
->bdesc_length
= 0;
29031 * Note: An audio control data structure could be created and overlayed
29032 * on the following in place of the array indexing method implemented.
29035 /* Build the select data for the user volume data */
29036 select_page
[0] = MODEPAGE_AUDIO_CTRL
;
29037 select_page
[1] = 0xE;
29038 /* Set the immediate bit */
29039 select_page
[2] = 0x04;
29040 /* Zero out reserved fields */
29041 select_page
[3] = 0x00;
29042 select_page
[4] = 0x00;
29043 /* Return sense data for fields not to be modified */
29044 select_page
[5] = sense_page
[5];
29045 select_page
[6] = sense_page
[6];
29046 select_page
[7] = sense_page
[7];
29047 /* Set the user specified volume levels for channel 0 and 1 */
29048 select_page
[8] = 0x01;
29049 select_page
[9] = vol
->channel0
;
29050 select_page
[10] = 0x02;
29051 select_page
[11] = vol
->channel1
;
29052 /* Channel 2 and 3 are currently unsupported so return the sense data */
29053 select_page
[12] = sense_page
[12];
29054 select_page
[13] = sense_page
[13];
29055 select_page
[14] = sense_page
[14];
29056 select_page
[15] = sense_page
[15];
29058 ssc
= sd_ssc_init(un
);
29059 if ((un
->un_f_cfg_is_atapi
== TRUE
) || (un
->un_f_mmc_cap
== TRUE
)) {
29060 rval
= sd_send_scsi_MODE_SELECT(ssc
, CDB_GROUP1
, select
,
29061 select_buflen
, SD_DONTSAVE_PAGE
, SD_PATH_STANDARD
);
29063 rval
= sd_send_scsi_MODE_SELECT(ssc
, CDB_GROUP0
, select
,
29064 select_buflen
, SD_DONTSAVE_PAGE
, SD_PATH_STANDARD
);
29068 kmem_free(sense
, sense_buflen
);
29069 kmem_free(select
, select_buflen
);
29075 * Function: sr_read_sony_session_offset()
29077 * Description: This routine is the driver entry point for handling CD-ROM
29078 * ioctl requests for session offset information. (CDROMREADOFFSET)
29079 * The address of the first track in the last session of a
29080 * multi-session CD-ROM is returned
29082 * Note: This routine uses a vendor specific key value in the
29083 * command control field without implementing any vendor check here
29084 * or in the ioctl routine.
29086 * Arguments: dev - the device 'dev_t'
29087 * data - pointer to an int to hold the requested address
29088 * flag - this argument is a pass through to ddi_copyxxx()
29089 * directly from the mode argument of ioctl().
29091 * Return Code: the code returned by sd_send_scsi_cmd()
29092 * EFAULT if ddi_copyxxx() fails
29093 * ENXIO if fail ddi_get_soft_state
29094 * EINVAL if data pointer is NULL
29098 sr_read_sony_session_offset(dev_t dev
, caddr_t data
, int flag
)
29101 struct uscsi_cmd
*com
;
29103 char cdb
[CDB_GROUP1
];
29104 int session_offset
= 0;
29107 if (data
== NULL
) {
29111 if ((un
= ddi_get_soft_state(sd_state
, SDUNIT(dev
))) == NULL
||
29112 (un
->un_state
== SD_STATE_OFFLINE
)) {
29116 buffer
= kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN
, KM_SLEEP
);
29117 bzero(cdb
, CDB_GROUP1
);
29118 cdb
[0] = SCMD_READ_TOC
;
29120 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
29121 * (4 byte TOC response header + 8 byte response data)
29123 cdb
[8] = SONY_SESSION_OFFSET_LEN
;
29124 /* Byte 9 is the control byte. A vendor specific value is used */
29125 cdb
[9] = SONY_SESSION_OFFSET_KEY
;
29126 com
= kmem_zalloc(sizeof (*com
), KM_SLEEP
);
29127 com
->uscsi_cdb
= cdb
;
29128 com
->uscsi_cdblen
= CDB_GROUP1
;
29129 com
->uscsi_bufaddr
= buffer
;
29130 com
->uscsi_buflen
= SONY_SESSION_OFFSET_LEN
;
29131 com
->uscsi_flags
= USCSI_DIAGNOSE
|USCSI_SILENT
|USCSI_READ
;
29133 rval
= sd_send_scsi_cmd(dev
, com
, FKIOCTL
, UIO_SYSSPACE
,
29136 kmem_free(buffer
, SONY_SESSION_OFFSET_LEN
);
29137 kmem_free(com
, sizeof (*com
));
29140 if (buffer
[1] == SONY_SESSION_OFFSET_VALID
) {
29142 ((uchar_t
)buffer
[8] << 24) + ((uchar_t
)buffer
[9] << 16) +
29143 ((uchar_t
)buffer
[10] << 8) + ((uchar_t
)buffer
[11]);
29145 * Offset returned offset in current lbasize block's. Convert to
29146 * 2k block's to return to the user
29148 if (un
->un_tgt_blocksize
== CDROM_BLK_512
) {
29149 session_offset
>>= 2;
29150 } else if (un
->un_tgt_blocksize
== CDROM_BLK_1024
) {
29151 session_offset
>>= 1;
29155 if (ddi_copyout(&session_offset
, data
, sizeof (int), flag
) != 0) {
29159 kmem_free(buffer
, SONY_SESSION_OFFSET_LEN
);
29160 kmem_free(com
, sizeof (*com
));
29166 * Function: sd_wm_cache_constructor()
29168 * Description: Cache Constructor for the wmap cache for the read/modify/write
29171 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29172 * un - sd_lun structure for the device.
29173 * flag - the km flags passed to constructor
29175 * Return Code: 0 on success.
29181 sd_wm_cache_constructor(void *wm
, void *un
, int flags
)
29183 bzero(wm
, sizeof (struct sd_w_map
));
29184 cv_init(&((struct sd_w_map
*)wm
)->wm_avail
, NULL
, CV_DRIVER
, NULL
);
29190 * Function: sd_wm_cache_destructor()
29192 * Description: Cache destructor for the wmap cache for the read/modify/write
29195 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29196 * un - sd_lun structure for the device.
29200 sd_wm_cache_destructor(void *wm
, void *un
)
29202 cv_destroy(&((struct sd_w_map
*)wm
)->wm_avail
);
29207 * Function: sd_range_lock()
29209 * Description: Lock the range of blocks specified as parameter to ensure
29210 * that read, modify write is atomic and no other i/o writes
29211 * to the same location. The range is specified in terms
29212 * of start and end blocks. Block numbers are the actual
29213 * media block numbers and not system.
29215 * Arguments: un - sd_lun structure for the device.
29216 * startb - The starting block number
29217 * endb - The end block number
29218 * typ - type of i/o - simple/read_modify_write
29220 * Return Code: wm - pointer to the wmap structure.
29222 * Context: This routine can sleep.
29225 static struct sd_w_map
*
29226 sd_range_lock(struct sd_lun
*un
, daddr_t startb
, daddr_t endb
, ushort_t typ
)
29228 struct sd_w_map
*wmp
= NULL
;
29229 struct sd_w_map
*sl_wmp
= NULL
;
29230 struct sd_w_map
*tmp_wmp
;
29231 wm_state state
= SD_WM_CHK_LIST
;
29234 ASSERT(un
!= NULL
);
29235 ASSERT(!mutex_owned(SD_MUTEX(un
)));
29237 mutex_enter(SD_MUTEX(un
));
29239 while (state
!= SD_WM_DONE
) {
29242 case SD_WM_CHK_LIST
:
29244 * This is the starting state. Check the wmap list
29245 * to see if the range is currently available.
29247 if (!(typ
& SD_WTYPE_RMW
) && !(un
->un_rmw_count
)) {
29249 * If this is a simple write and no rmw
29250 * i/o is pending then try to lock the
29251 * range as the range should be available.
29253 state
= SD_WM_LOCK_RANGE
;
29255 tmp_wmp
= sd_get_range(un
, startb
, endb
);
29256 if (tmp_wmp
!= NULL
) {
29257 if ((wmp
!= NULL
) && ONLIST(un
, wmp
)) {
29259 * Should not keep onlist wmps
29260 * while waiting this macro
29261 * will also do wmp = NULL;
29263 FREE_ONLIST_WMAP(un
, wmp
);
29266 * sl_wmp is the wmap on which wait
29267 * is done, since the tmp_wmp points
29268 * to the inuse wmap, set sl_wmp to
29269 * tmp_wmp and change the state to sleep
29272 state
= SD_WM_WAIT_MAP
;
29274 state
= SD_WM_LOCK_RANGE
;
29280 case SD_WM_LOCK_RANGE
:
29281 ASSERT(un
->un_wm_cache
);
29283 * The range need to be locked, try to get a wmap.
29284 * First attempt it with NO_SLEEP, want to avoid a sleep
29285 * if possible as we will have to release the sd mutex
29286 * if we have to sleep.
29289 wmp
= kmem_cache_alloc(un
->un_wm_cache
,
29292 mutex_exit(SD_MUTEX(un
));
29293 _NOTE(DATA_READABLE_WITHOUT_LOCK
29294 (sd_lun::un_wm_cache
))
29295 wmp
= kmem_cache_alloc(un
->un_wm_cache
,
29297 mutex_enter(SD_MUTEX(un
));
29299 * we released the mutex so recheck and go to
29300 * check list state.
29302 state
= SD_WM_CHK_LIST
;
29305 * We exit out of state machine since we
29306 * have the wmap. Do the housekeeping first.
29307 * place the wmap on the wmap list if it is not
29308 * on it already and then set the state to done.
29310 wmp
->wm_start
= startb
;
29311 wmp
->wm_end
= endb
;
29312 wmp
->wm_flags
= typ
| SD_WM_BUSY
;
29313 if (typ
& SD_WTYPE_RMW
) {
29314 un
->un_rmw_count
++;
29317 * If not already on the list then link
29319 if (!ONLIST(un
, wmp
)) {
29320 wmp
->wm_next
= un
->un_wm
;
29321 wmp
->wm_prev
= NULL
;
29323 wmp
->wm_next
->wm_prev
= wmp
;
29326 state
= SD_WM_DONE
;
29330 case SD_WM_WAIT_MAP
:
29331 ASSERT(sl_wmp
->wm_flags
& SD_WM_BUSY
);
29333 * Wait is done on sl_wmp, which is set in the
29334 * check_list state.
29336 sl_wmp
->wm_wanted_count
++;
29337 cv_wait(&sl_wmp
->wm_avail
, SD_MUTEX(un
));
29338 sl_wmp
->wm_wanted_count
--;
29340 * We can reuse the memory from the completed sl_wmp
29341 * lock range for our new lock, but only if noone is
29344 ASSERT(!(sl_wmp
->wm_flags
& SD_WM_BUSY
));
29345 if (sl_wmp
->wm_wanted_count
== 0) {
29347 CHK_N_FREEWMP(un
, wmp
);
29352 * After waking up, need to recheck for availability of
29355 state
= SD_WM_CHK_LIST
;
29359 panic("sd_range_lock: "
29360 "Unknown state %d in sd_range_lock", state
);
29362 } /* switch(state) */
29364 } /* while(state != SD_WM_DONE) */
29366 mutex_exit(SD_MUTEX(un
));
29368 ASSERT(wmp
!= NULL
);
29375 * Function: sd_get_range()
29377 * Description: Find if there any overlapping I/O to this one
29378 * Returns the write-map of 1st such I/O, NULL otherwise.
29380 * Arguments: un - sd_lun structure for the device.
29381 * startb - The starting block number
29382 * endb - The end block number
29384 * Return Code: wm - pointer to the wmap structure.
29387 static struct sd_w_map
*
29388 sd_get_range(struct sd_lun
*un
, daddr_t startb
, daddr_t endb
)
29390 struct sd_w_map
*wmp
;
29392 ASSERT(un
!= NULL
);
29394 for (wmp
= un
->un_wm
; wmp
!= NULL
; wmp
= wmp
->wm_next
) {
29395 if (!(wmp
->wm_flags
& SD_WM_BUSY
)) {
29398 if ((startb
>= wmp
->wm_start
) && (startb
<= wmp
->wm_end
)) {
29401 if ((endb
>= wmp
->wm_start
) && (endb
<= wmp
->wm_end
)) {
29411 * Function: sd_free_inlist_wmap()
29413 * Description: Unlink and free a write map struct.
29415 * Arguments: un - sd_lun structure for the device.
29416 * wmp - sd_w_map which needs to be unlinked.
29420 sd_free_inlist_wmap(struct sd_lun
*un
, struct sd_w_map
*wmp
)
29422 ASSERT(un
!= NULL
);
29424 if (un
->un_wm
== wmp
) {
29425 un
->un_wm
= wmp
->wm_next
;
29427 wmp
->wm_prev
->wm_next
= wmp
->wm_next
;
29430 if (wmp
->wm_next
) {
29431 wmp
->wm_next
->wm_prev
= wmp
->wm_prev
;
29434 wmp
->wm_next
= wmp
->wm_prev
= NULL
;
29436 kmem_cache_free(un
->un_wm_cache
, wmp
);
29441 * Function: sd_range_unlock()
29443 * Description: Unlock the range locked by wm.
29444 * Free write map if nobody else is waiting on it.
29446 * Arguments: un - sd_lun structure for the device.
29447 * wmp - sd_w_map which needs to be unlinked.
29451 sd_range_unlock(struct sd_lun
*un
, struct sd_w_map
*wm
)
29453 ASSERT(un
!= NULL
);
29454 ASSERT(wm
!= NULL
);
29455 ASSERT(!mutex_owned(SD_MUTEX(un
)));
29457 mutex_enter(SD_MUTEX(un
));
29459 if (wm
->wm_flags
& SD_WTYPE_RMW
) {
29460 un
->un_rmw_count
--;
29463 if (wm
->wm_wanted_count
) {
29466 * Broadcast that the wmap is available now.
29468 cv_broadcast(&wm
->wm_avail
);
29471 * If no one is waiting on the map, it should be free'ed.
29473 sd_free_inlist_wmap(un
, wm
);
29476 mutex_exit(SD_MUTEX(un
));
29481 * Function: sd_read_modify_write_task
29483 * Description: Called from a taskq thread to initiate the write phase of
29484 * a read-modify-write request. This is used for targets where
29485 * un->un_sys_blocksize != un->un_tgt_blocksize.
29487 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29489 * Context: Called under taskq thread context.
29493 sd_read_modify_write_task(void *arg
)
29495 struct sd_mapblocksize_info
*bsp
;
29497 struct sd_xbuf
*xp
;
29500 bp
= arg
; /* The bp is given in arg */
29501 ASSERT(bp
!= NULL
);
29503 /* Get the pointer to the layer-private data struct */
29504 xp
= SD_GET_XBUF(bp
);
29505 ASSERT(xp
!= NULL
);
29506 bsp
= xp
->xb_private
;
29507 ASSERT(bsp
!= NULL
);
29509 un
= SD_GET_UN(bp
);
29510 ASSERT(un
!= NULL
);
29511 ASSERT(!mutex_owned(SD_MUTEX(un
)));
29513 SD_TRACE(SD_LOG_IO_RMMEDIA
, un
,
29514 "sd_read_modify_write_task: entry: buf:0x%p\n", bp
);
29517 * This is the write phase of a read-modify-write request, called
29518 * under the context of a taskq thread in response to the completion
29519 * of the read portion of the rmw request completing under interrupt
29520 * context. The write request must be sent from here down the iostart
29521 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29522 * we use the layer index saved in the layer-private data area.
29524 SD_NEXT_IOSTART(bsp
->mbs_layer_index
, un
, bp
);
29526 SD_TRACE(SD_LOG_IO_RMMEDIA
, un
,
29527 "sd_read_modify_write_task: exit: buf:0x%p\n", bp
);
29532 * Function: sddump_do_read_of_rmw()
29534 * Description: This routine will be called from sddump, If sddump is called
29535 * with an I/O which not aligned on device blocksize boundary
29536 * then the write has to be converted to read-modify-write.
29537 * Do the read part here in order to keep sddump simple.
29538 * Note - That the sd_mutex is held across the call to this
29541 * Arguments: un - sd_lun
29542 * blkno - block number in terms of media block size.
29543 * nblk - number of blocks.
29544 * bpp - pointer to pointer to the buf structure. On return
29545 * from this function, *bpp points to the valid buffer
29546 * to which the write has to be done.
29548 * Return Code: 0 for success or errno-type return code
29552 sddump_do_read_of_rmw(struct sd_lun
*un
, uint64_t blkno
, uint64_t nblk
,
29559 struct scsi_pkt
*pkt
= NULL
;
29560 uint32_t target_blocksize
;
29562 ASSERT(un
!= NULL
);
29563 ASSERT(mutex_owned(SD_MUTEX(un
)));
29565 target_blocksize
= un
->un_tgt_blocksize
;
29567 mutex_exit(SD_MUTEX(un
));
29569 bp
= scsi_alloc_consistent_buf(SD_ADDRESS(un
), (struct buf
*)NULL
,
29570 (size_t)(nblk
* target_blocksize
), B_READ
, NULL_FUNC
, NULL
);
29572 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
29573 "no resources for dumping; giving up");
29578 rval
= sd_setup_rw_pkt(un
, &pkt
, bp
, 0, NULL_FUNC
, NULL
,
29581 scsi_free_consistent_buf(bp
);
29582 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
29583 "no resources for dumping; giving up");
29588 pkt
->pkt_flags
|= FLAG_NOINTR
;
29591 for (i
= 0; i
< SD_NDUMP_RETRIES
; i
++) {
29594 * Scsi_poll returns 0 (success) if the command completes and
29595 * the status block is STATUS_GOOD. We should only check
29596 * errors if this condition is not true. Even then we should
29597 * send our own request sense packet only if we have a check
29598 * condition and auto request sense has not been performed by
29601 SD_TRACE(SD_LOG_DUMP
, un
, "sddump: sending read\n");
29603 if ((sd_scsi_poll(un
, pkt
) == 0) && (pkt
->pkt_resid
== 0)) {
29609 * Check CMD_DEV_GONE 1st, give up if device is gone,
29610 * no need to read RQS data.
29612 if (pkt
->pkt_reason
== CMD_DEV_GONE
) {
29613 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
29614 "Error while dumping state with rmw..."
29615 "Device is gone\n");
29619 if (SD_GET_PKT_STATUS(pkt
) == STATUS_CHECK
) {
29620 SD_INFO(SD_LOG_DUMP
, un
,
29621 "sddump: read failed with CHECK, try # %d\n", i
);
29622 if (((pkt
->pkt_state
& STATE_ARQ_DONE
) == 0)) {
29623 (void) sd_send_polled_RQS(un
);
29629 if (SD_GET_PKT_STATUS(pkt
) == STATUS_BUSY
) {
29630 int reset_retval
= 0;
29632 SD_INFO(SD_LOG_DUMP
, un
,
29633 "sddump: read failed with BUSY, try # %d\n", i
);
29635 if (un
->un_f_lun_reset_enabled
== TRUE
) {
29636 reset_retval
= scsi_reset(SD_ADDRESS(un
),
29639 if (reset_retval
== 0) {
29640 (void) scsi_reset(SD_ADDRESS(un
), RESET_TARGET
);
29642 (void) sd_send_polled_RQS(un
);
29645 SD_INFO(SD_LOG_DUMP
, un
,
29646 "sddump: read failed with 0x%x, try # %d\n",
29647 SD_GET_PKT_STATUS(pkt
), i
);
29648 mutex_enter(SD_MUTEX(un
));
29649 sd_reset_target(un
, pkt
);
29650 mutex_exit(SD_MUTEX(un
));
29654 * If we are not getting anywhere with lun/target resets,
29655 * let's reset the bus.
29657 if (i
> SD_NDUMP_RETRIES
/2) {
29658 (void) scsi_reset(SD_ADDRESS(un
), RESET_ALL
);
29659 (void) sd_send_polled_RQS(un
);
29663 scsi_destroy_pkt(pkt
);
29666 scsi_free_consistent_buf(bp
);
29673 mutex_enter(SD_MUTEX(un
));
29679 * Function: sd_failfast_flushq
29681 * Description: Take all bp's on the wait queue that have B_FAILFAST set
29682 * in b_flags and move them onto the failfast queue, then kick
29683 * off a thread to return all bp's on the failfast queue to
29684 * their owners with an error set.
29686 * Arguments: un - pointer to the soft state struct for the instance.
29688 * Context: may execute in interrupt context.
29692 sd_failfast_flushq(struct sd_lun
*un
)
29695 struct buf
*next_waitq_bp
;
29696 struct buf
*prev_waitq_bp
= NULL
;
29698 ASSERT(un
!= NULL
);
29699 ASSERT(mutex_owned(SD_MUTEX(un
)));
29700 ASSERT(un
->un_failfast_state
== SD_FAILFAST_ACTIVE
);
29701 ASSERT(un
->un_failfast_bp
== NULL
);
29703 SD_TRACE(SD_LOG_IO_FAILFAST
, un
,
29704 "sd_failfast_flushq: entry: un:0x%p\n", un
);
29707 * Check if we should flush all bufs when entering failfast state, or
29708 * just those with B_FAILFAST set.
29710 if (sd_failfast_flushctl
& SD_FAILFAST_FLUSH_ALL_BUFS
) {
29712 * Move *all* bp's on the wait queue to the failfast flush
29713 * queue, including those that do NOT have B_FAILFAST set.
29715 if (un
->un_failfast_headp
== NULL
) {
29716 ASSERT(un
->un_failfast_tailp
== NULL
);
29717 un
->un_failfast_headp
= un
->un_waitq_headp
;
29719 ASSERT(un
->un_failfast_tailp
!= NULL
);
29720 un
->un_failfast_tailp
->av_forw
= un
->un_waitq_headp
;
29723 un
->un_failfast_tailp
= un
->un_waitq_tailp
;
29725 /* update kstat for each bp moved out of the waitq */
29726 for (bp
= un
->un_waitq_headp
; bp
!= NULL
; bp
= bp
->av_forw
) {
29727 SD_UPDATE_KSTATS(un
, kstat_waitq_exit
, bp
);
29730 /* empty the waitq */
29731 un
->un_waitq_headp
= un
->un_waitq_tailp
= NULL
;
29735 * Go thru the wait queue, pick off all entries with
29736 * B_FAILFAST set, and move these onto the failfast queue.
29738 for (bp
= un
->un_waitq_headp
; bp
!= NULL
; bp
= next_waitq_bp
) {
29740 * Save the pointer to the next bp on the wait queue,
29741 * so we get to it on the next iteration of this loop.
29743 next_waitq_bp
= bp
->av_forw
;
29746 * If this bp from the wait queue does NOT have
29747 * B_FAILFAST set, just move on to the next element
29748 * in the wait queue. Note, this is the only place
29749 * where it is correct to set prev_waitq_bp.
29751 if ((bp
->b_flags
& B_FAILFAST
) == 0) {
29752 prev_waitq_bp
= bp
;
29757 * Remove the bp from the wait queue.
29759 if (bp
== un
->un_waitq_headp
) {
29760 /* The bp is the first element of the waitq. */
29761 un
->un_waitq_headp
= next_waitq_bp
;
29762 if (un
->un_waitq_headp
== NULL
) {
29763 /* The wait queue is now empty */
29764 un
->un_waitq_tailp
= NULL
;
29768 * The bp is either somewhere in the middle
29769 * or at the end of the wait queue.
29771 ASSERT(un
->un_waitq_headp
!= NULL
);
29772 ASSERT(prev_waitq_bp
!= NULL
);
29773 ASSERT((prev_waitq_bp
->b_flags
& B_FAILFAST
)
29775 if (bp
== un
->un_waitq_tailp
) {
29776 /* bp is the last entry on the waitq. */
29777 ASSERT(next_waitq_bp
== NULL
);
29778 un
->un_waitq_tailp
= prev_waitq_bp
;
29780 prev_waitq_bp
->av_forw
= next_waitq_bp
;
29782 bp
->av_forw
= NULL
;
29785 * update kstat since the bp is moved out of
29788 SD_UPDATE_KSTATS(un
, kstat_waitq_exit
, bp
);
29791 * Now put the bp onto the failfast queue.
29793 if (un
->un_failfast_headp
== NULL
) {
29794 /* failfast queue is currently empty */
29795 ASSERT(un
->un_failfast_tailp
== NULL
);
29796 un
->un_failfast_headp
=
29797 un
->un_failfast_tailp
= bp
;
29799 /* Add the bp to the end of the failfast q */
29800 ASSERT(un
->un_failfast_tailp
!= NULL
);
29801 ASSERT(un
->un_failfast_tailp
->b_flags
&
29803 un
->un_failfast_tailp
->av_forw
= bp
;
29804 un
->un_failfast_tailp
= bp
;
29810 * Now return all bp's on the failfast queue to their owners.
29812 while ((bp
= un
->un_failfast_headp
) != NULL
) {
29814 un
->un_failfast_headp
= bp
->av_forw
;
29815 if (un
->un_failfast_headp
== NULL
) {
29816 un
->un_failfast_tailp
= NULL
;
29820 * We want to return the bp with a failure error code, but
29821 * we do not want a call to sd_start_cmds() to occur here,
29822 * so use sd_return_failed_command_no_restart() instead of
29823 * sd_return_failed_command().
29825 sd_return_failed_command_no_restart(un
, bp
, EIO
);
29828 /* Flush the xbuf queues if required. */
29829 if (sd_failfast_flushctl
& SD_FAILFAST_FLUSH_ALL_QUEUES
) {
29830 ddi_xbuf_flushq(un
->un_xbuf_attr
, sd_failfast_flushq_callback
);
29833 SD_TRACE(SD_LOG_IO_FAILFAST
, un
,
29834 "sd_failfast_flushq: exit: un:0x%p\n", un
);
29839 * Function: sd_failfast_flushq_callback
29841 * Description: Return TRUE if the given bp meets the criteria for failfast
29842 * flushing. Used with ddi_xbuf_flushq(9F).
29844 * Arguments: bp - ptr to buf struct to be examined.
29850 sd_failfast_flushq_callback(struct buf
*bp
)
29853 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29854 * state is entered; OR (2) the given bp has B_FAILFAST set.
29856 return (((sd_failfast_flushctl
& SD_FAILFAST_FLUSH_ALL_BUFS
) ||
29857 (bp
->b_flags
& B_FAILFAST
)) ? TRUE
: FALSE
);
29863 * Function: sd_setup_next_xfer
29865 * Description: Prepare next I/O operation using DMA_PARTIAL
29870 sd_setup_next_xfer(struct sd_lun
*un
, struct buf
*bp
,
29871 struct scsi_pkt
*pkt
, struct sd_xbuf
*xp
)
29873 ssize_t num_blks_not_xfered
;
29874 daddr_t strt_blk_num
;
29875 ssize_t bytes_not_xfered
;
29878 ASSERT(pkt
->pkt_resid
== 0);
29881 * Calculate next block number and amount to be transferred.
29883 * How much data NOT transfered to the HBA yet.
29885 bytes_not_xfered
= xp
->xb_dma_resid
;
29888 * figure how many blocks NOT transfered to the HBA yet.
29890 num_blks_not_xfered
= SD_BYTES2TGTBLOCKS(un
, bytes_not_xfered
);
29893 * set starting block number to the end of what WAS transfered.
29895 strt_blk_num
= xp
->xb_blkno
+
29896 SD_BYTES2TGTBLOCKS(un
, bp
->b_bcount
- bytes_not_xfered
);
29899 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29900 * will call scsi_initpkt with NULL_FUNC so we do not have to release
29901 * the disk mutex here.
29903 rval
= sd_setup_next_rw_pkt(un
, pkt
, bp
,
29904 strt_blk_num
, num_blks_not_xfered
);
29911 * Adjust things if there are still more blocks to be
29914 xp
->xb_dma_resid
= pkt
->pkt_resid
;
29915 pkt
->pkt_resid
= 0;
29921 * There's really only one possible return value from
29922 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
29925 ASSERT(rval
== SD_PKT_ALLOC_FAILURE
);
29927 bp
->b_resid
= bp
->b_bcount
;
29928 bp
->b_flags
|= B_ERROR
;
29930 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
29931 "Error setting up next portion of DMA transfer\n");
29937 * Function: sd_panic_for_res_conflict
29939 * Description: Call panic with a string formatted with "Reservation Conflict"
29940 * and a human readable identifier indicating the SD instance
29941 * that experienced the reservation conflict.
29943 * Arguments: un - pointer to the soft state struct for the instance.
29945 * Context: may execute in interrupt context.
29948 #define SD_RESV_CONFLICT_FMT_LEN 40
29950 sd_panic_for_res_conflict(struct sd_lun
*un
)
29952 char panic_str
[SD_RESV_CONFLICT_FMT_LEN
+MAXPATHLEN
];
29953 char path_str
[MAXPATHLEN
];
29955 (void) snprintf(panic_str
, sizeof (panic_str
),
29956 "Reservation Conflict\nDisk: %s",
29957 ddi_pathname(SD_DEVINFO(un
), path_str
));
29963 * Note: The following sd_faultinjection_ioctl( ) routines implement
29964 * driver support for handling fault injection for error analysis
29965 * causing faults in multiple layers of the driver.
29969 #ifdef SD_FAULT_INJECTION
29970 static uint_t sd_fault_injection_on
= 0;
29973 * Function: sd_faultinjection_ioctl()
29975 * Description: This routine is the driver entry point for handling
29976 * faultinjection ioctls to inject errors into the
29979 * Arguments: cmd - the ioctl cmd received
29980 * arg - the arguments from user and returns
29984 sd_faultinjection_ioctl(int cmd
, intptr_t arg
, struct sd_lun
*un
) {
29989 SD_TRACE(SD_LOG_IOERR
, un
, "sd_faultinjection_ioctl: entry\n");
29991 mutex_enter(SD_MUTEX(un
));
29995 /* Allow pushed faults to be injected */
29996 SD_INFO(SD_LOG_SDTEST
, un
,
29997 "sd_faultinjection_ioctl: Injecting Fault Run\n");
29999 sd_fault_injection_on
= 1;
30001 SD_INFO(SD_LOG_IOERR
, un
,
30002 "sd_faultinjection_ioctl: run finished\n");
30006 /* Start Injection Session */
30007 SD_INFO(SD_LOG_SDTEST
, un
,
30008 "sd_faultinjection_ioctl: Injecting Fault Start\n");
30010 sd_fault_injection_on
= 0;
30011 un
->sd_injection_mask
= 0xFFFFFFFF;
30012 for (i
= 0; i
< SD_FI_MAX_ERROR
; i
++) {
30013 un
->sd_fi_fifo_pkt
[i
] = NULL
;
30014 un
->sd_fi_fifo_xb
[i
] = NULL
;
30015 un
->sd_fi_fifo_un
[i
] = NULL
;
30016 un
->sd_fi_fifo_arq
[i
] = NULL
;
30018 un
->sd_fi_fifo_start
= 0;
30019 un
->sd_fi_fifo_end
= 0;
30021 mutex_enter(&(un
->un_fi_mutex
));
30022 un
->sd_fi_log
[0] = '\0';
30023 un
->sd_fi_buf_len
= 0;
30024 mutex_exit(&(un
->un_fi_mutex
));
30026 SD_INFO(SD_LOG_IOERR
, un
,
30027 "sd_faultinjection_ioctl: start finished\n");
30031 /* Stop Injection Session */
30032 SD_INFO(SD_LOG_SDTEST
, un
,
30033 "sd_faultinjection_ioctl: Injecting Fault Stop\n");
30034 sd_fault_injection_on
= 0;
30035 un
->sd_injection_mask
= 0x0;
30037 /* Empty stray or unuseds structs from fifo */
30038 for (i
= 0; i
< SD_FI_MAX_ERROR
; i
++) {
30039 if (un
->sd_fi_fifo_pkt
[i
] != NULL
) {
30040 kmem_free(un
->sd_fi_fifo_pkt
[i
],
30041 sizeof (struct sd_fi_pkt
));
30043 if (un
->sd_fi_fifo_xb
[i
] != NULL
) {
30044 kmem_free(un
->sd_fi_fifo_xb
[i
],
30045 sizeof (struct sd_fi_xb
));
30047 if (un
->sd_fi_fifo_un
[i
] != NULL
) {
30048 kmem_free(un
->sd_fi_fifo_un
[i
],
30049 sizeof (struct sd_fi_un
));
30051 if (un
->sd_fi_fifo_arq
[i
] != NULL
) {
30052 kmem_free(un
->sd_fi_fifo_arq
[i
],
30053 sizeof (struct sd_fi_arq
));
30055 un
->sd_fi_fifo_pkt
[i
] = NULL
;
30056 un
->sd_fi_fifo_un
[i
] = NULL
;
30057 un
->sd_fi_fifo_xb
[i
] = NULL
;
30058 un
->sd_fi_fifo_arq
[i
] = NULL
;
30060 un
->sd_fi_fifo_start
= 0;
30061 un
->sd_fi_fifo_end
= 0;
30063 SD_INFO(SD_LOG_IOERR
, un
,
30064 "sd_faultinjection_ioctl: stop finished\n");
30067 case SDIOCINSERTPKT
:
30068 /* Store a packet struct to be pushed onto fifo */
30069 SD_INFO(SD_LOG_SDTEST
, un
,
30070 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
30072 i
= un
->sd_fi_fifo_end
% SD_FI_MAX_ERROR
;
30074 sd_fault_injection_on
= 0;
30076 /* No more that SD_FI_MAX_ERROR allowed in Queue */
30077 if (un
->sd_fi_fifo_pkt
[i
] != NULL
) {
30078 kmem_free(un
->sd_fi_fifo_pkt
[i
],
30079 sizeof (struct sd_fi_pkt
));
30082 un
->sd_fi_fifo_pkt
[i
] =
30083 kmem_alloc(sizeof (struct sd_fi_pkt
), KM_NOSLEEP
);
30084 if (un
->sd_fi_fifo_pkt
[i
] == NULL
) {
30085 /* Alloc failed don't store anything */
30088 rval
= ddi_copyin((void *)arg
, un
->sd_fi_fifo_pkt
[i
],
30089 sizeof (struct sd_fi_pkt
), 0);
30091 kmem_free(un
->sd_fi_fifo_pkt
[i
],
30092 sizeof (struct sd_fi_pkt
));
30093 un
->sd_fi_fifo_pkt
[i
] = NULL
;
30096 SD_INFO(SD_LOG_IOERR
, un
,
30097 "sd_faultinjection_ioctl: pkt null\n");
30101 case SDIOCINSERTXB
:
30102 /* Store a xb struct to be pushed onto fifo */
30103 SD_INFO(SD_LOG_SDTEST
, un
,
30104 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
30106 i
= un
->sd_fi_fifo_end
% SD_FI_MAX_ERROR
;
30108 sd_fault_injection_on
= 0;
30110 if (un
->sd_fi_fifo_xb
[i
] != NULL
) {
30111 kmem_free(un
->sd_fi_fifo_xb
[i
],
30112 sizeof (struct sd_fi_xb
));
30113 un
->sd_fi_fifo_xb
[i
] = NULL
;
30116 un
->sd_fi_fifo_xb
[i
] =
30117 kmem_alloc(sizeof (struct sd_fi_xb
), KM_NOSLEEP
);
30118 if (un
->sd_fi_fifo_xb
[i
] == NULL
) {
30119 /* Alloc failed don't store anything */
30122 rval
= ddi_copyin((void *)arg
, un
->sd_fi_fifo_xb
[i
],
30123 sizeof (struct sd_fi_xb
), 0);
30126 kmem_free(un
->sd_fi_fifo_xb
[i
],
30127 sizeof (struct sd_fi_xb
));
30128 un
->sd_fi_fifo_xb
[i
] = NULL
;
30131 SD_INFO(SD_LOG_IOERR
, un
,
30132 "sd_faultinjection_ioctl: xb null\n");
30136 case SDIOCINSERTUN
:
30137 /* Store a un struct to be pushed onto fifo */
30138 SD_INFO(SD_LOG_SDTEST
, un
,
30139 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
30141 i
= un
->sd_fi_fifo_end
% SD_FI_MAX_ERROR
;
30143 sd_fault_injection_on
= 0;
30145 if (un
->sd_fi_fifo_un
[i
] != NULL
) {
30146 kmem_free(un
->sd_fi_fifo_un
[i
],
30147 sizeof (struct sd_fi_un
));
30148 un
->sd_fi_fifo_un
[i
] = NULL
;
30151 un
->sd_fi_fifo_un
[i
] =
30152 kmem_alloc(sizeof (struct sd_fi_un
), KM_NOSLEEP
);
30153 if (un
->sd_fi_fifo_un
[i
] == NULL
) {
30154 /* Alloc failed don't store anything */
30157 rval
= ddi_copyin((void *)arg
, un
->sd_fi_fifo_un
[i
],
30158 sizeof (struct sd_fi_un
), 0);
30160 kmem_free(un
->sd_fi_fifo_un
[i
],
30161 sizeof (struct sd_fi_un
));
30162 un
->sd_fi_fifo_un
[i
] = NULL
;
30166 SD_INFO(SD_LOG_IOERR
, un
,
30167 "sd_faultinjection_ioctl: un null\n");
30172 case SDIOCINSERTARQ
:
30173 /* Store a arq struct to be pushed onto fifo */
30174 SD_INFO(SD_LOG_SDTEST
, un
,
30175 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
30176 i
= un
->sd_fi_fifo_end
% SD_FI_MAX_ERROR
;
30178 sd_fault_injection_on
= 0;
30180 if (un
->sd_fi_fifo_arq
[i
] != NULL
) {
30181 kmem_free(un
->sd_fi_fifo_arq
[i
],
30182 sizeof (struct sd_fi_arq
));
30183 un
->sd_fi_fifo_arq
[i
] = NULL
;
30186 un
->sd_fi_fifo_arq
[i
] =
30187 kmem_alloc(sizeof (struct sd_fi_arq
), KM_NOSLEEP
);
30188 if (un
->sd_fi_fifo_arq
[i
] == NULL
) {
30189 /* Alloc failed don't store anything */
30192 rval
= ddi_copyin((void *)arg
, un
->sd_fi_fifo_arq
[i
],
30193 sizeof (struct sd_fi_arq
), 0);
30195 kmem_free(un
->sd_fi_fifo_arq
[i
],
30196 sizeof (struct sd_fi_arq
));
30197 un
->sd_fi_fifo_arq
[i
] = NULL
;
30201 SD_INFO(SD_LOG_IOERR
, un
,
30202 "sd_faultinjection_ioctl: arq null\n");
30208 /* Push stored xb, pkt, un, and arq onto fifo */
30209 sd_fault_injection_on
= 0;
30212 rval
= ddi_copyin((void *)arg
, &i
, sizeof (uint_t
), 0);
30214 un
->sd_fi_fifo_end
+ i
< SD_FI_MAX_ERROR
) {
30215 un
->sd_fi_fifo_end
+= i
;
30218 SD_INFO(SD_LOG_IOERR
, un
,
30219 "sd_faultinjection_ioctl: push arg null\n");
30220 if (un
->sd_fi_fifo_end
+ i
< SD_FI_MAX_ERROR
) {
30221 un
->sd_fi_fifo_end
++;
30224 SD_INFO(SD_LOG_IOERR
, un
,
30225 "sd_faultinjection_ioctl: push to end=%d\n",
30226 un
->sd_fi_fifo_end
);
30229 case SDIOCRETRIEVE
:
30230 /* Return buffer of log from Injection session */
30231 SD_INFO(SD_LOG_SDTEST
, un
,
30232 "sd_faultinjection_ioctl: Injecting Fault Retreive");
30234 sd_fault_injection_on
= 0;
30236 mutex_enter(&(un
->un_fi_mutex
));
30237 rval
= ddi_copyout(un
->sd_fi_log
, (void *)arg
,
30238 un
->sd_fi_buf_len
+1, 0);
30239 mutex_exit(&(un
->un_fi_mutex
));
30243 * arg is possibly invalid setting
30244 * it to NULL for return
30251 mutex_exit(SD_MUTEX(un
));
30252 SD_TRACE(SD_LOG_IOERR
, un
, "sd_faultinjection_ioctl:"
30258 * Function: sd_injection_log()
30260 * Description: This routine adds buff to the already existing injection log
30261 * for retrieval via faultinjection_ioctl for use in fault
30262 * detection and recovery
30264 * Arguments: buf - the string to add to the log
30268 sd_injection_log(char *buf
, struct sd_lun
*un
)
30272 ASSERT(un
!= NULL
);
30273 ASSERT(buf
!= NULL
);
30275 mutex_enter(&(un
->un_fi_mutex
));
30277 len
= min(strlen(buf
), 255);
30278 /* Add logged value to Injection log to be returned later */
30279 if (len
+ un
->sd_fi_buf_len
< SD_FI_MAX_BUF
) {
30280 uint_t offset
= strlen((char *)un
->sd_fi_log
);
30281 char *destp
= (char *)un
->sd_fi_log
+ offset
;
30283 for (i
= 0; i
< len
; i
++) {
30286 un
->sd_fi_buf_len
+= len
;
30287 un
->sd_fi_log
[un
->sd_fi_buf_len
] = '\0';
30290 mutex_exit(&(un
->un_fi_mutex
));
30295 * Function: sd_faultinjection()
30297 * Description: This routine takes the pkt and changes its
30298 * content based on error injection scenerio.
30300 * Arguments: pktp - packet to be changed
30304 sd_faultinjection(struct scsi_pkt
*pktp
)
30307 struct sd_fi_pkt
*fi_pkt
;
30308 struct sd_fi_xb
*fi_xb
;
30309 struct sd_fi_un
*fi_un
;
30310 struct sd_fi_arq
*fi_arq
;
30312 struct sd_xbuf
*xb
;
30315 ASSERT(pktp
!= NULL
);
30317 /* pull bp xb and un from pktp */
30318 bp
= (struct buf
*)pktp
->pkt_private
;
30319 xb
= SD_GET_XBUF(bp
);
30320 un
= SD_GET_UN(bp
);
30322 ASSERT(un
!= NULL
);
30324 mutex_enter(SD_MUTEX(un
));
30326 SD_TRACE(SD_LOG_SDTEST
, un
,
30327 "sd_faultinjection: entry Injection from sdintr\n");
30329 /* if injection is off return */
30330 if (sd_fault_injection_on
== 0 ||
30331 un
->sd_fi_fifo_start
== un
->sd_fi_fifo_end
) {
30332 mutex_exit(SD_MUTEX(un
));
30336 SD_INFO(SD_LOG_SDTEST
, un
,
30337 "sd_faultinjection: is working for copying\n");
30339 /* take next set off fifo */
30340 i
= un
->sd_fi_fifo_start
% SD_FI_MAX_ERROR
;
30342 fi_pkt
= un
->sd_fi_fifo_pkt
[i
];
30343 fi_xb
= un
->sd_fi_fifo_xb
[i
];
30344 fi_un
= un
->sd_fi_fifo_un
[i
];
30345 fi_arq
= un
->sd_fi_fifo_arq
[i
];
30348 /* set variables accordingly */
30349 /* set pkt if it was on fifo */
30350 if (fi_pkt
!= NULL
) {
30351 SD_CONDSET(pktp
, pkt
, pkt_flags
, "pkt_flags");
30352 SD_CONDSET(*pktp
, pkt
, pkt_scbp
, "pkt_scbp");
30353 if (fi_pkt
->pkt_cdbp
!= 0xff)
30354 SD_CONDSET(*pktp
, pkt
, pkt_cdbp
, "pkt_cdbp");
30355 SD_CONDSET(pktp
, pkt
, pkt_state
, "pkt_state");
30356 SD_CONDSET(pktp
, pkt
, pkt_statistics
, "pkt_statistics");
30357 SD_CONDSET(pktp
, pkt
, pkt_reason
, "pkt_reason");
30360 /* set xb if it was on fifo */
30361 if (fi_xb
!= NULL
) {
30362 SD_CONDSET(xb
, xb
, xb_blkno
, "xb_blkno");
30363 SD_CONDSET(xb
, xb
, xb_dma_resid
, "xb_dma_resid");
30364 if (fi_xb
->xb_retry_count
!= 0)
30365 SD_CONDSET(xb
, xb
, xb_retry_count
, "xb_retry_count");
30366 SD_CONDSET(xb
, xb
, xb_victim_retry_count
,
30367 "xb_victim_retry_count");
30368 SD_CONDSET(xb
, xb
, xb_sense_status
, "xb_sense_status");
30369 SD_CONDSET(xb
, xb
, xb_sense_state
, "xb_sense_state");
30370 SD_CONDSET(xb
, xb
, xb_sense_resid
, "xb_sense_resid");
30372 /* copy in block data from sense */
30374 * if (fi_xb->xb_sense_data[0] != -1) {
30375 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
30379 bcopy(fi_xb
->xb_sense_data
, xb
->xb_sense_data
, SENSE_LENGTH
);
30381 /* copy in extended sense codes */
30382 SD_CONDSET(((struct scsi_extended_sense
*)xb
->xb_sense_data
),
30383 xb
, es_code
, "es_code");
30384 SD_CONDSET(((struct scsi_extended_sense
*)xb
->xb_sense_data
),
30385 xb
, es_key
, "es_key");
30386 SD_CONDSET(((struct scsi_extended_sense
*)xb
->xb_sense_data
),
30387 xb
, es_add_code
, "es_add_code");
30388 SD_CONDSET(((struct scsi_extended_sense
*)xb
->xb_sense_data
),
30389 xb
, es_qual_code
, "es_qual_code");
30390 struct scsi_extended_sense
*esp
;
30391 esp
= (struct scsi_extended_sense
*)xb
->xb_sense_data
;
30392 esp
->es_class
= CLASS_EXTENDED_SENSE
;
30395 /* set un if it was on fifo */
30396 if (fi_un
!= NULL
) {
30397 SD_CONDSET(un
->un_sd
->sd_inq
, un
, inq_rmb
, "inq_rmb");
30398 SD_CONDSET(un
, un
, un_ctype
, "un_ctype");
30399 SD_CONDSET(un
, un
, un_reset_retry_count
,
30400 "un_reset_retry_count");
30401 SD_CONDSET(un
, un
, un_reservation_type
, "un_reservation_type");
30402 SD_CONDSET(un
, un
, un_resvd_status
, "un_resvd_status");
30403 SD_CONDSET(un
, un
, un_f_arq_enabled
, "un_f_arq_enabled");
30404 SD_CONDSET(un
, un
, un_f_allow_bus_device_reset
,
30405 "un_f_allow_bus_device_reset");
30406 SD_CONDSET(un
, un
, un_f_opt_queueing
, "un_f_opt_queueing");
30410 /* copy in auto request sense if it was on fifo */
30411 if (fi_arq
!= NULL
) {
30412 bcopy(fi_arq
, pktp
->pkt_scbp
, sizeof (struct sd_fi_arq
));
30416 if (un
->sd_fi_fifo_pkt
[i
] != NULL
) {
30417 kmem_free(un
->sd_fi_fifo_pkt
[i
], sizeof (struct sd_fi_pkt
));
30419 if (un
->sd_fi_fifo_xb
[i
] != NULL
) {
30420 kmem_free(un
->sd_fi_fifo_xb
[i
], sizeof (struct sd_fi_xb
));
30422 if (un
->sd_fi_fifo_un
[i
] != NULL
) {
30423 kmem_free(un
->sd_fi_fifo_un
[i
], sizeof (struct sd_fi_un
));
30425 if (un
->sd_fi_fifo_arq
[i
] != NULL
) {
30426 kmem_free(un
->sd_fi_fifo_arq
[i
], sizeof (struct sd_fi_arq
));
30430 * kmem_free does not gurantee to set to NULL
30431 * since we uses these to determine if we set
30432 * values or not lets confirm they are always
30435 un
->sd_fi_fifo_pkt
[i
] = NULL
;
30436 un
->sd_fi_fifo_un
[i
] = NULL
;
30437 un
->sd_fi_fifo_xb
[i
] = NULL
;
30438 un
->sd_fi_fifo_arq
[i
] = NULL
;
30440 un
->sd_fi_fifo_start
++;
30442 mutex_exit(SD_MUTEX(un
));
30444 SD_INFO(SD_LOG_SDTEST
, un
, "sd_faultinjection: exit\n");
30447 #endif /* SD_FAULT_INJECTION */
30450 * This routine is invoked in sd_unit_attach(). Before calling it, the
30451 * properties in conf file should be processed already, and "hotpluggable"
30452 * property was processed also.
30454 * The sd driver distinguishes 3 different type of devices: removable media,
30455 * non-removable media, and hotpluggable. Below the differences are defined:
30459 * The device ID of a device is used to identify this device. Refer to
30460 * ddi_devid_register(9F).
30462 * For a non-removable media disk device which can provide 0x80 or 0x83
30463 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
30464 * device ID is created to identify this device. For other non-removable
30465 * media devices, a default device ID is created only if this device has
30466 * at least 2 alter cylinders. Otherwise, this device has no devid.
30468 * -------------------------------------------------------
30469 * removable media hotpluggable | Can Have Device ID
30470 * -------------------------------------------------------
30471 * false false | Yes
30474 * ------------------------------------------------------
30477 * 2. SCSI group 4 commands
30479 * In SCSI specs, only some commands in group 4 command set can use
30480 * 8-byte addresses that can be used to access >2TB storage spaces.
30481 * Other commands have no such capability. Without supporting group4,
30482 * it is impossible to make full use of storage spaces of a disk with
30483 * capacity larger than 2TB.
30485 * -----------------------------------------------
30486 * removable media hotpluggable LP64 | Group
30487 * -----------------------------------------------
30488 * false false false | 1
30489 * false false true | 4
30490 * false true false | 1
30491 * false true true | 4
30493 * -----------------------------------------------
30496 * 3. Check for VTOC Label
30498 * If a direct-access disk has no EFI label, sd will check if it has a
30499 * valid VTOC label. Now, sd also does that check for removable media
30500 * and hotpluggable devices.
30502 * --------------------------------------------------------------
30503 * Direct-Access removable media hotpluggable | Check Label
30504 * -------------------------------------------------------------
30505 * false false false | No
30506 * false false true | No
30507 * false true false | Yes
30508 * false true true | Yes
30510 * --------------------------------------------------------------
30513 * 4. Building default VTOC label
30515 * As section 3 says, sd checks if some kinds of devices have VTOC label.
30516 * If those devices have no valid VTOC label, sd(7d) will attempt to
30517 * create default VTOC for them. Currently sd creates default VTOC label
30518 * for all devices on x86 platform (VTOC_16), but only for removable
30519 * media devices on SPARC (VTOC_8).
30521 * -----------------------------------------------------------
30522 * removable media hotpluggable platform | Default Label
30523 * -----------------------------------------------------------
30524 * false false sparc | No
30525 * false true x86 | Yes
30526 * false true sparc | Yes
30528 * ----------------------------------------------------------
30531 * 5. Supported blocksizes of target devices
30533 * Sd supports non-512-byte blocksize for removable media devices only.
30534 * For other devices, only 512-byte blocksize is supported. This may be
30535 * changed in near future because some RAID devices require non-512-byte
30538 * -----------------------------------------------------------
30539 * removable media hotpluggable | non-512-byte blocksize
30540 * -----------------------------------------------------------
30544 * -----------------------------------------------------------
30547 * 6. Automatic mount & unmount
30549 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query
30550 * if a device is a removable media device. It returns 1 for removable media
30551 * devices, and 0 for others.
30553 * The automatic mounting subsystem should distinguish between the types
30554 * of devices and apply automounting policies to each.
30557 * 7. fdisk partition management
30559 * Fdisk is traditional partition method on x86 platform. Sd(7d) driver
30560 * just supports fdisk partitions on x86 platform. On sparc platform, sd
30561 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
30562 * fdisk partitions on both x86 and SPARC platform.
30564 * -----------------------------------------------------------
30565 * platform removable media USB/1394 | fdisk supported
30566 * -----------------------------------------------------------
30568 * ------------------------------------------------------------
30569 * sparc X X | false
30570 * ------------------------------------------------------------
30575 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support
30576 * read/write mboot for removable media devices on sparc platform.
30578 * -----------------------------------------------------------
30579 * platform removable media USB/1394 | mboot supported
30580 * -----------------------------------------------------------
30582 * ------------------------------------------------------------
30583 * sparc false false | false
30584 * sparc false true | true
30585 * sparc true false | true
30586 * sparc true true | true
30587 * ------------------------------------------------------------
30590 * 9. error handling during opening device
30592 * If failed to open a disk device, an errno is returned. For some kinds
30593 * of errors, different errno is returned depending on if this device is
30594 * a removable media device. This brings USB/1394 hard disks in line with
30595 * expected hard disk behavior. It is not expected that this breaks any
30598 * ------------------------------------------------------
30599 * removable media hotpluggable | errno
30600 * ------------------------------------------------------
30601 * false false | EIO
30604 * ------------------------------------------------------
30607 * 11. ioctls: DKIOCEJECT, CDROMEJECT
30609 * These IOCTLs are applicable only to removable media devices.
30611 * -----------------------------------------------------------
30612 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT
30613 * -----------------------------------------------------------
30617 * -----------------------------------------------------------
30620 * 12. Kstats for partitions
30622 * sd creates partition kstat for non-removable media devices. USB and
30623 * Firewire hard disks now have partition kstats
30625 * ------------------------------------------------------
30626 * removable media hotpluggable | kstat
30627 * ------------------------------------------------------
30628 * false false | Yes
30631 * ------------------------------------------------------
30634 * 13. Removable media & hotpluggable properties
30636 * Sd driver creates a "removable-media" property for removable media
30637 * devices. Parent nexus drivers create a "hotpluggable" property if
30638 * it supports hotplugging.
30640 * ---------------------------------------------------------------------
30641 * removable media hotpluggable | "removable-media" " hotpluggable"
30642 * ---------------------------------------------------------------------
30643 * false false | No No
30644 * false true | No Yes
30645 * true false | Yes No
30646 * true true | Yes Yes
30647 * ---------------------------------------------------------------------
30650 * 14. Power Management
30652 * sd only power manages removable media devices or devices that support
30653 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250)
30655 * A parent nexus that supports hotplugging can also set "pm-capable"
30656 * if the disk can be power managed.
30658 * ------------------------------------------------------------
30659 * removable media hotpluggable pm-capable | power manage
30660 * ------------------------------------------------------------
30661 * false false false | No
30662 * false false true | Yes
30663 * false true false | No
30664 * false true true | Yes
30666 * ------------------------------------------------------------
30668 * USB and firewire hard disks can now be power managed independently
30669 * of the framebuffer
30672 * 15. Support for USB disks with capacity larger than 1TB
30674 * Currently, sd doesn't permit a fixed disk device with capacity
30675 * larger than 1TB to be used in a 32-bit operating system environment.
30676 * However, sd doesn't do that for removable media devices. Instead, it
30677 * assumes that removable media devices cannot have a capacity larger
30678 * than 1TB. Therefore, using those devices on 32-bit system is partially
30679 * supported, which can cause some unexpected results.
30681 * ---------------------------------------------------------------------
30682 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env
30683 * ---------------------------------------------------------------------
30684 * false false | true | no
30685 * false true | true | no
30686 * true false | true | Yes
30687 * true true | true | Yes
30688 * ---------------------------------------------------------------------
30691 * 16. Check write-protection at open time
30693 * When a removable media device is being opened for writing without NDELAY
30694 * flag, sd will check if this device is writable. If attempting to open
30695 * without NDELAY flag a write-protected device, this operation will abort.
30697 * ------------------------------------------------------------
30698 * removable media USB/1394 | WP Check
30699 * ------------------------------------------------------------
30704 * ------------------------------------------------------------
30707 * 17. syslog when corrupted VTOC is encountered
30709 * Currently, if an invalid VTOC is encountered, sd only print syslog
30710 * for fixed SCSI disks.
30711 * ------------------------------------------------------------
30712 * removable media USB/1394 | print syslog
30713 * ------------------------------------------------------------
30714 * false false | Yes
30718 * ------------------------------------------------------------
30721 sd_set_unit_attributes(struct sd_lun
*un
, dev_info_t
*devi
)
30726 ASSERT(un
->un_sd
->sd_inq
);
30729 * Enable SYNC CACHE support for all devices.
30731 un
->un_f_sync_cache_supported
= TRUE
;
30734 * Set the sync cache required flag to false.
30735 * This would ensure that there is no SYNC CACHE
30736 * sent when there are no writes
30738 un
->un_f_sync_cache_required
= FALSE
;
30740 if (un
->un_sd
->sd_inq
->inq_rmb
) {
30742 * The media of this device is removable. And for this kind
30743 * of devices, it is possible to change medium after opening
30744 * devices. Thus we should support this operation.
30746 un
->un_f_has_removable_media
= TRUE
;
30749 * support non-512-byte blocksize of removable media devices
30751 un
->un_f_non_devbsize_supported
= TRUE
;
30754 * Assume that all removable media devices support DOOR_LOCK
30756 un
->un_f_doorlock_supported
= TRUE
;
30759 * For a removable media device, it is possible to be opened
30760 * with NDELAY flag when there is no media in drive, in this
30761 * case we don't care if device is writable. But if without
30762 * NDELAY flag, we need to check if media is write-protected.
30764 un
->un_f_chk_wp_open
= TRUE
;
30767 * need to start a SCSI watch thread to monitor media state,
30768 * when media is being inserted or ejected, notify syseventd.
30770 un
->un_f_monitor_media_state
= TRUE
;
30773 * Some devices don't support START_STOP_UNIT command.
30774 * Therefore, we'd better check if a device supports it
30775 * before sending it.
30777 un
->un_f_check_start_stop
= TRUE
;
30780 * support eject media ioctl:
30781 * FDEJECT, DKIOCEJECT, CDROMEJECT
30783 un
->un_f_eject_media_supported
= TRUE
;
30786 * Because many removable-media devices don't support
30787 * LOG_SENSE, we couldn't use this command to check if
30788 * a removable media device support power-management.
30789 * We assume that they support power-management via
30790 * START_STOP_UNIT command and can be spun up and down
30791 * without limitations.
30793 un
->un_f_pm_supported
= TRUE
;
30796 * Need to create a zero length (Boolean) property
30797 * removable-media for the removable media devices.
30798 * Note that the return value of the property is not being
30799 * checked, since if unable to create the property
30800 * then do not want the attach to fail altogether. Consistent
30801 * with other property creation in attach.
30803 (void) ddi_prop_create(DDI_DEV_T_NONE
, devi
,
30804 DDI_PROP_CANSLEEP
, "removable-media", NULL
, 0);
30808 * create device ID for device
30810 un
->un_f_devid_supported
= TRUE
;
30813 * Spin up non-removable-media devices once it is attached
30815 un
->un_f_attach_spinup
= TRUE
;
30818 * According to SCSI specification, Sense data has two kinds of
30819 * format: fixed format, and descriptor format. At present, we
30820 * don't support descriptor format sense data for removable
30823 if (SD_INQUIRY(un
)->inq_dtype
== DTYPE_DIRECT
) {
30824 un
->un_f_descr_format_supported
= TRUE
;
30828 * kstats are created only for non-removable media devices.
30830 * Set this in sd.conf to 0 in order to disable kstats. The
30831 * default is 1, so they are enabled by default.
30833 un
->un_f_pkstats_enabled
= (ddi_prop_get_int(DDI_DEV_T_ANY
,
30834 SD_DEVINFO(un
), DDI_PROP_DONTPASS
,
30835 "enable-partition-kstats", 1));
30838 * Check if HBA has set the "pm-capable" property.
30839 * If "pm-capable" exists and is non-zero then we can
30840 * power manage the device without checking the start/stop
30841 * cycle count log sense page.
30843 * If "pm-capable" exists and is set to be false (0),
30844 * then we should not power manage the device.
30846 * If "pm-capable" doesn't exist then pm_cap will
30847 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
30848 * sd will check the start/stop cycle count log sense page
30849 * and power manage the device if the cycle count limit has
30850 * not been exceeded.
30852 pm_cap
= ddi_prop_get_int(DDI_DEV_T_ANY
, devi
,
30853 DDI_PROP_DONTPASS
, "pm-capable", SD_PM_CAPABLE_UNDEFINED
);
30854 if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap
)) {
30855 un
->un_f_log_sense_supported
= TRUE
;
30856 if (!un
->un_f_power_condition_disabled
&&
30857 SD_INQUIRY(un
)->inq_ansi
== 6) {
30858 un
->un_f_power_condition_supported
= TRUE
;
30862 * pm-capable property exists.
30864 * Convert "TRUE" values for pm_cap to
30865 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
30866 * later. "TRUE" values are any values defined in
30869 if (SD_PM_CAPABLE_IS_FALSE(pm_cap
)) {
30870 un
->un_f_log_sense_supported
= FALSE
;
30872 /* SD_PM_CAPABLE_IS_TRUE case */
30873 un
->un_f_pm_supported
= TRUE
;
30874 if (!un
->un_f_power_condition_disabled
&&
30875 SD_PM_CAPABLE_IS_SPC_4(pm_cap
)) {
30876 un
->un_f_power_condition_supported
=
30879 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap
)) {
30880 un
->un_f_log_sense_supported
= TRUE
;
30881 un
->un_f_pm_log_sense_smart
=
30882 SD_PM_CAP_SMART_LOG(pm_cap
);
30886 SD_INFO(SD_LOG_ATTACH_DETACH
, un
,
30887 "sd_unit_attach: un:0x%p pm-capable "
30888 "property set to %d.\n", un
, un
->un_f_pm_supported
);
30892 if (un
->un_f_is_hotpluggable
) {
30895 * Have to watch hotpluggable devices as well, since
30896 * that's the only way for userland applications to
30897 * detect hot removal while device is busy/mounted.
30899 un
->un_f_monitor_media_state
= TRUE
;
30901 un
->un_f_check_start_stop
= TRUE
;
30908 * Provides rdwr access for cmlb via sd_tgops. The start_block is
30909 * in sys block size, req_length in bytes.
30913 sd_tg_rdwr(dev_info_t
*devi
, uchar_t cmd
, void *bufaddr
,
30914 diskaddr_t start_block
, size_t reqlength
, void *tg_cookie
)
30917 int path_flag
= (int)(uintptr_t)tg_cookie
;
30919 diskaddr_t real_addr
= start_block
;
30920 diskaddr_t first_byte
, end_block
;
30922 size_t buffer_size
= reqlength
;
30928 un
= ddi_get_soft_state(sd_state
, ddi_get_instance(devi
));
30932 if (cmd
!= TG_READ
&& cmd
!= TG_WRITE
)
30935 ssc
= sd_ssc_init(un
);
30936 mutex_enter(SD_MUTEX(un
));
30937 if (un
->un_f_tgt_blocksize_is_valid
== FALSE
) {
30938 mutex_exit(SD_MUTEX(un
));
30939 rval
= sd_send_scsi_READ_CAPACITY(ssc
, (uint64_t *)&cap
,
30940 &lbasize
, path_flag
);
30943 mutex_enter(SD_MUTEX(un
));
30944 sd_update_block_info(un
, lbasize
, cap
);
30945 if ((un
->un_f_tgt_blocksize_is_valid
== FALSE
)) {
30946 mutex_exit(SD_MUTEX(un
));
30952 if (NOT_DEVBSIZE(un
)) {
30954 * sys_blocksize != tgt_blocksize, need to re-adjust
30955 * blkno and save the index to beginning of dk_label
30957 first_byte
= SD_SYSBLOCKS2BYTES(start_block
);
30958 real_addr
= first_byte
/ un
->un_tgt_blocksize
;
30960 end_block
= (first_byte
+ reqlength
+
30961 un
->un_tgt_blocksize
- 1) / un
->un_tgt_blocksize
;
30963 /* round up buffer size to multiple of target block size */
30964 buffer_size
= (end_block
- real_addr
) * un
->un_tgt_blocksize
;
30966 SD_TRACE(SD_LOG_IO_PARTITION
, un
, "sd_tg_rdwr",
30967 "label_addr: 0x%x allocation size: 0x%x\n",
30968 real_addr
, buffer_size
);
30970 if (((first_byte
% un
->un_tgt_blocksize
) != 0) ||
30971 (reqlength
% un
->un_tgt_blocksize
) != 0)
30972 /* the request is not aligned */
30973 dkl
= kmem_zalloc(buffer_size
, KM_SLEEP
);
30977 * The MMC standard allows READ CAPACITY to be
30978 * inaccurate by a bounded amount (in the interest of
30979 * response latency). As a result, failed READs are
30980 * commonplace (due to the reading of metadata and not
30981 * data). Depending on the per-Vendor/drive Sense data,
30982 * the failed READ can cause many (unnecessary) retries.
30985 if (ISCD(un
) && (cmd
== TG_READ
) &&
30986 (un
->un_f_blockcount_is_valid
== TRUE
) &&
30987 ((start_block
== (un
->un_blockcount
- 1))||
30988 (start_block
== (un
->un_blockcount
- 2)))) {
30989 path_flag
= SD_PATH_DIRECT_PRIORITY
;
30992 mutex_exit(SD_MUTEX(un
));
30993 if (cmd
== TG_READ
) {
30994 rval
= sd_send_scsi_READ(ssc
, (dkl
!= NULL
)? dkl
: bufaddr
,
30995 buffer_size
, real_addr
, path_flag
);
30997 bcopy(dkl
+ SD_TGTBYTEOFFSET(un
, start_block
,
30998 real_addr
), bufaddr
, reqlength
);
31001 rval
= sd_send_scsi_READ(ssc
, dkl
, buffer_size
,
31002 real_addr
, path_flag
);
31006 bcopy(bufaddr
, dkl
+ SD_TGTBYTEOFFSET(un
, start_block
,
31007 real_addr
), reqlength
);
31009 rval
= sd_send_scsi_WRITE(ssc
, (dkl
!= NULL
)? dkl
: bufaddr
,
31010 buffer_size
, real_addr
, path_flag
);
31015 kmem_free(dkl
, buffer_size
);
31019 sd_ssc_assessment(ssc
, SD_FMT_STATUS_CHECK
);
31021 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
31030 sd_tg_getinfo(dev_info_t
*devi
, int cmd
, void *arg
, void *tg_cookie
)
31036 int path_flag
= (int)(uintptr_t)tg_cookie
;
31039 un
= ddi_get_soft_state(sd_state
, ddi_get_instance(devi
));
31044 case TG_GETPHYGEOM
:
31045 case TG_GETVIRTGEOM
:
31046 case TG_GETCAPACITY
:
31047 case TG_GETBLOCKSIZE
:
31048 mutex_enter(SD_MUTEX(un
));
31050 if ((un
->un_f_blockcount_is_valid
== TRUE
) &&
31051 (un
->un_f_tgt_blocksize_is_valid
== TRUE
)) {
31052 cap
= un
->un_blockcount
;
31053 lbasize
= un
->un_tgt_blocksize
;
31054 mutex_exit(SD_MUTEX(un
));
31057 mutex_exit(SD_MUTEX(un
));
31058 ssc
= sd_ssc_init(un
);
31059 ret
= sd_send_scsi_READ_CAPACITY(ssc
, (uint64_t *)&cap
,
31060 &lbasize
, path_flag
);
31063 sd_ssc_assessment(ssc
,
31064 SD_FMT_STATUS_CHECK
);
31066 sd_ssc_assessment(ssc
,
31072 mutex_enter(SD_MUTEX(un
));
31073 sd_update_block_info(un
, lbasize
, cap
);
31074 if ((un
->un_f_blockcount_is_valid
== FALSE
) ||
31075 (un
->un_f_tgt_blocksize_is_valid
== FALSE
)) {
31076 mutex_exit(SD_MUTEX(un
));
31079 mutex_exit(SD_MUTEX(un
));
31082 if (cmd
== TG_GETCAPACITY
) {
31083 *(diskaddr_t
*)arg
= cap
;
31087 if (cmd
== TG_GETBLOCKSIZE
) {
31088 *(uint32_t *)arg
= lbasize
;
31092 if (cmd
== TG_GETPHYGEOM
)
31093 ret
= sd_get_physical_geometry(un
, (cmlb_geom_t
*)arg
,
31094 cap
, lbasize
, path_flag
);
31096 /* TG_GETVIRTGEOM */
31097 ret
= sd_get_virtual_geometry(un
,
31098 (cmlb_geom_t
*)arg
, cap
, lbasize
);
31103 mutex_enter(SD_MUTEX(un
));
31104 ((tg_attribute_t
*)arg
)->media_is_writable
=
31105 un
->un_f_mmc_writable_media
;
31106 ((tg_attribute_t
*)arg
)->media_is_solid_state
=
31107 un
->un_f_is_solid_state
;
31108 mutex_exit(SD_MUTEX(un
));
31117 * Function: sd_ssc_ereport_post
31119 * Description: Will be called when SD driver need to post an ereport.
31121 * Context: Kernel thread or interrupt context.
31124 #define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
31127 sd_ssc_ereport_post(sd_ssc_t
*ssc
, enum sd_driver_assessment drv_assess
)
31129 int uscsi_path_instance
= 0;
31130 uchar_t uscsi_pkt_reason
;
31131 uint32_t uscsi_pkt_state
;
31132 uint32_t uscsi_pkt_statistics
;
31133 uint64_t uscsi_ena
;
31136 union scsi_cdb
*cdbp
;
31142 int ssc_invalid_flags
= SSC_FLAGS_INVALID_PKT_REASON
|
31143 SSC_FLAGS_INVALID_STATUS
|
31144 SSC_FLAGS_INVALID_SENSE
|
31145 SSC_FLAGS_INVALID_DATA
;
31146 char assessment
[16];
31148 ASSERT(ssc
!= NULL
);
31149 ASSERT(ssc
->ssc_uscsi_cmd
!= NULL
);
31150 ASSERT(ssc
->ssc_uscsi_info
!= NULL
);
31153 ASSERT(un
!= NULL
);
31155 dip
= un
->un_sd
->sd_dev
;
31159 * devid will only be passed to non-transport error reports.
31161 devid
= DEVI(dip
)->devi_devid_str
;
31164 * If we are syncing or dumping, the command will not be executed
31165 * so we bypass this situation.
31167 if (ddi_in_panic() || (un
->un_state
== SD_STATE_SUSPENDED
) ||
31168 (un
->un_state
== SD_STATE_DUMPING
))
31171 uscsi_pkt_reason
= ssc
->ssc_uscsi_info
->ui_pkt_reason
;
31172 uscsi_path_instance
= ssc
->ssc_uscsi_cmd
->uscsi_path_instance
;
31173 uscsi_pkt_state
= ssc
->ssc_uscsi_info
->ui_pkt_state
;
31174 uscsi_pkt_statistics
= ssc
->ssc_uscsi_info
->ui_pkt_statistics
;
31175 uscsi_ena
= ssc
->ssc_uscsi_info
->ui_ena
;
31177 sensep
= (uint8_t *)ssc
->ssc_uscsi_cmd
->uscsi_rqbuf
;
31178 cdbp
= (union scsi_cdb
*)ssc
->ssc_uscsi_cmd
->uscsi_cdb
;
31180 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */
31181 if (cdbp
== NULL
) {
31182 scsi_log(SD_DEVINFO(un
), sd_label
, CE_WARN
,
31183 "sd_ssc_ereport_post meet empty cdb\n");
31187 op_code
= cdbp
->scc_cmd
;
31189 cdblen
= (int)ssc
->ssc_uscsi_cmd
->uscsi_cdblen
;
31190 senlen
= (int)(ssc
->ssc_uscsi_cmd
->uscsi_rqlen
-
31191 ssc
->ssc_uscsi_cmd
->uscsi_rqresid
);
31194 ASSERT(sensep
!= NULL
);
31197 * Initialize drv_assess to corresponding values.
31198 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
31199 * on the sense-key returned back.
31201 switch (drv_assess
) {
31202 case SD_FM_DRV_RECOVERY
:
31203 (void) sprintf(assessment
, "%s", "recovered");
31205 case SD_FM_DRV_RETRY
:
31206 (void) sprintf(assessment
, "%s", "retry");
31208 case SD_FM_DRV_NOTICE
:
31209 (void) sprintf(assessment
, "%s", "info");
31211 case SD_FM_DRV_FATAL
:
31213 (void) sprintf(assessment
, "%s", "unknown");
31216 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
31217 * command, we will post ereport.io.scsi.cmd.disk.recovered.
31218 * driver-assessment will always be "recovered" here.
31220 if (drv_assess
== SD_FM_DRV_RECOVERY
) {
31221 scsi_fm_ereport_post(un
->un_sd
, uscsi_path_instance
, NULL
,
31222 "cmd.disk.recovered", uscsi_ena
, devid
, NULL
,
31224 FM_VERSION
, DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
31225 DEVID_IF_KNOWN(devid
),
31226 "driver-assessment", DATA_TYPE_STRING
, assessment
,
31227 "op-code", DATA_TYPE_UINT8
, op_code
,
31228 "cdb", DATA_TYPE_UINT8_ARRAY
,
31229 cdblen
, ssc
->ssc_uscsi_cmd
->uscsi_cdb
,
31230 "pkt-reason", DATA_TYPE_UINT8
, uscsi_pkt_reason
,
31231 "pkt-state", DATA_TYPE_UINT32
, uscsi_pkt_state
,
31232 "pkt-stats", DATA_TYPE_UINT32
, uscsi_pkt_statistics
,
31238 * If there is un-expected/un-decodable data, we should post
31239 * ereport.io.scsi.cmd.disk.dev.uderr.
31240 * driver-assessment will be set based on parameter drv_assess.
31241 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
31242 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
31243 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
31244 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
31246 if (ssc
->ssc_flags
& ssc_invalid_flags
) {
31247 if (ssc
->ssc_flags
& SSC_FLAGS_INVALID_SENSE
) {
31248 scsi_fm_ereport_post(un
->un_sd
, uscsi_path_instance
,
31249 NULL
, "cmd.disk.dev.uderr", uscsi_ena
, devid
,
31250 NULL
, DDI_NOSLEEP
, NULL
,
31251 FM_VERSION
, DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
31252 DEVID_IF_KNOWN(devid
),
31253 "driver-assessment", DATA_TYPE_STRING
,
31254 drv_assess
== SD_FM_DRV_FATAL
?
31255 "fail" : assessment
,
31256 "op-code", DATA_TYPE_UINT8
, op_code
,
31257 "cdb", DATA_TYPE_UINT8_ARRAY
,
31258 cdblen
, ssc
->ssc_uscsi_cmd
->uscsi_cdb
,
31259 "pkt-reason", DATA_TYPE_UINT8
, uscsi_pkt_reason
,
31260 "pkt-state", DATA_TYPE_UINT32
, uscsi_pkt_state
,
31261 "pkt-stats", DATA_TYPE_UINT32
,
31262 uscsi_pkt_statistics
,
31263 "stat-code", DATA_TYPE_UINT8
,
31264 ssc
->ssc_uscsi_cmd
->uscsi_status
,
31265 "un-decode-info", DATA_TYPE_STRING
,
31267 "un-decode-value", DATA_TYPE_UINT8_ARRAY
,
31272 * For other type of invalid data, the
31273 * un-decode-value field would be empty because the
31274 * un-decodable content could be seen from upper
31275 * level payload or inside un-decode-info.
31277 scsi_fm_ereport_post(un
->un_sd
, uscsi_path_instance
,
31279 "cmd.disk.dev.uderr", uscsi_ena
, devid
,
31280 NULL
, DDI_NOSLEEP
, NULL
,
31281 FM_VERSION
, DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
31282 DEVID_IF_KNOWN(devid
),
31283 "driver-assessment", DATA_TYPE_STRING
,
31284 drv_assess
== SD_FM_DRV_FATAL
?
31285 "fail" : assessment
,
31286 "op-code", DATA_TYPE_UINT8
, op_code
,
31287 "cdb", DATA_TYPE_UINT8_ARRAY
,
31288 cdblen
, ssc
->ssc_uscsi_cmd
->uscsi_cdb
,
31289 "pkt-reason", DATA_TYPE_UINT8
, uscsi_pkt_reason
,
31290 "pkt-state", DATA_TYPE_UINT32
, uscsi_pkt_state
,
31291 "pkt-stats", DATA_TYPE_UINT32
,
31292 uscsi_pkt_statistics
,
31293 "stat-code", DATA_TYPE_UINT8
,
31294 ssc
->ssc_uscsi_cmd
->uscsi_status
,
31295 "un-decode-info", DATA_TYPE_STRING
,
31297 "un-decode-value", DATA_TYPE_UINT8_ARRAY
,
31301 ssc
->ssc_flags
&= ~ssc_invalid_flags
;
31305 if (uscsi_pkt_reason
!= CMD_CMPLT
||
31306 (ssc
->ssc_flags
& SSC_FLAGS_TRAN_ABORT
)) {
31308 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was
31309 * set inside sd_start_cmds due to errors(bad packet or
31310 * fatal transport error), we should take it as a
31311 * transport error, so we post ereport.io.scsi.cmd.disk.tran.
31312 * driver-assessment will be set based on drv_assess.
31313 * We will set devid to NULL because it is a transport
31316 if (ssc
->ssc_flags
& SSC_FLAGS_TRAN_ABORT
)
31317 ssc
->ssc_flags
&= ~SSC_FLAGS_TRAN_ABORT
;
31319 scsi_fm_ereport_post(un
->un_sd
, uscsi_path_instance
, NULL
,
31320 "cmd.disk.tran", uscsi_ena
, NULL
, NULL
, DDI_NOSLEEP
, NULL
,
31321 FM_VERSION
, DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
31322 DEVID_IF_KNOWN(devid
),
31323 "driver-assessment", DATA_TYPE_STRING
,
31324 drv_assess
== SD_FM_DRV_FATAL
? "fail" : assessment
,
31325 "op-code", DATA_TYPE_UINT8
, op_code
,
31326 "cdb", DATA_TYPE_UINT8_ARRAY
,
31327 cdblen
, ssc
->ssc_uscsi_cmd
->uscsi_cdb
,
31328 "pkt-reason", DATA_TYPE_UINT8
, uscsi_pkt_reason
,
31329 "pkt-state", DATA_TYPE_UINT8
, uscsi_pkt_state
,
31330 "pkt-stats", DATA_TYPE_UINT32
, uscsi_pkt_statistics
,
31334 * If we got here, we have a completed command, and we need
31335 * to further investigate the sense data to see what kind
31336 * of ereport we should post.
31337 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
31338 * if sense-key == 0x3.
31339 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
31340 * driver-assessment will be set based on the parameter
31345 * Here we have sense data available.
31348 sense_key
= scsi_sense_key(sensep
);
31349 if (sense_key
== 0x3) {
31351 * sense-key == 0x3(medium error),
31352 * driver-assessment should be "fatal" if
31353 * drv_assess is SD_FM_DRV_FATAL.
31355 scsi_fm_ereport_post(un
->un_sd
,
31356 uscsi_path_instance
, NULL
,
31357 "cmd.disk.dev.rqs.merr",
31358 uscsi_ena
, devid
, NULL
, DDI_NOSLEEP
, NULL
,
31359 FM_VERSION
, DATA_TYPE_UINT8
,
31361 DEVID_IF_KNOWN(devid
),
31362 "driver-assessment",
31364 drv_assess
== SD_FM_DRV_FATAL
?
31365 "fatal" : assessment
,
31367 DATA_TYPE_UINT8
, op_code
,
31369 DATA_TYPE_UINT8_ARRAY
, cdblen
,
31370 ssc
->ssc_uscsi_cmd
->uscsi_cdb
,
31372 DATA_TYPE_UINT8
, uscsi_pkt_reason
,
31374 DATA_TYPE_UINT8
, uscsi_pkt_state
,
31377 uscsi_pkt_statistics
,
31380 ssc
->ssc_uscsi_cmd
->uscsi_status
,
31383 scsi_sense_key(sensep
),
31386 scsi_sense_asc(sensep
),
31389 scsi_sense_ascq(sensep
),
31391 DATA_TYPE_UINT8_ARRAY
,
31395 ssc
->ssc_uscsi_info
->ui_lba
,
31399 * if sense-key == 0x4(hardware
31400 * error), driver-assessment should
31401 * be "fatal" if drv_assess is
31404 scsi_fm_ereport_post(un
->un_sd
,
31405 uscsi_path_instance
, NULL
,
31406 "cmd.disk.dev.rqs.derr",
31408 NULL
, DDI_NOSLEEP
, NULL
,
31410 DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
31411 DEVID_IF_KNOWN(devid
),
31412 "driver-assessment",
31414 drv_assess
== SD_FM_DRV_FATAL
?
31415 (sense_key
== 0x4 ?
31416 "fatal" : "fail") : assessment
,
31418 DATA_TYPE_UINT8
, op_code
,
31420 DATA_TYPE_UINT8_ARRAY
, cdblen
,
31421 ssc
->ssc_uscsi_cmd
->uscsi_cdb
,
31423 DATA_TYPE_UINT8
, uscsi_pkt_reason
,
31425 DATA_TYPE_UINT8
, uscsi_pkt_state
,
31428 uscsi_pkt_statistics
,
31431 ssc
->ssc_uscsi_cmd
->uscsi_status
,
31434 scsi_sense_key(sensep
),
31437 scsi_sense_asc(sensep
),
31440 scsi_sense_ascq(sensep
),
31442 DATA_TYPE_UINT8_ARRAY
,
31448 * For stat_code == STATUS_GOOD, this is not a
31451 if (ssc
->ssc_uscsi_cmd
->uscsi_status
== STATUS_GOOD
)
31455 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the
31456 * stat-code but with sense data unavailable.
31457 * driver-assessment will be set based on parameter
31460 scsi_fm_ereport_post(un
->un_sd
, uscsi_path_instance
,
31462 "cmd.disk.dev.serr", uscsi_ena
,
31463 devid
, NULL
, DDI_NOSLEEP
, NULL
,
31464 FM_VERSION
, DATA_TYPE_UINT8
, FM_EREPORT_VERS0
,
31465 DEVID_IF_KNOWN(devid
),
31466 "driver-assessment", DATA_TYPE_STRING
,
31467 drv_assess
== SD_FM_DRV_FATAL
? "fail" : assessment
,
31468 "op-code", DATA_TYPE_UINT8
, op_code
,
31470 DATA_TYPE_UINT8_ARRAY
,
31471 cdblen
, ssc
->ssc_uscsi_cmd
->uscsi_cdb
,
31473 DATA_TYPE_UINT8
, uscsi_pkt_reason
,
31475 DATA_TYPE_UINT8
, uscsi_pkt_state
,
31477 DATA_TYPE_UINT32
, uscsi_pkt_statistics
,
31480 ssc
->ssc_uscsi_cmd
->uscsi_status
,
31487 * Function: sd_ssc_extract_info
31489 * Description: Extract information available to help generate ereport.
31491 * Context: Kernel thread or interrupt context.
31494 sd_ssc_extract_info(sd_ssc_t
*ssc
, struct sd_lun
*un
, struct scsi_pkt
*pktp
,
31495 struct buf
*bp
, struct sd_xbuf
*xp
)
31498 union scsi_cdb
*cdbp
;
31501 * Need scsi_cdb_size array to determine the cdb length.
31503 extern uchar_t scsi_cdb_size
[];
31505 ASSERT(un
!= NULL
);
31506 ASSERT(pktp
!= NULL
);
31507 ASSERT(bp
!= NULL
);
31508 ASSERT(xp
!= NULL
);
31509 ASSERT(ssc
!= NULL
);
31510 ASSERT(mutex_owned(SD_MUTEX(un
)));
31513 * Transfer the cdb buffer pointer here.
31515 cdbp
= (union scsi_cdb
*)pktp
->pkt_cdbp
;
31517 ssc
->ssc_uscsi_cmd
->uscsi_cdblen
= scsi_cdb_size
[GETGROUP(cdbp
)];
31518 ssc
->ssc_uscsi_cmd
->uscsi_cdb
= (caddr_t
)cdbp
;
31521 * Transfer the sense data buffer pointer if sense data is available,
31522 * calculate the sense data length first.
31524 if ((xp
->xb_sense_state
& STATE_XARQ_DONE
) ||
31525 (xp
->xb_sense_state
& STATE_ARQ_DONE
)) {
31527 * For arq case, we will enter here.
31529 if (xp
->xb_sense_state
& STATE_XARQ_DONE
) {
31530 senlen
= MAX_SENSE_LENGTH
- xp
->xb_sense_resid
;
31532 senlen
= SENSE_LENGTH
;
31536 * For non-arq case, we will enter this branch.
31538 if (SD_GET_PKT_STATUS(pktp
) == STATUS_CHECK
&&
31539 (xp
->xb_sense_state
& STATE_XFERRED_DATA
)) {
31540 senlen
= SENSE_LENGTH
- xp
->xb_sense_resid
;
31545 ssc
->ssc_uscsi_cmd
->uscsi_rqlen
= (senlen
& 0xff);
31546 ssc
->ssc_uscsi_cmd
->uscsi_rqresid
= 0;
31547 ssc
->ssc_uscsi_cmd
->uscsi_rqbuf
= (caddr_t
)xp
->xb_sense_data
;
31549 ssc
->ssc_uscsi_cmd
->uscsi_status
= ((*(pktp
)->pkt_scbp
) & STATUS_MASK
);
31552 * Only transfer path_instance when scsi_pkt was properly allocated.
31554 path_instance
= pktp
->pkt_path_instance
;
31555 if (scsi_pkt_allocated_correctly(pktp
) && path_instance
)
31556 ssc
->ssc_uscsi_cmd
->uscsi_path_instance
= path_instance
;
31558 ssc
->ssc_uscsi_cmd
->uscsi_path_instance
= 0;
31561 * Copy in the other fields we may need when posting ereport.
31563 ssc
->ssc_uscsi_info
->ui_pkt_reason
= pktp
->pkt_reason
;
31564 ssc
->ssc_uscsi_info
->ui_pkt_state
= pktp
->pkt_state
;
31565 ssc
->ssc_uscsi_info
->ui_pkt_statistics
= pktp
->pkt_statistics
;
31566 ssc
->ssc_uscsi_info
->ui_lba
= (uint64_t)SD_GET_BLKNO(bp
);
31569 * For partially read/write command, we will not create ena
31570 * in case of a successful command be reconized as recovered.
31572 if ((pktp
->pkt_reason
== CMD_CMPLT
) &&
31573 (ssc
->ssc_uscsi_cmd
->uscsi_status
== STATUS_GOOD
) &&
31579 * To associate ereports of a single command execution flow, we
31580 * need a shared ena for a specific command.
31582 if (xp
->xb_ena
== 0)
31583 xp
->xb_ena
= fm_ena_generate(0, FM_ENA_FMT1
);
31584 ssc
->ssc_uscsi_info
->ui_ena
= xp
->xb_ena
;
31589 * Function: sd_check_solid_state
31591 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
31592 * supports VPD page 0xb1, sd examines the MEDIUM ROTATION
31593 * RATE. If the MEDIUM ROTATION RATE is 1, sd assumes the
31594 * device is a solid state drive.
31596 * Context: Kernel thread or interrupt context.
31600 sd_check_solid_state(sd_ssc_t
*ssc
)
31603 uchar_t
*inqb1
= NULL
;
31604 size_t inqb1_len
= MAX_INQUIRY_SIZE
;
31605 size_t inqb1_resid
= 0;
31608 ASSERT(ssc
!= NULL
);
31610 ASSERT(un
!= NULL
);
31611 ASSERT(!mutex_owned(SD_MUTEX(un
)));
31613 mutex_enter(SD_MUTEX(un
));
31614 un
->un_f_is_solid_state
= FALSE
;
31617 mutex_exit(SD_MUTEX(un
));
31621 if (sd_check_vpd_page_support(ssc
) == 0 &&
31622 un
->un_vpd_page_mask
& SD_VPD_DEV_CHARACTER_PG
) {
31623 mutex_exit(SD_MUTEX(un
));
31624 /* collect page b1 data */
31625 inqb1
= kmem_zalloc(inqb1_len
, KM_SLEEP
);
31627 rval
= sd_send_scsi_INQUIRY(ssc
, inqb1
, inqb1_len
,
31628 0x01, 0xB1, &inqb1_resid
);
31630 if (rval
== 0 && (inqb1_len
- inqb1_resid
> 5)) {
31631 SD_TRACE(SD_LOG_COMMON
, un
,
31632 "sd_check_solid_state: \
31633 successfully get VPD page: %x \
31634 PAGE LENGTH: %x BYTE 4: %x \
31635 BYTE 5: %x", inqb1
[1], inqb1
[3], inqb1
[4],
31638 mutex_enter(SD_MUTEX(un
));
31640 * Check the MEDIUM ROTATION RATE. If it is set
31641 * to 1, the device is a solid state drive.
31643 if (inqb1
[4] == 0 && inqb1
[5] == 1) {
31644 un
->un_f_is_solid_state
= TRUE
;
31646 mutex_exit(SD_MUTEX(un
));
31647 } else if (rval
!= 0) {
31648 sd_ssc_assessment(ssc
, SD_FMT_IGNORE
);
31651 kmem_free(inqb1
, inqb1_len
);
31653 mutex_exit(SD_MUTEX(un
));
31658 * Function: sd_check_emulation_mode
31660 * Description: Check whether the SSD is at emulation mode
31661 * by issuing READ_CAPACITY_16 to see whether
31662 * we can get physical block size of the drive.
31664 * Context: Kernel thread or interrupt context.
31668 sd_check_emulation_mode(sd_ssc_t
*ssc
)
31678 ASSERT(ssc
!= NULL
);
31680 ASSERT(un
!= NULL
);
31681 ASSERT(!mutex_owned(SD_MUTEX(un
)));
31683 mutex_enter(SD_MUTEX(un
));
31685 mutex_exit(SD_MUTEX(un
));
31689 if (un
->un_f_descr_format_supported
) {
31690 mutex_exit(SD_MUTEX(un
));
31691 rval
= sd_send_scsi_READ_CAPACITY_16(ssc
, &capacity
, &lbasize
,
31692 &pbsize
, SD_PATH_DIRECT
);
31693 mutex_enter(SD_MUTEX(un
));
31696 un
->un_phy_blocksize
= DEV_BSIZE
;
31698 if (!ISP2(pbsize
% DEV_BSIZE
) || pbsize
== 0) {
31699 un
->un_phy_blocksize
= DEV_BSIZE
;
31701 un
->un_phy_blocksize
= pbsize
;
31706 for (i
= 0; i
< sd_flash_dev_table_size
; i
++) {
31707 devid_len
= (int)strlen(sd_flash_dev_table
[i
]);
31708 if (sd_sdconf_id_match(un
, sd_flash_dev_table
[i
], devid_len
)
31710 un
->un_phy_blocksize
= SSD_SECSIZE
;
31711 if (un
->un_f_is_solid_state
&&
31712 un
->un_phy_blocksize
!= un
->un_tgt_blocksize
)
31713 un
->un_f_enable_rmw
= TRUE
;
31717 mutex_exit(SD_MUTEX(un
));