4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
28 * Copyright (c) 2000 to 2010, LSI Corporation.
29 * All rights reserved.
31 * Redistribution and use in source and binary forms of all code within
32 * this file that is exclusively owned by LSI, with or without
33 * modification, is permitted provided that, in addition to the CDDL 1.0
34 * License requirements, the following conditions are met:
36 * Neither the name of the author nor the names of its contributors may be
37 * used to endorse or promote products derived from this software without
38 * specific prior written permission.
40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
43 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
44 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
46 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
47 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
48 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
49 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
55 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
59 #if defined(lint) || defined(DEBUG)
64 * standard header files.
67 #include <sys/scsi/scsi.h>
70 #include <sys/cpuvar.h>
71 #include <sys/policy.h>
72 #include <sys/sysevent.h>
73 #include <sys/sysevent/eventdefs.h>
74 #include <sys/sysevent/dr.h>
75 #include <sys/sata/sata_defs.h>
76 #include <sys/scsi/generic/sas.h>
77 #include <sys/scsi/impl/scsi_sas.h>
80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
91 * private header files.
94 #include <sys/scsi/impl/scsi_reset_notify.h>
95 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
96 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
97 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
99 #include <sys/raidioctl.h>
101 #include <sys/fs/dv_node.h> /* devfs_clean */
106 #include <sys/ddifm.h>
107 #include <sys/fm/protocol.h>
108 #include <sys/fm/util.h>
109 #include <sys/fm/io/ddi.h>
112 * For anyone who would modify the code in mptsas_driver, be aware
113 * that from snv_145, where CR6910752 (mpt_sas driver performance can be
114 * improved) is integrated, the per_instance mutex m_mutex is not held
115 * in the key IO code path, including mptsas_scsi_start(), mptsas_intr()
116 * and all of the recursive functions called in them, so don't
117 * take it for granted that all operations are synchronized/excluded correctly. Before
118 * doing any modification in the key code path, and even other code paths such as
119 * DR, watchsubr, ioctl, passthrough etc., make sure the elements modified have
120 * no relationship to elements shown in the fastpath
121 * (function mptsas_handle_io_fastpath()) in the ISR and its recursive functions;
122 * otherwise, you have to use the newly introduced mutex to protect them.
123 * As to how to do this correctly, refer to the comments in mptsas_intr().
127 * autoconfiguration data and routines.
129 static int mptsas_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
);
130 static int mptsas_detach(dev_info_t
*devi
, ddi_detach_cmd_t cmd
);
131 static int mptsas_power(dev_info_t
*dip
, int component
, int level
);
136 static int mptsas_ioctl(dev_t dev
, int cmd
, intptr_t data
, int mode
,
137 cred_t
*credp
, int *rval
);
139 static int mptsas_reset(dev_info_t
*devi
, ddi_reset_cmd_t cmd
);
141 static int mptsas_quiesce(dev_info_t
*devi
);
145 * Resource initialization for hardware
147 static void mptsas_setup_cmd_reg(mptsas_t
*mpt
);
148 static void mptsas_disable_bus_master(mptsas_t
*mpt
);
149 static void mptsas_hba_fini(mptsas_t
*mpt
);
150 static void mptsas_cfg_fini(mptsas_t
*mptsas_blkp
);
151 static int mptsas_hba_setup(mptsas_t
*mpt
);
152 static void mptsas_hba_teardown(mptsas_t
*mpt
);
153 static int mptsas_config_space_init(mptsas_t
*mpt
);
154 static void mptsas_config_space_fini(mptsas_t
*mpt
);
155 static void mptsas_iport_register(mptsas_t
*mpt
);
156 static int mptsas_smp_setup(mptsas_t
*mpt
);
157 static void mptsas_smp_teardown(mptsas_t
*mpt
);
158 static int mptsas_cache_create(mptsas_t
*mpt
);
159 static void mptsas_cache_destroy(mptsas_t
*mpt
);
160 static int mptsas_alloc_request_frames(mptsas_t
*mpt
);
161 static int mptsas_alloc_reply_frames(mptsas_t
*mpt
);
162 static int mptsas_alloc_free_queue(mptsas_t
*mpt
);
163 static int mptsas_alloc_post_queue(mptsas_t
*mpt
);
164 static void mptsas_alloc_reply_args(mptsas_t
*mpt
);
165 static int mptsas_alloc_extra_sgl_frame(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
166 static void mptsas_free_extra_sgl_frame(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
167 static int mptsas_init_chip(mptsas_t
*mpt
, int first_time
);
170 * SCSA function prototypes
172 static int mptsas_scsi_start(struct scsi_address
*ap
, struct scsi_pkt
*pkt
);
173 static int mptsas_scsi_reset(struct scsi_address
*ap
, int level
);
174 static int mptsas_scsi_abort(struct scsi_address
*ap
, struct scsi_pkt
*pkt
);
175 static int mptsas_scsi_getcap(struct scsi_address
*ap
, char *cap
, int tgtonly
);
176 static int mptsas_scsi_setcap(struct scsi_address
*ap
, char *cap
, int value
,
178 static void mptsas_scsi_dmafree(struct scsi_address
*ap
, struct scsi_pkt
*pkt
);
179 static struct scsi_pkt
*mptsas_scsi_init_pkt(struct scsi_address
*ap
,
180 struct scsi_pkt
*pkt
, struct buf
*bp
, int cmdlen
, int statuslen
,
181 int tgtlen
, int flags
, int (*callback
)(), caddr_t arg
);
182 static void mptsas_scsi_sync_pkt(struct scsi_address
*ap
, struct scsi_pkt
*pkt
);
183 static void mptsas_scsi_destroy_pkt(struct scsi_address
*ap
,
184 struct scsi_pkt
*pkt
);
185 static int mptsas_scsi_tgt_init(dev_info_t
*hba_dip
, dev_info_t
*tgt_dip
,
186 scsi_hba_tran_t
*hba_tran
, struct scsi_device
*sd
);
187 static void mptsas_scsi_tgt_free(dev_info_t
*hba_dip
, dev_info_t
*tgt_dip
,
188 scsi_hba_tran_t
*hba_tran
, struct scsi_device
*sd
);
189 static int mptsas_scsi_reset_notify(struct scsi_address
*ap
, int flag
,
190 void (*callback
)(caddr_t
), caddr_t arg
);
191 static int mptsas_get_name(struct scsi_device
*sd
, char *name
, int len
);
192 static int mptsas_get_bus_addr(struct scsi_device
*sd
, char *name
, int len
);
193 static int mptsas_scsi_quiesce(dev_info_t
*dip
);
194 static int mptsas_scsi_unquiesce(dev_info_t
*dip
);
195 static int mptsas_bus_config(dev_info_t
*pdip
, uint_t flags
,
196 ddi_bus_config_op_t op
, void *arg
, dev_info_t
**childp
);
201 static int mptsas_smp_start(struct smp_pkt
*smp_pkt
);
204 * internal function prototypes.
206 static void mptsas_list_add(mptsas_t
*mpt
);
207 static void mptsas_list_del(mptsas_t
*mpt
);
209 static int mptsas_quiesce_bus(mptsas_t
*mpt
);
210 static int mptsas_unquiesce_bus(mptsas_t
*mpt
);
212 static int mptsas_alloc_handshake_msg(mptsas_t
*mpt
, size_t alloc_size
);
213 static void mptsas_free_handshake_msg(mptsas_t
*mpt
);
215 static void mptsas_ncmds_checkdrain(void *arg
);
217 static int mptsas_prepare_pkt(mptsas_cmd_t
*cmd
);
218 static int mptsas_accept_pkt(mptsas_t
*mpt
, mptsas_cmd_t
*sp
);
220 static int mptsas_do_detach(dev_info_t
*dev
);
221 static int mptsas_do_scsi_reset(mptsas_t
*mpt
, uint16_t devhdl
);
222 static int mptsas_do_scsi_abort(mptsas_t
*mpt
, int target
, int lun
,
223 struct scsi_pkt
*pkt
);
224 static int mptsas_scsi_capchk(char *cap
, int tgtonly
, int *cidxp
);
226 static void mptsas_handle_qfull(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
227 static void mptsas_handle_event(void *args
);
228 static int mptsas_handle_event_sync(void *args
);
229 static void mptsas_handle_dr(void *args
);
230 static void mptsas_handle_topo_change(mptsas_topo_change_list_t
*topo_node
,
233 static void mptsas_restart_cmd(void *);
235 static void mptsas_flush_hba(mptsas_t
*mpt
);
236 static void mptsas_flush_target(mptsas_t
*mpt
, ushort_t target
, int lun
,
238 static void mptsas_set_pkt_reason(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
,
239 uchar_t reason
, uint_t stat
);
241 static uint_t
mptsas_intr(caddr_t arg1
, caddr_t arg2
);
242 static void mptsas_process_intr(mptsas_t
*mpt
,
243 pMpi2ReplyDescriptorsUnion_t reply_desc_union
);
244 static int mptsas_handle_io_fastpath(mptsas_t
*mpt
, uint16_t SMID
);
245 static void mptsas_handle_scsi_io_success(mptsas_t
*mpt
,
246 pMpi2ReplyDescriptorsUnion_t reply_desc
);
247 static void mptsas_handle_address_reply(mptsas_t
*mpt
,
248 pMpi2ReplyDescriptorsUnion_t reply_desc
);
249 static int mptsas_wait_intr(mptsas_t
*mpt
, int polltime
);
250 static void mptsas_sge_setup(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
,
251 uint32_t *control
, pMpi2SCSIIORequest_t frame
, ddi_acc_handle_t acc_hdl
);
253 static void mptsas_watch(void *arg
);
254 static void mptsas_watchsubr(mptsas_t
*mpt
);
255 static void mptsas_cmd_timeout(mptsas_t
*mpt
, uint16_t devhdl
);
257 static void mptsas_start_passthru(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
258 static int mptsas_do_passthru(mptsas_t
*mpt
, uint8_t *request
, uint8_t *reply
,
259 uint8_t *data
, uint32_t request_size
, uint32_t reply_size
,
260 uint32_t data_size
, uint32_t direction
, uint8_t *dataout
,
261 uint32_t dataout_size
, short timeout
, int mode
);
262 static int mptsas_free_devhdl(mptsas_t
*mpt
, uint16_t devhdl
);
264 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t
*mpt
,
266 static void mptsas_start_diag(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
267 static int mptsas_post_fw_diag_buffer(mptsas_t
*mpt
,
268 mptsas_fw_diagnostic_buffer_t
*pBuffer
, uint32_t *return_code
);
269 static int mptsas_release_fw_diag_buffer(mptsas_t
*mpt
,
270 mptsas_fw_diagnostic_buffer_t
*pBuffer
, uint32_t *return_code
,
272 static int mptsas_diag_register(mptsas_t
*mpt
,
273 mptsas_fw_diag_register_t
*diag_register
, uint32_t *return_code
);
274 static int mptsas_diag_unregister(mptsas_t
*mpt
,
275 mptsas_fw_diag_unregister_t
*diag_unregister
, uint32_t *return_code
);
276 static int mptsas_diag_query(mptsas_t
*mpt
, mptsas_fw_diag_query_t
*diag_query
,
277 uint32_t *return_code
);
278 static int mptsas_diag_read_buffer(mptsas_t
*mpt
,
279 mptsas_diag_read_buffer_t
*diag_read_buffer
, uint8_t *ioctl_buf
,
280 uint32_t *return_code
, int ioctl_mode
);
281 static int mptsas_diag_release(mptsas_t
*mpt
,
282 mptsas_fw_diag_release_t
*diag_release
, uint32_t *return_code
);
283 static int mptsas_do_diag_action(mptsas_t
*mpt
, uint32_t action
,
284 uint8_t *diag_action
, uint32_t length
, uint32_t *return_code
,
286 static int mptsas_diag_action(mptsas_t
*mpt
, mptsas_diag_action_t
*data
,
289 static int mptsas_pkt_alloc_extern(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
,
290 int cmdlen
, int tgtlen
, int statuslen
, int kf
);
291 static void mptsas_pkt_destroy_extern(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
293 static int mptsas_kmem_cache_constructor(void *buf
, void *cdrarg
, int kmflags
);
294 static void mptsas_kmem_cache_destructor(void *buf
, void *cdrarg
);
296 static int mptsas_cache_frames_constructor(void *buf
, void *cdrarg
,
298 static void mptsas_cache_frames_destructor(void *buf
, void *cdrarg
);
300 static void mptsas_check_scsi_io_error(mptsas_t
*mpt
, pMpi2SCSIIOReply_t reply
,
302 static void mptsas_check_task_mgt(mptsas_t
*mpt
,
303 pMpi2SCSIManagementReply_t reply
, mptsas_cmd_t
*cmd
);
304 static int mptsas_send_scsi_cmd(mptsas_t
*mpt
, struct scsi_address
*ap
,
305 mptsas_target_t
*ptgt
, uchar_t
*cdb
, int cdblen
, struct buf
*data_bp
,
308 static int mptsas_alloc_active_slots(mptsas_t
*mpt
, int flag
);
309 static void mptsas_free_active_slots(mptsas_t
*mpt
);
310 static int mptsas_start_cmd(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
311 static int mptsas_start_cmd0(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
313 static void mptsas_restart_hba(mptsas_t
*mpt
);
315 static void mptsas_deliver_doneq_thread(mptsas_t
*mpt
);
316 static void mptsas_doneq_add(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
317 static inline void mptsas_doneq_add0(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
318 static void mptsas_doneq_mv(mptsas_t
*mpt
, uint64_t t
);
320 static mptsas_cmd_t
*mptsas_doneq_thread_rm(mptsas_t
*mpt
, uint64_t t
);
321 static void mptsas_doneq_empty(mptsas_t
*mpt
);
322 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t
*arg
);
324 static mptsas_cmd_t
*mptsas_waitq_rm(mptsas_t
*mpt
);
325 static void mptsas_waitq_delete(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
327 static void mptsas_start_watch_reset_delay();
328 static void mptsas_setup_bus_reset_delay(mptsas_t
*mpt
);
329 static void mptsas_watch_reset_delay(void *arg
);
330 static int mptsas_watch_reset_delay_subr(mptsas_t
*mpt
);
332 static int mptsas_outstanding_cmds_n(mptsas_t
*mpt
);
336 static void mptsas_dump_cmd(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
338 static dev_info_t
*mptsas_find_child(dev_info_t
*pdip
, char *name
);
339 static dev_info_t
*mptsas_find_child_phy(dev_info_t
*pdip
, uint8_t phy
);
340 static dev_info_t
*mptsas_find_child_addr(dev_info_t
*pdip
, uint64_t sasaddr
,
342 static mdi_pathinfo_t
*mptsas_find_path_addr(dev_info_t
*pdip
, uint64_t sasaddr
,
344 static mdi_pathinfo_t
*mptsas_find_path_phy(dev_info_t
*pdip
, uint8_t phy
);
345 static dev_info_t
*mptsas_find_smp_child(dev_info_t
*pdip
, char *str_wwn
);
347 static int mptsas_parse_address(char *name
, uint64_t *wwid
, uint8_t *phy
,
349 static int mptsas_parse_smp_name(char *name
, uint64_t *wwn
);
351 static mptsas_target_t
*mptsas_phy_to_tgt(mptsas_t
*mpt
, int phymask
,
353 static mptsas_target_t
*mptsas_wwid_to_ptgt(mptsas_t
*mpt
, int phymask
,
355 static mptsas_smp_t
*mptsas_wwid_to_psmp(mptsas_t
*mpt
, int phymask
,
358 static int mptsas_inquiry(mptsas_t
*mpt
, mptsas_target_t
*ptgt
, int lun
,
359 uchar_t page
, unsigned char *buf
, int len
, int *rlen
, uchar_t evpd
);
361 static int mptsas_get_target_device_info(mptsas_t
*mpt
, uint32_t page_address
,
362 uint16_t *handle
, mptsas_target_t
**pptgt
);
363 static void mptsas_update_phymask(mptsas_t
*mpt
);
364 static inline void mptsas_remove_cmd0(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
);
366 static int mptsas_send_sep(mptsas_t
*mpt
, mptsas_target_t
*ptgt
,
367 uint32_t *status
, uint8_t cmd
);
368 static dev_info_t
*mptsas_get_dip_from_dev(dev_t dev
,
369 mptsas_phymask_t
*phymask
);
370 static mptsas_target_t
*mptsas_addr_to_ptgt(mptsas_t
*mpt
, char *addr
,
371 mptsas_phymask_t phymask
);
372 static int mptsas_set_led_status(mptsas_t
*mpt
, mptsas_target_t
*ptgt
,
373 uint32_t slotstatus
);
377 * Enumeration / DR functions
379 static void mptsas_config_all(dev_info_t
*pdip
);
380 static int mptsas_config_one_addr(dev_info_t
*pdip
, uint64_t sasaddr
, int lun
,
381 dev_info_t
**lundip
);
382 static int mptsas_config_one_phy(dev_info_t
*pdip
, uint8_t phy
, int lun
,
383 dev_info_t
**lundip
);
385 static int mptsas_config_target(dev_info_t
*pdip
, mptsas_target_t
*ptgt
);
386 static int mptsas_offline_target(dev_info_t
*pdip
, char *name
);
388 static int mptsas_config_raid(dev_info_t
*pdip
, uint16_t target
,
391 static int mptsas_config_luns(dev_info_t
*pdip
, mptsas_target_t
*ptgt
);
392 static int mptsas_probe_lun(dev_info_t
*pdip
, int lun
,
393 dev_info_t
**dip
, mptsas_target_t
*ptgt
);
395 static int mptsas_create_lun(dev_info_t
*pdip
, struct scsi_inquiry
*sd_inq
,
396 dev_info_t
**dip
, mptsas_target_t
*ptgt
, int lun
);
398 static int mptsas_create_phys_lun(dev_info_t
*pdip
, struct scsi_inquiry
*sd
,
399 char *guid
, dev_info_t
**dip
, mptsas_target_t
*ptgt
, int lun
);
400 static int mptsas_create_virt_lun(dev_info_t
*pdip
, struct scsi_inquiry
*sd
,
401 char *guid
, dev_info_t
**dip
, mdi_pathinfo_t
**pip
, mptsas_target_t
*ptgt
,
404 static void mptsas_offline_missed_luns(dev_info_t
*pdip
,
405 uint16_t *repluns
, int lun_cnt
, mptsas_target_t
*ptgt
);
406 static int mptsas_offline_lun(dev_info_t
*pdip
, dev_info_t
*rdip
,
407 mdi_pathinfo_t
*rpip
, uint_t flags
);
409 static int mptsas_config_smp(dev_info_t
*pdip
, uint64_t sas_wwn
,
410 dev_info_t
**smp_dip
);
411 static int mptsas_offline_smp(dev_info_t
*pdip
, mptsas_smp_t
*smp_node
,
414 static int mptsas_event_query(mptsas_t
*mpt
, mptsas_event_query_t
*data
,
415 int mode
, int *rval
);
416 static int mptsas_event_enable(mptsas_t
*mpt
, mptsas_event_enable_t
*data
,
417 int mode
, int *rval
);
418 static int mptsas_event_report(mptsas_t
*mpt
, mptsas_event_report_t
*data
,
419 int mode
, int *rval
);
420 static void mptsas_record_event(void *args
);
421 static int mptsas_reg_access(mptsas_t
*mpt
, mptsas_reg_access_t
*data
,
424 static void mptsas_hash_init(mptsas_hash_table_t
*hashtab
);
425 static void mptsas_hash_uninit(mptsas_hash_table_t
*hashtab
, size_t datalen
);
426 static void mptsas_hash_add(mptsas_hash_table_t
*hashtab
, void *data
);
427 static void * mptsas_hash_rem(mptsas_hash_table_t
*hashtab
, uint64_t key1
,
428 mptsas_phymask_t key2
);
429 static void * mptsas_hash_search(mptsas_hash_table_t
*hashtab
, uint64_t key1
,
430 mptsas_phymask_t key2
);
431 static void * mptsas_hash_traverse(mptsas_hash_table_t
*hashtab
, int pos
);
433 mptsas_target_t
*mptsas_tgt_alloc(mptsas_hash_table_t
*, uint16_t, uint64_t,
434 uint32_t, mptsas_phymask_t
, uint8_t, mptsas_t
*);
435 static mptsas_smp_t
*mptsas_smp_alloc(mptsas_hash_table_t
*hashtab
,
437 static void mptsas_smp_free(mptsas_hash_table_t
*hashtab
, uint64_t wwid
,
438 mptsas_phymask_t phymask
);
439 static void mptsas_tgt_free(mptsas_hash_table_t
*, uint64_t, mptsas_phymask_t
);
440 static void * mptsas_search_by_devhdl(mptsas_hash_table_t
*, uint16_t);
441 static int mptsas_online_smp(dev_info_t
*pdip
, mptsas_smp_t
*smp_node
,
442 dev_info_t
**smp_dip
);
445 * Power management functions
447 static int mptsas_get_pci_cap(mptsas_t
*mpt
);
448 static int mptsas_init_pm(mptsas_t
*mpt
);
453 * By default MSI is enabled on all supported platforms.
455 boolean_t mptsas_enable_msi
= B_TRUE
;
456 boolean_t mptsas_physical_bind_failed_page_83
= B_FALSE
;
458 static int mptsas_register_intrs(mptsas_t
*);
459 static void mptsas_unregister_intrs(mptsas_t
*);
460 static int mptsas_add_intrs(mptsas_t
*, int);
461 static void mptsas_rem_intrs(mptsas_t
*);
466 static void mptsas_fm_init(mptsas_t
*mpt
);
467 static void mptsas_fm_fini(mptsas_t
*mpt
);
468 static int mptsas_fm_error_cb(dev_info_t
*, ddi_fm_error_t
*, const void *);
470 extern pri_t minclsyspri
, maxclsyspri
;
473 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
474 * under this device that the paths to a physical device are created when
477 extern dev_info_t
*scsi_vhci_dip
;
480 * Tunable timeout value for Inquiry VPD page 0x83
481 * By default the value is 30 seconds.
483 int mptsas_inq83_retry_timeout
= 30;
486 * This is used to allocate memory for message frame storage, not for
487 * data I/O DMA. All message frames must be stored in the first 4G of
490 ddi_dma_attr_t mptsas_dma_attrs
= {
491 DMA_ATTR_V0
, /* attribute layout version */
492 0x0ull
, /* address low - should be 0 (longlong) */
493 0xffffffffull
, /* address high - 32-bit max range */
494 0x00ffffffull
, /* count max - max DMA object size */
495 4, /* allocation alignment requirements */
496 0x78, /* burstsizes - binary encoded values */
497 1, /* minxfer - gran. of DMA engine */
498 0x00ffffffull
, /* maxxfer - gran. of DMA engine */
499 0xffffffffull
, /* max segment size (DMA boundary) */
500 MPTSAS_MAX_DMA_SEGS
, /* scatter/gather list length */
501 512, /* granularity - device transfer size */
502 0 /* flags, set to 0 */
506 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
507 * physical addresses are supported.)
509 ddi_dma_attr_t mptsas_dma_attrs64
= {
510 DMA_ATTR_V0
, /* attribute layout version */
511 0x0ull
, /* address low - should be 0 (longlong) */
512 0xffffffffffffffffull
, /* address high - 64-bit max */
513 0x00ffffffull
, /* count max - max DMA object size */
514 4, /* allocation alignment requirements */
515 0x78, /* burstsizes - binary encoded values */
516 1, /* minxfer - gran. of DMA engine */
517 0x00ffffffull
, /* maxxfer - gran. of DMA engine */
518 0xffffffffull
, /* max segment size (DMA boundary) */
519 MPTSAS_MAX_DMA_SEGS
, /* scatter/gather list length */
520 512, /* granularity - device transfer size */
521 DDI_DMA_RELAXED_ORDERING
/* flags, enable relaxed ordering */
524 ddi_device_acc_attr_t mptsas_dev_attr
= {
526 DDI_STRUCTURE_LE_ACC
,
531 static struct cb_ops mptsas_cb_ops
= {
532 scsi_hba_open
, /* open */
533 scsi_hba_close
, /* close */
534 nodev
, /* strategy */
539 mptsas_ioctl
, /* ioctl */
543 nochpoll
, /* chpoll */
544 ddi_prop_op
, /* cb_prop_op */
545 NULL
, /* streamtab */
552 static struct dev_ops mptsas_ops
= {
553 DEVO_REV
, /* devo_rev, */
555 ddi_no_info
, /* info */
556 nulldev
, /* identify */
558 mptsas_attach
, /* attach */
559 mptsas_detach
, /* detach */
565 &mptsas_cb_ops
, /* driver operations */
566 NULL
, /* bus operations */
567 mptsas_power
, /* power management */
569 ddi_quiesce_not_needed
571 mptsas_quiesce
/* quiesce */
576 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
578 static struct modldrv modldrv
= {
579 &mod_driverops
, /* Type of module. This one is a driver */
580 MPTSAS_MOD_STRING
, /* Name of the module. */
581 &mptsas_ops
, /* driver ops */
584 static struct modlinkage modlinkage
= {
585 MODREV_1
, &modldrv
, NULL
587 #define TARGET_PROP "target"
588 #define LUN_PROP "lun"
589 #define LUN64_PROP "lun64"
590 #define SAS_PROP "sas-mpt"
591 #define MDI_GUID "wwn"
592 #define NDI_GUID "guid"
593 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
598 #if defined(MPTSAS_DEBUG)
599 uint32_t mptsas_debug_flags
= 0;
600 #endif /* defined(MPTSAS_DEBUG) */
601 uint32_t mptsas_debug_resets
= 0;
603 static kmutex_t mptsas_global_mutex
;
604 static void *mptsas_state
; /* soft state ptr */
605 static krwlock_t mptsas_global_rwlock
;
607 static kmutex_t mptsas_log_mutex
;
608 static char mptsas_log_buf
[256];
609 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex
, mptsas_log_buf
))
611 static mptsas_t
*mptsas_head
, *mptsas_tail
;
612 static clock_t mptsas_scsi_watchdog_tick
;
613 static clock_t mptsas_tick
;
614 static timeout_id_t mptsas_reset_watch
;
615 static timeout_id_t mptsas_timeout_id
;
616 static int mptsas_timeouts_enabled
= 0;
620 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
621 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status
))
622 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt
))
623 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address
))
624 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private
))
625 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private
))
630 char *mptsas_driver_rev
= MPTSAS_MOD_STRING
;
633 void debug_enter(char *);
638 * - scsi_hba_init(9F) initializes SCSI HBA modules
639 * - must call scsi_hba_fini(9F) if modload() fails
646 ASSERT(NO_COMPETING_THREADS
);
650 status
= ddi_soft_state_init(&mptsas_state
, MPTSAS_SIZE
,
651 MPTSAS_INITIAL_SOFT_SPACE
);
656 if ((status
= scsi_hba_init(&modlinkage
)) != 0) {
657 ddi_soft_state_fini(&mptsas_state
);
661 mutex_init(&mptsas_global_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
662 rw_init(&mptsas_global_rwlock
, NULL
, RW_DRIVER
, NULL
);
663 mutex_init(&mptsas_log_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
665 if ((status
= mod_install(&modlinkage
)) != 0) {
666 mutex_destroy(&mptsas_log_mutex
);
667 rw_destroy(&mptsas_global_rwlock
);
668 mutex_destroy(&mptsas_global_mutex
);
669 ddi_soft_state_fini(&mptsas_state
);
670 scsi_hba_fini(&modlinkage
);
678 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
685 ASSERT(NO_COMPETING_THREADS
);
689 if ((status
= mod_remove(&modlinkage
)) == 0) {
690 ddi_soft_state_fini(&mptsas_state
);
691 scsi_hba_fini(&modlinkage
);
692 mutex_destroy(&mptsas_global_mutex
);
693 rw_destroy(&mptsas_global_rwlock
);
694 mutex_destroy(&mptsas_log_mutex
);
700 * The loadable-module _info(9E) entry point
703 _info(struct modinfo
*modinfop
)
706 ASSERT(NO_COMPETING_THREADS
);
707 NDBG0(("mptsas _info"));
709 return (mod_info(&modlinkage
, modinfop
));
714 mptsas_iport_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
718 scsi_hba_tran_t
*hba_tran
;
720 char phymask
[MPTSAS_MAX_PHYS
];
721 mptsas_phymask_t phy_mask
= 0;
722 int dynamic_port
= 0;
723 uint32_t page_address
;
724 char initiator_wwnstr
[MPTSAS_WWN_STRLEN
];
725 int rval
= DDI_FAILURE
;
729 uint8_t phy_port
= 0;
730 uint16_t attached_devhdl
= 0;
732 uint64_t attached_sas_wwn
;
735 uint16_t bay_num
, enclosure
;
736 char attached_wwnstr
[MPTSAS_WWN_STRLEN
];
739 ASSERT(NO_COMPETING_THREADS
);
747 * If this is a scsi-iport node, nothing to do here.
749 return (DDI_SUCCESS
);
752 return (DDI_FAILURE
);
755 pdip
= ddi_get_parent(dip
);
757 if ((hba_tran
= ndi_flavorv_get(pdip
, SCSA_FLAVOR_SCSI_DEVICE
)) ==
759 cmn_err(CE_WARN
, "Failed attach iport because fail to "
760 "get tran vector for the HBA node");
761 return (DDI_FAILURE
);
764 mpt
= TRAN2MPT(hba_tran
);
767 return (DDI_FAILURE
);
769 if ((hba_tran
= ndi_flavorv_get(dip
, SCSA_FLAVOR_SCSI_DEVICE
)) ==
771 mptsas_log(mpt
, CE_WARN
, "Failed attach iport because fail to "
772 "get tran vector for the iport node");
773 return (DDI_FAILURE
);
777 * Overwrite parent's tran_hba_private to iport's tran vector
779 hba_tran
->tran_hba_private
= mpt
;
784 * Get SAS address for the initiator port according to dev_handle
786 iport
= ddi_get_name_addr(dip
);
787 if (iport
&& strncmp(iport
, "v0", 2) == 0) {
788 if (ddi_prop_update_int(DDI_DEV_T_NONE
, dip
,
789 MPTSAS_VIRTUAL_PORT
, 1) !=
791 (void) ddi_prop_remove(DDI_DEV_T_NONE
, dip
,
792 MPTSAS_VIRTUAL_PORT
);
793 mptsas_log(mpt
, CE_WARN
, "mptsas virtual port "
794 "prop update failed");
795 return (DDI_FAILURE
);
797 return (DDI_SUCCESS
);
800 mutex_enter(&mpt
->m_mutex
);
801 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
802 bzero(phymask
, sizeof (phymask
));
803 (void) sprintf(phymask
,
804 "%x", mpt
->m_phy_info
[i
].phy_mask
);
805 if (strcmp(phymask
, iport
) == 0) {
810 if (i
== MPTSAS_MAX_PHYS
) {
811 mptsas_log(mpt
, CE_WARN
, "Failed attach port %s because port"
812 "seems not exist", iport
);
813 mutex_exit(&mpt
->m_mutex
);
814 return (DDI_FAILURE
);
817 phy_mask
= mpt
->m_phy_info
[i
].phy_mask
;
819 if (mpt
->m_phy_info
[i
].port_flags
& AUTO_PORT_CONFIGURATION
)
825 * Update PHY info for smhba
827 if (mptsas_smhba_phy_init(mpt
)) {
828 mutex_exit(&mpt
->m_mutex
);
829 mptsas_log(mpt
, CE_WARN
, "mptsas phy update "
831 return (DDI_FAILURE
);
834 mutex_exit(&mpt
->m_mutex
);
837 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
838 if ((phy_mask
>> i
) & 0x01) {
843 bzero(initiator_wwnstr
, sizeof (initiator_wwnstr
));
844 (void) sprintf(initiator_wwnstr
, "w%016"PRIx64
,
845 mpt
->un
.m_base_wwid
);
847 if (ddi_prop_update_string(DDI_DEV_T_NONE
, dip
,
848 SCSI_ADDR_PROP_INITIATOR_PORT
, initiator_wwnstr
) !=
850 (void) ddi_prop_remove(DDI_DEV_T_NONE
,
851 dip
, SCSI_ADDR_PROP_INITIATOR_PORT
);
852 mptsas_log(mpt
, CE_WARN
, "mptsas Initiator port "
853 "prop update failed");
854 return (DDI_FAILURE
);
856 if (ddi_prop_update_int(DDI_DEV_T_NONE
, dip
,
857 MPTSAS_NUM_PHYS
, numphys
) !=
859 (void) ddi_prop_remove(DDI_DEV_T_NONE
, dip
, MPTSAS_NUM_PHYS
);
860 return (DDI_FAILURE
);
863 if (ddi_prop_update_int(DDI_DEV_T_NONE
, dip
,
864 "phymask", phy_mask
) !=
866 (void) ddi_prop_remove(DDI_DEV_T_NONE
, dip
, "phymask");
867 mptsas_log(mpt
, CE_WARN
, "mptsas phy mask "
868 "prop update failed");
869 return (DDI_FAILURE
);
872 if (ddi_prop_update_int(DDI_DEV_T_NONE
, dip
,
873 "dynamic-port", dynamic_port
) !=
875 (void) ddi_prop_remove(DDI_DEV_T_NONE
, dip
, "dynamic-port");
876 mptsas_log(mpt
, CE_WARN
, "mptsas dynamic port "
877 "prop update failed");
878 return (DDI_FAILURE
);
880 if (ddi_prop_update_int(DDI_DEV_T_NONE
, dip
,
881 MPTSAS_VIRTUAL_PORT
, 0) !=
883 (void) ddi_prop_remove(DDI_DEV_T_NONE
, dip
,
884 MPTSAS_VIRTUAL_PORT
);
885 mptsas_log(mpt
, CE_WARN
, "mptsas virtual port "
886 "prop update failed");
887 return (DDI_FAILURE
);
889 mptsas_smhba_set_phy_props(mpt
,
890 iport
, dip
, numphys
, &attached_devhdl
);
892 mutex_enter(&mpt
->m_mutex
);
893 page_address
= (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE
&
894 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) | (uint32_t)attached_devhdl
;
895 rval
= mptsas_get_sas_device_page0(mpt
, page_address
, &dev_hdl
,
896 &attached_sas_wwn
, &dev_info
, &phy_port
, &phy_id
,
897 &pdev_hdl
, &bay_num
, &enclosure
);
898 if (rval
!= DDI_SUCCESS
) {
899 mptsas_log(mpt
, CE_WARN
,
900 "Failed to get device page0 for handle:%d",
902 mutex_exit(&mpt
->m_mutex
);
903 return (DDI_FAILURE
);
906 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
907 bzero(phymask
, sizeof (phymask
));
908 (void) sprintf(phymask
, "%x", mpt
->m_phy_info
[i
].phy_mask
);
909 if (strcmp(phymask
, iport
) == 0) {
910 (void) sprintf(&mpt
->m_phy_info
[i
].smhba_info
.path
[0],
912 mpt
->m_phy_info
[i
].phy_mask
);
915 mutex_exit(&mpt
->m_mutex
);
917 bzero(attached_wwnstr
, sizeof (attached_wwnstr
));
918 (void) sprintf(attached_wwnstr
, "w%016"PRIx64
,
920 if (ddi_prop_update_string(DDI_DEV_T_NONE
, dip
,
921 SCSI_ADDR_PROP_ATTACHED_PORT
, attached_wwnstr
) !=
923 (void) ddi_prop_remove(DDI_DEV_T_NONE
,
924 dip
, SCSI_ADDR_PROP_ATTACHED_PORT
);
925 return (DDI_FAILURE
);
928 /* Create kstats for each phy on this iport */
930 mptsas_create_phy_stats(mpt
, iport
, dip
);
933 * register sas hba iport with mdi (MPxIO/vhci)
935 if (mdi_phci_register(MDI_HCI_CLASS_SCSI
,
936 dip
, 0) == MDI_SUCCESS
) {
937 mpt
->m_mpxio_enable
= TRUE
;
939 return (DDI_SUCCESS
);
944 * Set up all device state and allocate data structures,
945 * mutexes, condition variables, etc. for device operation.
946 * Add interrupts needed.
947 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
950 mptsas_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
952 mptsas_t
*mpt
= NULL
;
954 int doneq_thread_num
;
957 char config_setup
= 0;
958 char hba_attach_setup
= 0;
959 char smp_attach_setup
= 0;
960 char mutex_init_done
= 0;
961 char event_taskq_create
= 0;
962 char dr_taskq_create
= 0;
963 char doneq_thread_create
= 0;
964 scsi_hba_tran_t
*hba_tran
;
965 uint_t mem_bar
= MEM_SPACE
;
966 int rval
= DDI_FAILURE
;
969 ASSERT(NO_COMPETING_THREADS
);
971 if (scsi_hba_iport_unit_address(dip
)) {
972 return (mptsas_iport_attach(dip
, cmd
));
980 if ((hba_tran
= ddi_get_driver_private(dip
)) == NULL
)
981 return (DDI_FAILURE
);
983 mpt
= TRAN2MPT(hba_tran
);
986 return (DDI_FAILURE
);
990 * Reset hardware and softc to "no outstanding commands"
991 * Note that a check condition can result on first command
994 mutex_enter(&mpt
->m_mutex
);
999 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
1000 mutex_exit(&mpt
->m_mutex
);
1001 (void) pm_busy_component(dip
, 0);
1002 rval
= pm_power_has_changed(dip
, 0, PM_LEVEL_D0
);
1003 if (rval
== DDI_SUCCESS
) {
1004 mutex_enter(&mpt
->m_mutex
);
1007 * The pm_raise_power() call above failed,
1008 * and that can only occur if we were unable
1009 * to reset the hardware. This is probably
1010 * due to unhealty hardware, and because
1011 * important filesystems(such as the root
1012 * filesystem) could be on the attached disks,
1013 * it would not be a good idea to continue,
1014 * as we won't be entirely certain we are
1015 * writing correct data. So we panic() here
1016 * to not only prevent possible data corruption,
1017 * but to give developers or end users a hope
1018 * of identifying and correcting any problems.
1020 fm_panic("mptsas could not reset hardware "
1025 mpt
->m_suspended
= 0;
1030 mpt
->m_softstate
|= MPTSAS_SS_MSG_UNIT_RESET
;
1031 if (mptsas_init_chip(mpt
, FALSE
) == DDI_FAILURE
) {
1032 mutex_exit(&mpt
->m_mutex
);
1033 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
1034 (void) pm_idle_component(dip
, 0);
1036 fm_panic("mptsas init chip fail during resume");
1039 * mptsas_update_driver_data needs interrupts so enable them
1042 MPTSAS_ENABLE_INTR(mpt
);
1043 mptsas_update_driver_data(mpt
);
1045 /* start requests, if possible */
1046 mptsas_restart_hba(mpt
);
1048 mutex_exit(&mpt
->m_mutex
);
1051 * Restart watch thread
1053 mutex_enter(&mptsas_global_mutex
);
1054 if (mptsas_timeout_id
== 0) {
1055 mptsas_timeout_id
= timeout(mptsas_watch
, NULL
,
1057 mptsas_timeouts_enabled
= 1;
1059 mutex_exit(&mptsas_global_mutex
);
1061 /* report idle status to pm framework */
1062 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
1063 (void) pm_idle_component(dip
, 0);
1066 return (DDI_SUCCESS
);
1069 return (DDI_FAILURE
);
1073 instance
= ddi_get_instance(dip
);
1076 * Allocate softc information.
1078 if (ddi_soft_state_zalloc(mptsas_state
, instance
) != DDI_SUCCESS
) {
1079 mptsas_log(NULL
, CE_WARN
,
1080 "mptsas%d: cannot allocate soft state", instance
);
1084 mpt
= ddi_get_soft_state(mptsas_state
, instance
);
1087 mptsas_log(NULL
, CE_WARN
,
1088 "mptsas%d: cannot get soft state", instance
);
1092 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1093 scsi_size_clean(dip
);
1096 mpt
->m_instance
= instance
;
1098 /* Make a per-instance copy of the structures */
1099 mpt
->m_io_dma_attr
= mptsas_dma_attrs64
;
1100 mpt
->m_msg_dma_attr
= mptsas_dma_attrs
;
1101 mpt
->m_reg_acc_attr
= mptsas_dev_attr
;
1102 mpt
->m_dev_acc_attr
= mptsas_dev_attr
;
1107 mpt
->m_fm_capabilities
= ddi_getprop(DDI_DEV_T_ANY
, mpt
->m_dip
,
1108 DDI_PROP_CANSLEEP
| DDI_PROP_DONTPASS
, "fm-capable",
1109 DDI_FM_EREPORT_CAPABLE
| DDI_FM_ACCCHK_CAPABLE
|
1110 DDI_FM_DMACHK_CAPABLE
| DDI_FM_ERRCB_CAPABLE
);
1112 mptsas_fm_init(mpt
);
1114 if (mptsas_alloc_handshake_msg(mpt
,
1115 sizeof (Mpi2SCSITaskManagementRequest_t
)) == DDI_FAILURE
) {
1116 mptsas_log(mpt
, CE_WARN
, "cannot initialize handshake msg.");
1121 * Setup configuration space
1123 if (mptsas_config_space_init(mpt
) == FALSE
) {
1124 mptsas_log(mpt
, CE_WARN
, "mptsas_config_space_init failed");
1129 if (ddi_regs_map_setup(dip
, mem_bar
, (caddr_t
*)&mpt
->m_reg
,
1130 0, 0, &mpt
->m_reg_acc_attr
, &mpt
->m_datap
) != DDI_SUCCESS
) {
1131 mptsas_log(mpt
, CE_WARN
, "map setup failed");
1137 * A taskq is created for dealing with the event handler
1139 if ((mpt
->m_event_taskq
= ddi_taskq_create(dip
, "mptsas_event_taskq",
1140 1, TASKQ_DEFAULTPRI
, 0)) == NULL
) {
1141 mptsas_log(mpt
, CE_NOTE
, "ddi_taskq_create failed");
1144 event_taskq_create
++;
1147 * A taskq is created for dealing with dr events
1149 if ((mpt
->m_dr_taskq
= ddi_taskq_create(dip
,
1151 1, TASKQ_DEFAULTPRI
, 0)) == NULL
) {
1152 mptsas_log(mpt
, CE_NOTE
, "ddi_taskq_create for discovery "
1158 mpt
->m_doneq_thread_threshold
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
,
1159 0, "mptsas_doneq_thread_threshold_prop", 10);
1160 mpt
->m_doneq_length_threshold
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
,
1161 0, "mptsas_doneq_length_threshold_prop", 8);
1162 mpt
->m_doneq_thread_n
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
,
1163 0, "mptsas_doneq_thread_n_prop", 8);
1165 if (mpt
->m_doneq_thread_n
) {
1166 cv_init(&mpt
->m_doneq_thread_cv
, NULL
, CV_DRIVER
, NULL
);
1167 mutex_init(&mpt
->m_doneq_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
1169 mutex_enter(&mpt
->m_doneq_mutex
);
1170 mpt
->m_doneq_thread_id
=
1171 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t
)
1172 * mpt
->m_doneq_thread_n
, KM_SLEEP
);
1174 for (j
= 0; j
< mpt
->m_doneq_thread_n
; j
++) {
1175 cv_init(&mpt
->m_doneq_thread_id
[j
].cv
, NULL
,
1177 mutex_init(&mpt
->m_doneq_thread_id
[j
].mutex
, NULL
,
1178 MUTEX_DRIVER
, NULL
);
1179 mutex_enter(&mpt
->m_doneq_thread_id
[j
].mutex
);
1180 mpt
->m_doneq_thread_id
[j
].flag
|=
1181 MPTSAS_DONEQ_THREAD_ACTIVE
;
1182 mpt
->m_doneq_thread_id
[j
].arg
.mpt
= mpt
;
1183 mpt
->m_doneq_thread_id
[j
].arg
.t
= j
;
1184 mpt
->m_doneq_thread_id
[j
].threadp
=
1185 thread_create(NULL
, 0, mptsas_doneq_thread
,
1186 &mpt
->m_doneq_thread_id
[j
].arg
,
1187 0, &p0
, TS_RUN
, minclsyspri
);
1188 mpt
->m_doneq_thread_id
[j
].donetail
=
1189 &mpt
->m_doneq_thread_id
[j
].doneq
;
1190 mutex_exit(&mpt
->m_doneq_thread_id
[j
].mutex
);
1192 mutex_exit(&mpt
->m_doneq_mutex
);
1193 doneq_thread_create
++;
1196 /* Initialize mutex used in interrupt handler */
1197 mutex_init(&mpt
->m_mutex
, NULL
, MUTEX_DRIVER
,
1198 DDI_INTR_PRI(mpt
->m_intr_pri
));
1199 mutex_init(&mpt
->m_passthru_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
1200 mutex_init(&mpt
->m_intr_mutex
, NULL
, MUTEX_DRIVER
,
1201 DDI_INTR_PRI(mpt
->m_intr_pri
));
1202 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
1203 mutex_init(&mpt
->m_phy_info
[i
].smhba_info
.phy_mutex
,
1205 DDI_INTR_PRI(mpt
->m_intr_pri
));
1208 cv_init(&mpt
->m_cv
, NULL
, CV_DRIVER
, NULL
);
1209 cv_init(&mpt
->m_passthru_cv
, NULL
, CV_DRIVER
, NULL
);
1210 cv_init(&mpt
->m_fw_cv
, NULL
, CV_DRIVER
, NULL
);
1211 cv_init(&mpt
->m_config_cv
, NULL
, CV_DRIVER
, NULL
);
1212 cv_init(&mpt
->m_fw_diag_cv
, NULL
, CV_DRIVER
, NULL
);
1216 * Disable hardware interrupt since we're not ready to
1219 MPTSAS_DISABLE_INTR(mpt
);
1220 if (mptsas_register_intrs(mpt
) == FALSE
)
1224 mutex_enter(&mpt
->m_mutex
);
1226 * Initialize power management component
1228 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
1229 if (mptsas_init_pm(mpt
)) {
1230 mutex_exit(&mpt
->m_mutex
);
1231 mptsas_log(mpt
, CE_WARN
, "mptsas pm initialization "
1238 * Initialize chip using Message Unit Reset, if allowed
1240 mpt
->m_softstate
|= MPTSAS_SS_MSG_UNIT_RESET
;
1241 if (mptsas_init_chip(mpt
, TRUE
) == DDI_FAILURE
) {
1242 mutex_exit(&mpt
->m_mutex
);
1243 mptsas_log(mpt
, CE_WARN
, "mptsas chip initialization failed");
1248 * Fill in the phy_info structure and get the base WWID
1250 if (mptsas_get_manufacture_page5(mpt
) == DDI_FAILURE
) {
1251 mptsas_log(mpt
, CE_WARN
,
1252 "mptsas_get_manufacture_page5 failed!");
1256 if (mptsas_get_sas_io_unit_page_hndshk(mpt
)) {
1257 mptsas_log(mpt
, CE_WARN
,
1258 "mptsas_get_sas_io_unit_page_hndshk failed!");
1262 if (mptsas_get_manufacture_page0(mpt
) == DDI_FAILURE
) {
1263 mptsas_log(mpt
, CE_WARN
,
1264 "mptsas_get_manufacture_page0 failed!");
1268 mutex_exit(&mpt
->m_mutex
);
1271 * Register the iport for multiple port HBA
1273 mptsas_iport_register(mpt
);
1276 * initialize SCSI HBA transport structure
1278 if (mptsas_hba_setup(mpt
) == FALSE
)
1282 if (mptsas_smp_setup(mpt
) == FALSE
)
1286 if (mptsas_cache_create(mpt
) == FALSE
)
1289 mpt
->m_scsi_reset_delay
= ddi_prop_get_int(DDI_DEV_T_ANY
,
1290 dip
, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY
);
1291 if (mpt
->m_scsi_reset_delay
== 0) {
1292 mptsas_log(mpt
, CE_NOTE
,
1293 "scsi_reset_delay of 0 is not recommended,"
1294 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1295 mpt
->m_scsi_reset_delay
= SCSI_DEFAULT_RESET_DELAY
;
1299 * Initialize the wait and done FIFO queue
1301 mpt
->m_donetail
= &mpt
->m_doneq
;
1302 mpt
->m_waitqtail
= &mpt
->m_waitq
;
1305 * ioc cmd queue initialize
1307 mpt
->m_ioc_event_cmdtail
= &mpt
->m_ioc_event_cmdq
;
1308 mpt
->m_dev_handle
= 0xFFFF;
1310 MPTSAS_ENABLE_INTR(mpt
);
1313 * enable event notification
1315 mutex_enter(&mpt
->m_mutex
);
1316 if (mptsas_ioc_enable_event_notification(mpt
)) {
1317 mutex_exit(&mpt
->m_mutex
);
1320 mutex_exit(&mpt
->m_mutex
);
1323 * Initialize PHY info for smhba
1325 if (mptsas_smhba_setup(mpt
)) {
1326 mptsas_log(mpt
, CE_WARN
, "mptsas phy initialization "
1331 /* Check all dma handles allocated in attach */
1332 if ((mptsas_check_dma_handle(mpt
->m_dma_req_frame_hdl
)
1334 (mptsas_check_dma_handle(mpt
->m_dma_reply_frame_hdl
)
1336 (mptsas_check_dma_handle(mpt
->m_dma_free_queue_hdl
)
1338 (mptsas_check_dma_handle(mpt
->m_dma_post_queue_hdl
)
1340 (mptsas_check_dma_handle(mpt
->m_hshk_dma_hdl
)
1345 /* Check all acc handles allocated in attach */
1346 if ((mptsas_check_acc_handle(mpt
->m_datap
) != DDI_SUCCESS
) ||
1347 (mptsas_check_acc_handle(mpt
->m_acc_req_frame_hdl
)
1349 (mptsas_check_acc_handle(mpt
->m_acc_reply_frame_hdl
)
1351 (mptsas_check_acc_handle(mpt
->m_acc_free_queue_hdl
)
1353 (mptsas_check_acc_handle(mpt
->m_acc_post_queue_hdl
)
1355 (mptsas_check_acc_handle(mpt
->m_hshk_acc_hdl
)
1357 (mptsas_check_acc_handle(mpt
->m_config_handle
)
1363 * After this point, we are not going to fail the attach.
1366 * used for mptsas_watch
1368 mptsas_list_add(mpt
);
1370 mutex_enter(&mptsas_global_mutex
);
1371 if (mptsas_timeouts_enabled
== 0) {
1372 mptsas_scsi_watchdog_tick
= ddi_prop_get_int(DDI_DEV_T_ANY
,
1373 dip
, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK
);
1375 mptsas_tick
= mptsas_scsi_watchdog_tick
*
1376 drv_usectohz((clock_t)1000000);
1378 mptsas_timeout_id
= timeout(mptsas_watch
, NULL
, mptsas_tick
);
1379 mptsas_timeouts_enabled
= 1;
1381 mutex_exit(&mptsas_global_mutex
);
1383 /* Print message of HBA present */
1384 ddi_report_dev(dip
);
1386 /* report idle status to pm framework */
1387 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
1388 (void) pm_idle_component(dip
, 0);
1391 return (DDI_SUCCESS
);
1394 mptsas_log(mpt
, CE_WARN
, "attach failed");
1395 mptsas_fm_ereport(mpt
, DDI_FM_DEVICE_NO_RESPONSE
);
1396 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_LOST
);
1398 mutex_enter(&mptsas_global_mutex
);
1400 if (mptsas_timeout_id
&& (mptsas_head
== NULL
)) {
1401 timeout_id_t tid
= mptsas_timeout_id
;
1402 mptsas_timeouts_enabled
= 0;
1403 mptsas_timeout_id
= 0;
1404 mutex_exit(&mptsas_global_mutex
);
1405 (void) untimeout(tid
);
1406 mutex_enter(&mptsas_global_mutex
);
1408 mutex_exit(&mptsas_global_mutex
);
1409 /* deallocate in reverse order */
1410 mptsas_cache_destroy(mpt
);
1412 if (smp_attach_setup
) {
1413 mptsas_smp_teardown(mpt
);
1415 if (hba_attach_setup
) {
1416 mptsas_hba_teardown(mpt
);
1419 if (mpt
->m_active
) {
1420 mptsas_hash_uninit(&mpt
->m_active
->m_smptbl
,
1421 sizeof (mptsas_smp_t
));
1422 mptsas_hash_uninit(&mpt
->m_active
->m_tgttbl
,
1423 sizeof (mptsas_target_t
));
1424 mptsas_free_active_slots(mpt
);
1427 mptsas_unregister_intrs(mpt
);
1430 if (doneq_thread_create
) {
1431 mutex_enter(&mpt
->m_doneq_mutex
);
1432 doneq_thread_num
= mpt
->m_doneq_thread_n
;
1433 for (j
= 0; j
< mpt
->m_doneq_thread_n
; j
++) {
1434 mutex_enter(&mpt
->m_doneq_thread_id
[j
].mutex
);
1435 mpt
->m_doneq_thread_id
[j
].flag
&=
1436 (~MPTSAS_DONEQ_THREAD_ACTIVE
);
1437 cv_signal(&mpt
->m_doneq_thread_id
[j
].cv
);
1438 mutex_exit(&mpt
->m_doneq_thread_id
[j
].mutex
);
1440 while (mpt
->m_doneq_thread_n
) {
1441 cv_wait(&mpt
->m_doneq_thread_cv
,
1442 &mpt
->m_doneq_mutex
);
1444 for (j
= 0; j
< doneq_thread_num
; j
++) {
1445 cv_destroy(&mpt
->m_doneq_thread_id
[j
].cv
);
1446 mutex_destroy(&mpt
->m_doneq_thread_id
[j
].mutex
);
1448 kmem_free(mpt
->m_doneq_thread_id
,
1449 sizeof (mptsas_doneq_thread_list_t
)
1450 * doneq_thread_num
);
1451 mutex_exit(&mpt
->m_doneq_mutex
);
1452 cv_destroy(&mpt
->m_doneq_thread_cv
);
1453 mutex_destroy(&mpt
->m_doneq_mutex
);
1455 if (event_taskq_create
) {
1456 ddi_taskq_destroy(mpt
->m_event_taskq
);
1458 if (dr_taskq_create
) {
1459 ddi_taskq_destroy(mpt
->m_dr_taskq
);
1461 if (mutex_init_done
) {
1462 mutex_destroy(&mpt
->m_intr_mutex
);
1463 mutex_destroy(&mpt
->m_passthru_mutex
);
1464 mutex_destroy(&mpt
->m_mutex
);
1465 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
1467 &mpt
->m_phy_info
[i
].smhba_info
.phy_mutex
);
1469 cv_destroy(&mpt
->m_cv
);
1470 cv_destroy(&mpt
->m_passthru_cv
);
1471 cv_destroy(&mpt
->m_fw_cv
);
1472 cv_destroy(&mpt
->m_config_cv
);
1473 cv_destroy(&mpt
->m_fw_diag_cv
);
1477 mptsas_cfg_fini(mpt
);
1480 mptsas_config_space_fini(mpt
);
1482 mptsas_free_handshake_msg(mpt
);
1483 mptsas_hba_fini(mpt
);
1485 mptsas_fm_fini(mpt
);
1486 ddi_soft_state_free(mptsas_state
, instance
);
1487 ddi_prop_remove_all(dip
);
1489 return (DDI_FAILURE
);
1493 mptsas_suspend(dev_info_t
*devi
)
1496 scsi_hba_tran_t
*tran
;
1498 if (scsi_hba_iport_unit_address(devi
)) {
1499 return (DDI_SUCCESS
);
1502 if ((tran
= ddi_get_driver_private(devi
)) == NULL
)
1503 return (DDI_SUCCESS
);
1505 mpt
= TRAN2MPT(tran
);
1507 return (DDI_SUCCESS
);
1510 mutex_enter(&mpt
->m_mutex
);
1512 if (mpt
->m_suspended
++) {
1513 mutex_exit(&mpt
->m_mutex
);
1514 return (DDI_SUCCESS
);
1518 * Cancel timeout threads for this mpt
1520 if (mpt
->m_quiesce_timeid
) {
1521 timeout_id_t tid
= mpt
->m_quiesce_timeid
;
1522 mpt
->m_quiesce_timeid
= 0;
1523 mutex_exit(&mpt
->m_mutex
);
1524 (void) untimeout(tid
);
1525 mutex_enter(&mpt
->m_mutex
);
1528 if (mpt
->m_restart_cmd_timeid
) {
1529 timeout_id_t tid
= mpt
->m_restart_cmd_timeid
;
1530 mpt
->m_restart_cmd_timeid
= 0;
1531 mutex_exit(&mpt
->m_mutex
);
1532 (void) untimeout(tid
);
1533 mutex_enter(&mpt
->m_mutex
);
1536 mutex_exit(&mpt
->m_mutex
);
1538 (void) pm_idle_component(mpt
->m_dip
, 0);
1541 * Cancel watch threads if all mpts suspended
1543 rw_enter(&mptsas_global_rwlock
, RW_WRITER
);
1544 for (g
= mptsas_head
; g
!= NULL
; g
= g
->m_next
) {
1545 if (!g
->m_suspended
)
1548 rw_exit(&mptsas_global_rwlock
);
1550 mutex_enter(&mptsas_global_mutex
);
1554 mptsas_timeouts_enabled
= 0;
1555 if (mptsas_timeout_id
) {
1556 tid
= mptsas_timeout_id
;
1557 mptsas_timeout_id
= 0;
1558 mutex_exit(&mptsas_global_mutex
);
1559 (void) untimeout(tid
);
1560 mutex_enter(&mptsas_global_mutex
);
1562 if (mptsas_reset_watch
) {
1563 tid
= mptsas_reset_watch
;
1564 mptsas_reset_watch
= 0;
1565 mutex_exit(&mptsas_global_mutex
);
1566 (void) untimeout(tid
);
1567 mutex_enter(&mptsas_global_mutex
);
1570 mutex_exit(&mptsas_global_mutex
);
1572 mutex_enter(&mpt
->m_mutex
);
1575 * If this mpt is not in full power(PM_LEVEL_D0), just return.
1577 if ((mpt
->m_options
& MPTSAS_OPT_PM
) &&
1578 (mpt
->m_power_level
!= PM_LEVEL_D0
)) {
1579 mutex_exit(&mpt
->m_mutex
);
1580 return (DDI_SUCCESS
);
1583 /* Disable HBA interrupts in hardware */
1584 MPTSAS_DISABLE_INTR(mpt
);
1586 * Send RAID action system shutdown to sync IR
1588 mptsas_raid_action_system_shutdown(mpt
);
1590 mutex_exit(&mpt
->m_mutex
);
1592 /* drain the taskq */
1593 ddi_taskq_wait(mpt
->m_event_taskq
);
1594 ddi_taskq_wait(mpt
->m_dr_taskq
);
1596 return (DDI_SUCCESS
);
1602 mptsas_reset(dev_info_t
*devi
, ddi_reset_cmd_t cmd
)
1605 scsi_hba_tran_t
*tran
;
1608 * If this call is for iport, just return.
1610 if (scsi_hba_iport_unit_address(devi
))
1611 return (DDI_SUCCESS
);
1613 if ((tran
= ddi_get_driver_private(devi
)) == NULL
)
1614 return (DDI_SUCCESS
);
1616 if ((mpt
= TRAN2MPT(tran
)) == NULL
)
1617 return (DDI_SUCCESS
);
1620 * Send RAID action system shutdown to sync IR. Disable HBA
1621 * interrupts in hardware first.
1623 MPTSAS_DISABLE_INTR(mpt
);
1624 mptsas_raid_action_system_shutdown(mpt
);
1626 return (DDI_SUCCESS
);
1630 * quiesce(9E) entry point.
1632 * This function is called when the system is single-threaded at high
1633 * PIL with preemption disabled. Therefore, this function must not be
1636 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1637 * DDI_FAILURE indicates an error condition and should almost never happen.
1640 mptsas_quiesce(dev_info_t
*devi
)
1643 scsi_hba_tran_t
*tran
;
1646 * If this call is for iport, just return.
1648 if (scsi_hba_iport_unit_address(devi
))
1649 return (DDI_SUCCESS
);
1651 if ((tran
= ddi_get_driver_private(devi
)) == NULL
)
1652 return (DDI_SUCCESS
);
1654 if ((mpt
= TRAN2MPT(tran
)) == NULL
)
1655 return (DDI_SUCCESS
);
1657 /* Disable HBA interrupts in hardware */
1658 MPTSAS_DISABLE_INTR(mpt
);
1659 /* Send RAID action system shutdonw to sync IR */
1660 mptsas_raid_action_system_shutdown(mpt
);
1662 return (DDI_SUCCESS
);
1664 #endif /* __sparc */
1667 * detach(9E). Remove all device allocations and system resources;
1668 * disable device interrupts.
1669 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1672 mptsas_detach(dev_info_t
*devi
, ddi_detach_cmd_t cmd
)
1675 ASSERT(NO_COMPETING_THREADS
);
1676 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi
, (void *)cmd
));
1680 return (mptsas_do_detach(devi
));
1683 return (mptsas_suspend(devi
));
1686 return (DDI_FAILURE
);
1692 mptsas_do_detach(dev_info_t
*dip
)
1695 scsi_hba_tran_t
*tran
;
1698 mdi_pathinfo_t
*pip
= NULL
;
1700 int doneq_thread_num
= 0;
1702 NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip
));
1704 if ((tran
= ndi_flavorv_get(dip
, SCSA_FLAVOR_SCSI_DEVICE
)) == NULL
)
1705 return (DDI_FAILURE
);
1707 mpt
= TRAN2MPT(tran
);
1709 return (DDI_FAILURE
);
1712 * Still have pathinfo child, should not detach mpt driver
1714 if (scsi_hba_iport_unit_address(dip
)) {
1715 if (mpt
->m_mpxio_enable
) {
1717 * MPxIO enabled for the iport
1719 ndi_devi_enter(scsi_vhci_dip
, &circ1
);
1720 ndi_devi_enter(dip
, &circ
);
1721 while (pip
= mdi_get_next_client_path(dip
, NULL
)) {
1722 if (mdi_pi_free(pip
, 0) == MDI_SUCCESS
) {
1725 ndi_devi_exit(dip
, circ
);
1726 ndi_devi_exit(scsi_vhci_dip
, circ1
);
1727 NDBG12(("detach failed because of "
1728 "outstanding path info"));
1729 return (DDI_FAILURE
);
1731 ndi_devi_exit(dip
, circ
);
1732 ndi_devi_exit(scsi_vhci_dip
, circ1
);
1733 (void) mdi_phci_unregister(dip
, 0);
1736 ddi_prop_remove_all(dip
);
1738 return (DDI_SUCCESS
);
1741 /* Make sure power level is D0 before accessing registers */
1742 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
1743 (void) pm_busy_component(dip
, 0);
1744 if (mpt
->m_power_level
!= PM_LEVEL_D0
) {
1745 if (pm_raise_power(dip
, 0, PM_LEVEL_D0
) !=
1747 mptsas_log(mpt
, CE_WARN
,
1748 "mptsas%d: Raise power request failed.",
1750 (void) pm_idle_component(dip
, 0);
1751 return (DDI_FAILURE
);
1757 * Send RAID action system shutdown to sync IR. After action, send a
1758 * Message Unit Reset. Since after that DMA resource will be freed,
1759 * set ioc to READY state will avoid HBA initiated DMA operation.
1761 mutex_enter(&mpt
->m_mutex
);
1762 MPTSAS_DISABLE_INTR(mpt
);
1763 mptsas_raid_action_system_shutdown(mpt
);
1764 mpt
->m_softstate
|= MPTSAS_SS_MSG_UNIT_RESET
;
1765 (void) mptsas_ioc_reset(mpt
, FALSE
);
1766 mutex_exit(&mpt
->m_mutex
);
1767 mptsas_rem_intrs(mpt
);
1768 ddi_taskq_destroy(mpt
->m_event_taskq
);
1769 ddi_taskq_destroy(mpt
->m_dr_taskq
);
1771 if (mpt
->m_doneq_thread_n
) {
1772 mutex_enter(&mpt
->m_doneq_mutex
);
1773 doneq_thread_num
= mpt
->m_doneq_thread_n
;
1774 for (i
= 0; i
< mpt
->m_doneq_thread_n
; i
++) {
1775 mutex_enter(&mpt
->m_doneq_thread_id
[i
].mutex
);
1776 mpt
->m_doneq_thread_id
[i
].flag
&=
1777 (~MPTSAS_DONEQ_THREAD_ACTIVE
);
1778 cv_signal(&mpt
->m_doneq_thread_id
[i
].cv
);
1779 mutex_exit(&mpt
->m_doneq_thread_id
[i
].mutex
);
1781 while (mpt
->m_doneq_thread_n
) {
1782 cv_wait(&mpt
->m_doneq_thread_cv
,
1783 &mpt
->m_doneq_mutex
);
1785 for (i
= 0; i
< doneq_thread_num
; i
++) {
1786 cv_destroy(&mpt
->m_doneq_thread_id
[i
].cv
);
1787 mutex_destroy(&mpt
->m_doneq_thread_id
[i
].mutex
);
1789 kmem_free(mpt
->m_doneq_thread_id
,
1790 sizeof (mptsas_doneq_thread_list_t
)
1791 * doneq_thread_num
);
1792 mutex_exit(&mpt
->m_doneq_mutex
);
1793 cv_destroy(&mpt
->m_doneq_thread_cv
);
1794 mutex_destroy(&mpt
->m_doneq_mutex
);
1797 scsi_hba_reset_notify_tear_down(mpt
->m_reset_notify_listf
);
1799 mptsas_list_del(mpt
);
1802 * Cancel timeout threads for this mpt
1804 mutex_enter(&mpt
->m_mutex
);
1805 if (mpt
->m_quiesce_timeid
) {
1806 timeout_id_t tid
= mpt
->m_quiesce_timeid
;
1807 mpt
->m_quiesce_timeid
= 0;
1808 mutex_exit(&mpt
->m_mutex
);
1809 (void) untimeout(tid
);
1810 mutex_enter(&mpt
->m_mutex
);
1813 if (mpt
->m_restart_cmd_timeid
) {
1814 timeout_id_t tid
= mpt
->m_restart_cmd_timeid
;
1815 mpt
->m_restart_cmd_timeid
= 0;
1816 mutex_exit(&mpt
->m_mutex
);
1817 (void) untimeout(tid
);
1818 mutex_enter(&mpt
->m_mutex
);
1821 mutex_exit(&mpt
->m_mutex
);
1824 * last mpt? ... if active, CANCEL watch threads.
1826 mutex_enter(&mptsas_global_mutex
);
1827 if (mptsas_head
== NULL
) {
1830 * Clear mptsas_timeouts_enable so that the watch thread
1831 * gets restarted on DDI_ATTACH
1833 mptsas_timeouts_enabled
= 0;
1834 if (mptsas_timeout_id
) {
1835 tid
= mptsas_timeout_id
;
1836 mptsas_timeout_id
= 0;
1837 mutex_exit(&mptsas_global_mutex
);
1838 (void) untimeout(tid
);
1839 mutex_enter(&mptsas_global_mutex
);
1841 if (mptsas_reset_watch
) {
1842 tid
= mptsas_reset_watch
;
1843 mptsas_reset_watch
= 0;
1844 mutex_exit(&mptsas_global_mutex
);
1845 (void) untimeout(tid
);
1846 mutex_enter(&mptsas_global_mutex
);
1849 mutex_exit(&mptsas_global_mutex
);
1854 mptsas_destroy_phy_stats(mpt
);
1859 mutex_enter(&mpt
->m_mutex
);
1860 mptsas_hash_uninit(&mpt
->m_active
->m_tgttbl
, sizeof (mptsas_target_t
));
1861 mptsas_hash_uninit(&mpt
->m_active
->m_smptbl
, sizeof (mptsas_smp_t
));
1862 mptsas_free_active_slots(mpt
);
1863 mutex_exit(&mpt
->m_mutex
);
1865 /* deallocate everything that was allocated in mptsas_attach */
1866 mptsas_cache_destroy(mpt
);
1868 mptsas_hba_fini(mpt
);
1869 mptsas_cfg_fini(mpt
);
1871 /* Lower the power informing PM Framework */
1872 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
1873 if (pm_lower_power(dip
, 0, PM_LEVEL_D3
) != DDI_SUCCESS
)
1874 mptsas_log(mpt
, CE_WARN
,
1875 "!mptsas%d: Lower power request failed "
1876 "during detach, ignoring.",
1880 mutex_destroy(&mpt
->m_intr_mutex
);
1881 mutex_destroy(&mpt
->m_passthru_mutex
);
1882 mutex_destroy(&mpt
->m_mutex
);
1883 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
1884 mutex_destroy(&mpt
->m_phy_info
[i
].smhba_info
.phy_mutex
);
1886 cv_destroy(&mpt
->m_cv
);
1887 cv_destroy(&mpt
->m_passthru_cv
);
1888 cv_destroy(&mpt
->m_fw_cv
);
1889 cv_destroy(&mpt
->m_config_cv
);
1890 cv_destroy(&mpt
->m_fw_diag_cv
);
1893 mptsas_smp_teardown(mpt
);
1894 mptsas_hba_teardown(mpt
);
1896 mptsas_config_space_fini(mpt
);
1898 mptsas_free_handshake_msg(mpt
);
1900 mptsas_fm_fini(mpt
);
1901 ddi_soft_state_free(mptsas_state
, ddi_get_instance(dip
));
1902 ddi_prop_remove_all(dip
);
1904 return (DDI_SUCCESS
);
1908 mptsas_list_add(mptsas_t
*mpt
)
1910 rw_enter(&mptsas_global_rwlock
, RW_WRITER
);
1912 if (mptsas_head
== NULL
) {
1915 mptsas_tail
->m_next
= mpt
;
1918 rw_exit(&mptsas_global_rwlock
);
1922 mptsas_list_del(mptsas_t
*mpt
)
1926 * Remove device instance from the global linked list
1928 rw_enter(&mptsas_global_rwlock
, RW_WRITER
);
1929 if (mptsas_head
== mpt
) {
1930 m
= mptsas_head
= mpt
->m_next
;
1932 for (m
= mptsas_head
; m
!= NULL
; m
= m
->m_next
) {
1933 if (m
->m_next
== mpt
) {
1934 m
->m_next
= mpt
->m_next
;
1939 mptsas_log(mpt
, CE_PANIC
, "Not in softc list!");
1943 if (mptsas_tail
== mpt
) {
1946 rw_exit(&mptsas_global_rwlock
);
1950 mptsas_alloc_handshake_msg(mptsas_t
*mpt
, size_t alloc_size
)
1952 ddi_dma_attr_t task_dma_attrs
;
1954 task_dma_attrs
= mpt
->m_msg_dma_attr
;
1955 task_dma_attrs
.dma_attr_sgllen
= 1;
1956 task_dma_attrs
.dma_attr_granular
= (uint32_t)(alloc_size
);
1958 /* allocate Task Management ddi_dma resources */
1959 if (mptsas_dma_addr_create(mpt
, task_dma_attrs
,
1960 &mpt
->m_hshk_dma_hdl
, &mpt
->m_hshk_acc_hdl
, &mpt
->m_hshk_memp
,
1961 alloc_size
, NULL
) == FALSE
) {
1962 return (DDI_FAILURE
);
1964 mpt
->m_hshk_dma_size
= alloc_size
;
1966 return (DDI_SUCCESS
);
1970 mptsas_free_handshake_msg(mptsas_t
*mpt
)
1972 mptsas_dma_addr_destroy(&mpt
->m_hshk_dma_hdl
, &mpt
->m_hshk_acc_hdl
);
1973 mpt
->m_hshk_dma_size
= 0;
1977 mptsas_hba_setup(mptsas_t
*mpt
)
1979 scsi_hba_tran_t
*hba_tran
;
1982 /* Allocate a transport structure */
1983 hba_tran
= mpt
->m_tran
= scsi_hba_tran_alloc(mpt
->m_dip
,
1985 ASSERT(mpt
->m_tran
!= NULL
);
1987 hba_tran
->tran_hba_private
= mpt
;
1988 hba_tran
->tran_tgt_private
= NULL
;
1990 hba_tran
->tran_tgt_init
= mptsas_scsi_tgt_init
;
1991 hba_tran
->tran_tgt_free
= mptsas_scsi_tgt_free
;
1993 hba_tran
->tran_start
= mptsas_scsi_start
;
1994 hba_tran
->tran_reset
= mptsas_scsi_reset
;
1995 hba_tran
->tran_abort
= mptsas_scsi_abort
;
1996 hba_tran
->tran_getcap
= mptsas_scsi_getcap
;
1997 hba_tran
->tran_setcap
= mptsas_scsi_setcap
;
1998 hba_tran
->tran_init_pkt
= mptsas_scsi_init_pkt
;
1999 hba_tran
->tran_destroy_pkt
= mptsas_scsi_destroy_pkt
;
2001 hba_tran
->tran_dmafree
= mptsas_scsi_dmafree
;
2002 hba_tran
->tran_sync_pkt
= mptsas_scsi_sync_pkt
;
2003 hba_tran
->tran_reset_notify
= mptsas_scsi_reset_notify
;
2005 hba_tran
->tran_get_bus_addr
= mptsas_get_bus_addr
;
2006 hba_tran
->tran_get_name
= mptsas_get_name
;
2008 hba_tran
->tran_quiesce
= mptsas_scsi_quiesce
;
2009 hba_tran
->tran_unquiesce
= mptsas_scsi_unquiesce
;
2010 hba_tran
->tran_bus_reset
= NULL
;
2012 hba_tran
->tran_add_eventcall
= NULL
;
2013 hba_tran
->tran_get_eventcookie
= NULL
;
2014 hba_tran
->tran_post_event
= NULL
;
2015 hba_tran
->tran_remove_eventcall
= NULL
;
2017 hba_tran
->tran_bus_config
= mptsas_bus_config
;
2019 hba_tran
->tran_interconnect_type
= INTERCONNECT_SAS
;
2022 * All children of the HBA are iports. We need tran was cloned.
2023 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
2024 * inherited to iport's tran vector.
2026 tran_flags
= (SCSI_HBA_HBA
| SCSI_HBA_TRAN_CLONE
);
2028 if (scsi_hba_attach_setup(mpt
->m_dip
, &mpt
->m_msg_dma_attr
,
2029 hba_tran
, tran_flags
) != DDI_SUCCESS
) {
2030 mptsas_log(mpt
, CE_WARN
, "hba attach setup failed");
2031 scsi_hba_tran_free(hba_tran
);
2039 mptsas_hba_teardown(mptsas_t
*mpt
)
2041 (void) scsi_hba_detach(mpt
->m_dip
);
2042 if (mpt
->m_tran
!= NULL
) {
2043 scsi_hba_tran_free(mpt
->m_tran
);
2049 mptsas_iport_register(mptsas_t
*mpt
)
2052 mptsas_phymask_t mask
= 0x0;
2054 * initial value of mask is 0
2056 mutex_enter(&mpt
->m_mutex
);
2057 for (i
= 0; i
< mpt
->m_num_phys
; i
++) {
2058 mptsas_phymask_t phy_mask
= 0x0;
2059 char phy_mask_name
[MPTSAS_MAX_PHYS
];
2060 uint8_t current_port
;
2062 if (mpt
->m_phy_info
[i
].attached_devhdl
== 0)
2065 bzero(phy_mask_name
, sizeof (phy_mask_name
));
2067 current_port
= mpt
->m_phy_info
[i
].port_num
;
2069 if ((mask
& (1 << i
)) != 0)
2072 for (j
= 0; j
< mpt
->m_num_phys
; j
++) {
2073 if (mpt
->m_phy_info
[j
].attached_devhdl
&&
2074 (mpt
->m_phy_info
[j
].port_num
== current_port
)) {
2075 phy_mask
|= (1 << j
);
2078 mask
= mask
| phy_mask
;
2080 for (j
= 0; j
< mpt
->m_num_phys
; j
++) {
2081 if ((phy_mask
>> j
) & 0x01) {
2082 mpt
->m_phy_info
[j
].phy_mask
= phy_mask
;
2086 (void) sprintf(phy_mask_name
, "%x", phy_mask
);
2088 mutex_exit(&mpt
->m_mutex
);
2092 (void) scsi_hba_iport_register(mpt
->m_dip
, phy_mask_name
);
2093 mutex_enter(&mpt
->m_mutex
);
2095 mutex_exit(&mpt
->m_mutex
);
2097 * register a virtual port for RAID volume always
2099 (void) scsi_hba_iport_register(mpt
->m_dip
, "v0");
2104 mptsas_smp_setup(mptsas_t
*mpt
)
2106 mpt
->m_smptran
= smp_hba_tran_alloc(mpt
->m_dip
);
2107 ASSERT(mpt
->m_smptran
!= NULL
);
2108 mpt
->m_smptran
->smp_tran_hba_private
= mpt
;
2109 mpt
->m_smptran
->smp_tran_start
= mptsas_smp_start
;
2110 if (smp_hba_attach_setup(mpt
->m_dip
, mpt
->m_smptran
) != DDI_SUCCESS
) {
2111 mptsas_log(mpt
, CE_WARN
, "smp attach setup failed");
2112 smp_hba_tran_free(mpt
->m_smptran
);
2113 mpt
->m_smptran
= NULL
;
2117 * Initialize smp hash table
2119 mptsas_hash_init(&mpt
->m_active
->m_smptbl
);
2120 mpt
->m_smp_devhdl
= 0xFFFF;
2126 mptsas_smp_teardown(mptsas_t
*mpt
)
2128 (void) smp_hba_detach(mpt
->m_dip
);
2129 if (mpt
->m_smptran
!= NULL
) {
2130 smp_hba_tran_free(mpt
->m_smptran
);
2131 mpt
->m_smptran
= NULL
;
2133 mpt
->m_smp_devhdl
= 0;
2137 mptsas_cache_create(mptsas_t
*mpt
)
2139 int instance
= mpt
->m_instance
;
2143 * create kmem cache for packets
2145 (void) sprintf(buf
, "mptsas%d_cache", instance
);
2146 mpt
->m_kmem_cache
= kmem_cache_create(buf
,
2147 sizeof (struct mptsas_cmd
) + scsi_pkt_size(), 8,
2148 mptsas_kmem_cache_constructor
, mptsas_kmem_cache_destructor
,
2149 NULL
, (void *)mpt
, NULL
, 0);
2151 if (mpt
->m_kmem_cache
== NULL
) {
2152 mptsas_log(mpt
, CE_WARN
, "creating kmem cache failed");
2157 * create kmem cache for extra SGL frames if SGL cannot
2158 * be accomodated into main request frame.
2160 (void) sprintf(buf
, "mptsas%d_cache_frames", instance
);
2161 mpt
->m_cache_frames
= kmem_cache_create(buf
,
2162 sizeof (mptsas_cache_frames_t
), 8,
2163 mptsas_cache_frames_constructor
, mptsas_cache_frames_destructor
,
2164 NULL
, (void *)mpt
, NULL
, 0);
2166 if (mpt
->m_cache_frames
== NULL
) {
2167 mptsas_log(mpt
, CE_WARN
, "creating cache for frames failed");
2175 mptsas_cache_destroy(mptsas_t
*mpt
)
2177 /* deallocate in reverse order */
2178 if (mpt
->m_cache_frames
) {
2179 kmem_cache_destroy(mpt
->m_cache_frames
);
2180 mpt
->m_cache_frames
= NULL
;
2182 if (mpt
->m_kmem_cache
) {
2183 kmem_cache_destroy(mpt
->m_kmem_cache
);
2184 mpt
->m_kmem_cache
= NULL
;
2189 mptsas_power(dev_info_t
*dip
, int component
, int level
)
2192 _NOTE(ARGUNUSED(component
))
2195 int rval
= DDI_SUCCESS
;
2197 uint32_t ioc_status
;
2199 if (scsi_hba_iport_unit_address(dip
) != 0)
2200 return (DDI_SUCCESS
);
2202 mpt
= ddi_get_soft_state(mptsas_state
, ddi_get_instance(dip
));
2204 return (DDI_FAILURE
);
2207 mutex_enter(&mpt
->m_mutex
);
2210 * If the device is busy, don't lower its power level
2212 if (mpt
->m_busy
&& (mpt
->m_power_level
> level
)) {
2213 mutex_exit(&mpt
->m_mutex
);
2214 return (DDI_FAILURE
);
2218 NDBG11(("mptsas%d: turning power ON.", mpt
->m_instance
));
2219 MPTSAS_POWER_ON(mpt
);
2221 * Wait up to 30 seconds for IOC to come out of reset.
2223 while (((ioc_status
= ddi_get32(mpt
->m_datap
,
2224 &mpt
->m_reg
->Doorbell
)) &
2225 MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_RESET
) {
2226 if (polls
++ > 3000) {
2229 delay(drv_usectohz(10000));
2232 * If IOC is not in operational state, try to hard reset it.
2234 if ((ioc_status
& MPI2_IOC_STATE_MASK
) !=
2235 MPI2_IOC_STATE_OPERATIONAL
) {
2236 mpt
->m_softstate
&= ~MPTSAS_SS_MSG_UNIT_RESET
;
2237 if (mptsas_restart_ioc(mpt
) == DDI_FAILURE
) {
2238 mptsas_log(mpt
, CE_WARN
,
2239 "mptsas_power: hard reset failed");
2240 mutex_exit(&mpt
->m_mutex
);
2241 return (DDI_FAILURE
);
2244 mutex_enter(&mpt
->m_intr_mutex
);
2245 mpt
->m_power_level
= PM_LEVEL_D0
;
2246 mutex_exit(&mpt
->m_intr_mutex
);
2249 NDBG11(("mptsas%d: turning power OFF.", mpt
->m_instance
));
2250 MPTSAS_POWER_OFF(mpt
);
2253 mptsas_log(mpt
, CE_WARN
, "mptsas%d: unknown power level <%x>.",
2254 mpt
->m_instance
, level
);
2258 mutex_exit(&mpt
->m_mutex
);
/*
 * mptsas_config_space_init: map PCI configuration space for this instance,
 * apply the XMITS CBE workaround, program the command register, and cache
 * the device ID, revision, subsystem vendor/device IDs and latency timer.
 * NOTE(review): this chunk is extraction-damaged — the return-type line,
 * braces, the early-return when m_config_handle already exists, and the
 * XMITS status-bit constants are missing from this view; do not infer
 * behavior for the gaps without consulting the upstream source.
 */
2263 * Initialize configuration space and figure out which
2264 * chip and revision of the chip the mpt driver is using.
2267 mptsas_config_space_init(mptsas_t
*mpt
)
2269 NDBG0(("mptsas_config_space_init"));
/* Already mapped: presumably returns success immediately — TODO confirm. */
2271 if (mpt
->m_config_handle
!= NULL
)
2274 if (pci_config_setup(mpt
->m_dip
,
2275 &mpt
->m_config_handle
) != DDI_SUCCESS
) {
2276 mptsas_log(mpt
, CE_WARN
, "cannot map configuration space.");
2281 * This is a workaround for a XMITS ASIC bug which does not
2282 * drive the CBE upper bits.
2284 if (pci_config_get16(mpt
->m_config_handle
, PCI_CONF_STAT
) &
2286 pci_config_put16(mpt
->m_config_handle
, PCI_CONF_STAT
,
/* Enable bus mastering / memory access in the PCI command register. */
2290 mptsas_setup_cmd_reg(mpt
);
2293 * Get the chip device id:
2295 mpt
->m_devid
= pci_config_get16(mpt
->m_config_handle
, PCI_CONF_DEVID
);
2298 * Save the revision.
2300 mpt
->m_revid
= pci_config_get8(mpt
->m_config_handle
, PCI_CONF_REVID
);
2303 * Save the SubSystem Vendor and Device IDs
2305 mpt
->m_svid
= pci_config_get16(mpt
->m_config_handle
, PCI_CONF_SUBVENID
);
2306 mpt
->m_ssid
= pci_config_get16(mpt
->m_config_handle
, PCI_CONF_SUBSYSID
);
2309 * Set the latency timer to 0x40 as specified by the upa -> pci
2310 * bridge chip design team. This may be done by the sparc pci
2311 * bus nexus driver, but the driver should make sure the latency
2312 * timer is correct for performance reasons.
2314 pci_config_put8(mpt
->m_config_handle
, PCI_CONF_LATENCY_TIMER
,
2315 MPTSAS_LATENCY_TIMER
);
/* Probe PCI capabilities (MSI/MSI-X etc.); result deliberately ignored. */
2317 (void) mptsas_get_pci_cap(mpt
);
2322 mptsas_config_space_fini(mptsas_t
*mpt
)
2324 if (mpt
->m_config_handle
!= NULL
) {
2325 mptsas_disable_bus_master(mpt
);
2326 pci_config_teardown(&mpt
->m_config_handle
);
2327 mpt
->m_config_handle
= NULL
;
2332 mptsas_setup_cmd_reg(mptsas_t
*mpt
)
2337 * Set the command register to the needed values.
2339 cmdreg
= pci_config_get16(mpt
->m_config_handle
, PCI_CONF_COMM
);
2340 cmdreg
|= (PCI_COMM_ME
| PCI_COMM_SERR_ENABLE
|
2341 PCI_COMM_PARITY_DETECT
| PCI_COMM_MAE
);
2342 cmdreg
&= ~PCI_COMM_IO
;
2343 pci_config_put16(mpt
->m_config_handle
, PCI_CONF_COMM
, cmdreg
);
2347 mptsas_disable_bus_master(mptsas_t
*mpt
)
2352 * Clear the master enable bit in the PCI command register.
2353 * This prevents any bus mastering activity like DMA.
2355 cmdreg
= pci_config_get16(mpt
->m_config_handle
, PCI_CONF_COMM
);
2356 cmdreg
&= ~PCI_COMM_ME
;
2357 pci_config_put16(mpt
->m_config_handle
, PCI_CONF_COMM
, cmdreg
);
2361 mptsas_dma_alloc(mptsas_t
*mpt
, mptsas_dma_alloc_state_t
*dma_statep
)
2363 ddi_dma_attr_t attrs
;
2365 attrs
= mpt
->m_io_dma_attr
;
2366 attrs
.dma_attr_sgllen
= 1;
2368 ASSERT(dma_statep
!= NULL
);
2370 if (mptsas_dma_addr_create(mpt
, attrs
, &dma_statep
->handle
,
2371 &dma_statep
->accessp
, &dma_statep
->memp
, dma_statep
->size
,
2372 &dma_statep
->cookie
) == FALSE
) {
2373 return (DDI_FAILURE
);
2376 return (DDI_SUCCESS
);
2380 mptsas_dma_free(mptsas_dma_alloc_state_t
*dma_statep
)
2382 ASSERT(dma_statep
!= NULL
);
2383 mptsas_dma_addr_destroy(&dma_statep
->handle
, &dma_statep
->accessp
);
2384 dma_statep
->size
= 0;
/*
 * mptsas_do_dma: allocate a temporary message-frame DMA buffer of "size"
 * bytes, invoke "callback" against it (passing "var" through), check the
 * FMA DMA/access handles, and tear the buffer down again.  Caller must
 * hold m_mutex (asserted below).
 * NOTE(review): extraction-damaged — the declarations of memp/rval and the
 * final return of rval are missing from this view; presumably rval is
 * forced to DDI_FAILURE on an FM handle-check failure — confirm upstream.
 */
2388 mptsas_do_dma(mptsas_t
*mpt
, uint32_t size
, int var
, int (*callback
)())
2390 ddi_dma_attr_t attrs
;
2391 ddi_dma_handle_t dma_handle
;
2393 ddi_acc_handle_t accessp
;
2396 ASSERT(mutex_owned(&mpt
->m_mutex
));
/* Message DMA attributes, single cookie, granularity = buffer size. */
2398 attrs
= mpt
->m_msg_dma_attr
;
2399 attrs
.dma_attr_sgllen
= 1;
2400 attrs
.dma_attr_granular
= size
;
2402 if (mptsas_dma_addr_create(mpt
, attrs
, &dma_handle
,
2403 &accessp
, &memp
, size
, NULL
) == FALSE
) {
2404 return (DDI_FAILURE
);
/* Run the caller-supplied work against the bound buffer. */
2407 rval
= (*callback
) (mpt
, memp
, var
, accessp
);
2409 if ((mptsas_check_dma_handle(dma_handle
) != DDI_SUCCESS
) ||
2410 (mptsas_check_acc_handle(accessp
) != DDI_SUCCESS
)) {
2411 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
/* Always unbind and free the temporary buffer. */
2415 mptsas_dma_addr_destroy(&dma_handle
, &accessp
);
/*
 * mptsas_alloc_request_frames: (re)allocate the System Request Message
 * Frame pool (m_max_requests * m_req_frame_size bytes, 16-byte aligned,
 * single DMA cookie), record both the DMA and kernel addresses, and zero
 * it.  Any previous pool is destroyed first.
 * NOTE(review): local declarations (mem_size, memp) were lost in
 * extraction.
 */
2421 mptsas_alloc_request_frames(mptsas_t
*mpt
)
2423 ddi_dma_attr_t frame_dma_attrs
;
2425 ddi_dma_cookie_t cookie
;
2429 * re-alloc when it has already alloced
2431 mptsas_dma_addr_destroy(&mpt
->m_dma_req_frame_hdl
,
2432 &mpt
->m_acc_req_frame_hdl
);
2435 * The size of the request frame pool is:
2436 * Number of Request Frames * Request Frame Size
2438 mem_size
= mpt
->m_max_requests
* mpt
->m_req_frame_size
;
2441 * set the DMA attributes. System Request Message Frames must be
2442 * aligned on a 16-byte boundary.
2444 frame_dma_attrs
= mpt
->m_msg_dma_attr
;
2445 frame_dma_attrs
.dma_attr_align
= 16;
2446 frame_dma_attrs
.dma_attr_sgllen
= 1;
2449 * allocate the request frame pool.
2451 if (mptsas_dma_addr_create(mpt
, frame_dma_attrs
,
2452 &mpt
->m_dma_req_frame_hdl
, &mpt
->m_acc_req_frame_hdl
, &memp
,
2453 mem_size
, &cookie
) == FALSE
) {
2454 return (DDI_FAILURE
);
2458 * Store the request frame memory address. This chip uses this
2459 * address to dma to and from the driver's frame. The second
2460 * address is the address mpt uses to fill in the frame.
2462 mpt
->m_req_frame_dma_addr
= cookie
.dmac_laddress
;
2463 mpt
->m_req_frame
= memp
;
2466 * Clear the request frame pool.
2468 bzero(mpt
->m_req_frame
, mem_size
);
2470 return (DDI_SUCCESS
);
/*
 * mptsas_alloc_reply_frames: (re)allocate the System Reply Message Frame
 * pool (m_max_replies * m_reply_frame_size bytes, default 4-byte
 * alignment, single DMA cookie), record both addresses, and zero it.
 * NOTE(review): local declarations (mem_size, memp) were lost in
 * extraction.
 */
2474 mptsas_alloc_reply_frames(mptsas_t
*mpt
)
2476 ddi_dma_attr_t frame_dma_attrs
;
2478 ddi_dma_cookie_t cookie
;
2482 * re-alloc when it has already alloced
2484 mptsas_dma_addr_destroy(&mpt
->m_dma_reply_frame_hdl
,
2485 &mpt
->m_acc_reply_frame_hdl
);
2488 * The size of the reply frame pool is:
2489 * Number of Reply Frames * Reply Frame Size
2491 mem_size
= mpt
->m_max_replies
* mpt
->m_reply_frame_size
;
2494 * set the DMA attributes. System Reply Message Frames must be
2495 * aligned on a 4-byte boundary. This is the default.
2497 frame_dma_attrs
= mpt
->m_msg_dma_attr
;
2498 frame_dma_attrs
.dma_attr_sgllen
= 1;
2501 * allocate the reply frame pool
2503 if (mptsas_dma_addr_create(mpt
, frame_dma_attrs
,
2504 &mpt
->m_dma_reply_frame_hdl
, &mpt
->m_acc_reply_frame_hdl
, &memp
,
2505 mem_size
, &cookie
) == FALSE
) {
2506 return (DDI_FAILURE
);
2510 * Store the reply frame memory address. This chip uses this
2511 * address to dma to and from the driver's frame. The second
2512 * address is the address mpt uses to process the frame.
2514 mpt
->m_reply_frame_dma_addr
= cookie
.dmac_laddress
;
2515 mpt
->m_reply_frame
= memp
;
2518 * Clear the reply frame pool.
2520 bzero(mpt
->m_reply_frame
, mem_size
);
2522 return (DDI_SUCCESS
);
/*
 * mptsas_alloc_free_queue: (re)allocate the Reply Free Queue
 * (m_free_queue_depth 32-bit entries, 16-byte aligned, single DMA
 * cookie), record both addresses, and zero it.
 * NOTE(review): local declarations (mem_size, memp) were lost in
 * extraction.
 */
2526 mptsas_alloc_free_queue(mptsas_t
*mpt
)
2528 ddi_dma_attr_t frame_dma_attrs
;
2530 ddi_dma_cookie_t cookie
;
2534 * re-alloc when it has already alloced
2536 mptsas_dma_addr_destroy(&mpt
->m_dma_free_queue_hdl
,
2537 &mpt
->m_acc_free_queue_hdl
);
2540 * The reply free queue size is:
2541 * Reply Free Queue Depth * 4
2542 * The "4" is the size of one 32 bit address (low part of 64-bit
2545 mem_size
= mpt
->m_free_queue_depth
* 4;
2548 * set the DMA attributes The Reply Free Queue must be aligned on a
2551 frame_dma_attrs
= mpt
->m_msg_dma_attr
;
2552 frame_dma_attrs
.dma_attr_align
= 16;
2553 frame_dma_attrs
.dma_attr_sgllen
= 1;
2556 * allocate the reply free queue
2558 if (mptsas_dma_addr_create(mpt
, frame_dma_attrs
,
2559 &mpt
->m_dma_free_queue_hdl
, &mpt
->m_acc_free_queue_hdl
, &memp
,
2560 mem_size
, &cookie
) == FALSE
) {
2561 return (DDI_FAILURE
);
2565 * Store the reply free queue memory address. This chip uses this
2566 * address to read from the reply free queue. The second address
2567 * is the address mpt uses to manage the queue.
2569 mpt
->m_free_queue_dma_addr
= cookie
.dmac_laddress
;
2570 mpt
->m_free_queue
= memp
;
2573 * Clear the reply free queue memory.
2575 bzero(mpt
->m_free_queue
, mem_size
);
2577 return (DDI_SUCCESS
);
/*
 * mptsas_alloc_post_queue: (re)allocate the Reply Descriptor Post Queue
 * (m_post_queue_depth 8-byte descriptors, 16-byte aligned, single DMA
 * cookie), record both addresses, and zero it.
 * NOTE(review): local declarations (mem_size, memp) were lost in
 * extraction.
 */
2581 mptsas_alloc_post_queue(mptsas_t
*mpt
)
2583 ddi_dma_attr_t frame_dma_attrs
;
2585 ddi_dma_cookie_t cookie
;
2589 * re-alloc when it has already alloced
2591 mptsas_dma_addr_destroy(&mpt
->m_dma_post_queue_hdl
,
2592 &mpt
->m_acc_post_queue_hdl
);
2595 * The reply descriptor post queue size is:
2596 * Reply Descriptor Post Queue Depth * 8
2597 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2599 mem_size
= mpt
->m_post_queue_depth
* 8;
2602 * set the DMA attributes. The Reply Descriptor Post Queue must be
2603 * aligned on a 16-byte boundary.
2605 frame_dma_attrs
= mpt
->m_msg_dma_attr
;
2606 frame_dma_attrs
.dma_attr_align
= 16;
2607 frame_dma_attrs
.dma_attr_sgllen
= 1;
2610 * allocate the reply post queue
2612 if (mptsas_dma_addr_create(mpt
, frame_dma_attrs
,
2613 &mpt
->m_dma_post_queue_hdl
, &mpt
->m_acc_post_queue_hdl
, &memp
,
2614 mem_size
, &cookie
) == FALSE
) {
2615 return (DDI_FAILURE
);
2619 * Store the reply descriptor post queue memory address. This chip
2620 * uses this address to write to the reply descriptor post queue. The
2621 * second address is the address mpt uses to manage the queue.
2623 mpt
->m_post_queue_dma_addr
= cookie
.dmac_laddress
;
2624 mpt
->m_post_queue
= memp
;
2627 * Clear the reply post queue memory.
2629 bzero(mpt
->m_post_queue
, mem_size
);
2631 return (DDI_SUCCESS
);
2635 mptsas_alloc_reply_args(mptsas_t
*mpt
)
2637 if (mpt
->m_replyh_args
!= NULL
) {
2638 kmem_free(mpt
->m_replyh_args
, sizeof (m_replyh_arg_t
)
2639 * mpt
->m_max_replies
);
2640 mpt
->m_replyh_args
= NULL
;
2642 mpt
->m_replyh_args
= kmem_zalloc(sizeof (m_replyh_arg_t
) *
2643 mpt
->m_max_replies
, KM_SLEEP
);
2647 mptsas_alloc_extra_sgl_frame(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
2649 mptsas_cache_frames_t
*frames
= NULL
;
2650 if (cmd
->cmd_extra_frames
== NULL
) {
2651 frames
= kmem_cache_alloc(mpt
->m_cache_frames
, KM_NOSLEEP
);
2652 if (frames
== NULL
) {
2653 return (DDI_FAILURE
);
2655 cmd
->cmd_extra_frames
= frames
;
2657 return (DDI_SUCCESS
);
2661 mptsas_free_extra_sgl_frame(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
2663 if (cmd
->cmd_extra_frames
) {
2664 kmem_cache_free(mpt
->m_cache_frames
,
2665 (void *)cmd
->cmd_extra_frames
);
2666 cmd
->cmd_extra_frames
= NULL
;
/*
 * mptsas_cfg_fini: unmap the device register mapping (m_datap) set up
 * during attach.
 */
2671 mptsas_cfg_fini(mptsas_t
*mpt
)
2673 NDBG0(("mptsas_cfg_fini"));
2674 ddi_regs_map_free(&mpt
->m_datap
);
/*
 * mptsas_hba_fini: tear down all DMA memory created for the HBA — the
 * request/reply frame pools and the reply free/post queues — and free
 * the reply handler argument array.
 */
2678 mptsas_hba_fini(mptsas_t
*mpt
)
2680 NDBG0(("mptsas_hba_fini"));
2683 * Free up any allocated memory
2685 mptsas_dma_addr_destroy(&mpt
->m_dma_req_frame_hdl
,
2686 &mpt
->m_acc_req_frame_hdl
);
2688 mptsas_dma_addr_destroy(&mpt
->m_dma_reply_frame_hdl
,
2689 &mpt
->m_acc_reply_frame_hdl
);
2691 mptsas_dma_addr_destroy(&mpt
->m_dma_free_queue_hdl
,
2692 &mpt
->m_acc_free_queue_hdl
);
2694 mptsas_dma_addr_destroy(&mpt
->m_dma_post_queue_hdl
,
2695 &mpt
->m_acc_post_queue_hdl
);
2697 if (mpt
->m_replyh_args
!= NULL
) {
2698 kmem_free(mpt
->m_replyh_args
, sizeof (m_replyh_arg_t
)
2699 * mpt
->m_max_replies
);
/*
 * mptsas_name_child: build the unit-address string for a child LUN node
 * into "name" (length "len"): "pPHY,LUN" for direct-attached SATA
 * (sata-phy property present) or "wWWN,LUN" from the target-port
 * property.  Fails when neither addressing property exists.
 * NOTE(review): extraction-damaged — the declarations of lun/phynum/
 * reallen, the LUN_PROP property name, and some return paths are missing
 * from this view.
 */
2704 mptsas_name_child(dev_info_t
*lun_dip
, char *name
, int len
)
2707 char *sas_wwn
= NULL
;
2711 /* Get the target num */
2712 lun
= ddi_prop_get_int(DDI_DEV_T_ANY
, lun_dip
, DDI_PROP_DONTPASS
,
2715 if ((phynum
= ddi_prop_get_int(DDI_DEV_T_ANY
, lun_dip
,
2716 DDI_PROP_DONTPASS
, "sata-phy", -1)) != -1) {
2718 * Stick in the address of form "pPHY,LUN"
2720 reallen
= snprintf(name
, len
, "p%x,%x", phynum
, lun
);
2721 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, lun_dip
,
2722 DDI_PROP_DONTPASS
, SCSI_ADDR_PROP_TARGET_PORT
, &sas_wwn
)
2723 == DDI_PROP_SUCCESS
) {
2725 * Stick in the address of the form "wWWN,LUN"
2727 reallen
= snprintf(name
, len
, "%s,%x", sas_wwn
, lun
);
2728 ddi_prop_free(sas_wwn
);
2730 return (DDI_FAILURE
);
/* snprintf truncation means the caller's buffer was undersized. */
2733 ASSERT(reallen
< len
);
2734 if (reallen
>= len
) {
2735 mptsas_log(0, CE_WARN
, "!mptsas_get_name: name parameter "
2736 "length too small, it needs to be %d bytes", reallen
+ 1);
2738 return (DDI_SUCCESS
);
/*
 * tran_tgt_init(9E) implementation: validate the child node, resolve its
 * LUN and SAS WWN (via MDI path properties for vHCI clients, or devinfo
 * properties otherwise), look up the target in the active target table,
 * attach per-target private data to the tran, and for (S)ATA devices
 * fetch VPD page 0x89 to override the SCSA inquiry-* properties with the
 * real ATA model/firmware strings.
 * NOTE(review): extraction-damaged — the mpt/phymask/rval/reallen/vid/pid
 * declarations, several property-name constants, and multiple braces and
 * failure branches are missing from this view; consult upstream before
 * modifying.
 */
2742 * tran_tgt_init(9E) - target device instance initialization
2745 mptsas_scsi_tgt_init(dev_info_t
*hba_dip
, dev_info_t
*tgt_dip
,
2746 scsi_hba_tran_t
*hba_tran
, struct scsi_device
*sd
)
2749 _NOTE(ARGUNUSED(hba_tran
))
2753 * At this point, the scsi_device structure already exists
2754 * and has been initialized.
2756 * Use this function to allocate target-private data structures,
2757 * if needed by this HBA. Add revised flow-control and queue
2758 * properties for child here, if desired and if you can tell they
2759 * support tagged queueing by now.
2762 int lun
= sd
->sd_address
.a_lun
;
2763 mdi_pathinfo_t
*pip
= NULL
;
2764 mptsas_tgt_private_t
*tgt_private
= NULL
;
2765 mptsas_target_t
*ptgt
= NULL
;
2766 char *psas_wwn
= NULL
;
2768 uint64_t sas_wwn
= 0;
2771 ASSERT(scsi_hba_iport_unit_address(hba_dip
) != 0);
2773 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
2774 (void *)hba_dip
, (void *)tgt_dip
, lun
));
/* Reject non-persistent (.conf duplicate) nodes after merging. */
2776 if (ndi_dev_is_persistent_node(tgt_dip
) == 0) {
2777 (void) ndi_merge_node(tgt_dip
, mptsas_name_child
);
2778 ddi_set_name_addr(tgt_dip
, NULL
);
2779 return (DDI_FAILURE
);
2782 * phymask is 0 means the virtual port for RAID
2784 phymask
= ddi_prop_get_int(DDI_DEV_T_ANY
, hba_dip
, 0,
/* vHCI client path: LUN and WWN come from the pathinfo node. */
2786 if (mdi_component_is_client(tgt_dip
, NULL
) == MDI_SUCCESS
) {
2787 if ((pip
= (void *)(sd
->sd_private
)) == NULL
) {
2789 * Very bad news if this occurs. Somehow scsi_vhci has
2790 * lost the pathinfo node for this target.
2792 return (DDI_NOT_WELL_FORMED
);
2795 if (mdi_prop_lookup_int(pip
, LUN_PROP
, &lun
) !=
2797 mptsas_log(mpt
, CE_WARN
, "Get lun property failed\n");
2798 return (DDI_FAILURE
);
2801 if (mdi_prop_lookup_string(pip
, SCSI_ADDR_PROP_TARGET_PORT
,
2802 &psas_wwn
) == MDI_SUCCESS
) {
2803 if (scsi_wwnstr_to_wwn(psas_wwn
, &sas_wwn
)) {
2806 (void) mdi_prop_free(psas_wwn
);
/* Non-vHCI path: LUN and WWN come from devinfo properties. */
2809 lun
= ddi_prop_get_int(DDI_DEV_T_ANY
, tgt_dip
,
2810 DDI_PROP_DONTPASS
, LUN_PROP
, 0);
2811 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, tgt_dip
,
2812 DDI_PROP_DONTPASS
, SCSI_ADDR_PROP_TARGET_PORT
, &psas_wwn
) ==
2814 if (scsi_wwnstr_to_wwn(psas_wwn
, &sas_wwn
)) {
2817 ddi_prop_free(psas_wwn
);
2822 ASSERT((sas_wwn
!= 0) || (phymask
!= 0));
2823 mutex_enter(&mpt
->m_mutex
);
2824 ptgt
= mptsas_hash_search(&mpt
->m_active
->m_tgttbl
, sas_wwn
, phymask
);
2825 mutex_exit(&mpt
->m_mutex
);
2827 mptsas_log(mpt
, CE_WARN
, "!tgt_init: target doesn't exist or "
2828 "gone already! phymask:%x, saswwn %"PRIx64
, phymask
,
2830 return (DDI_FAILURE
);
2832 if (hba_tran
->tran_tgt_private
== NULL
) {
2833 tgt_private
= kmem_zalloc(sizeof (mptsas_tgt_private_t
),
2835 tgt_private
->t_lun
= lun
;
2836 tgt_private
->t_private
= ptgt
;
2837 hba_tran
->tran_tgt_private
= tgt_private
;
/* vHCI clients are done; SATA inquiry fixup below is phys-path only. */
2840 if (mdi_component_is_client(tgt_dip
, NULL
) == MDI_SUCCESS
) {
2841 return (DDI_SUCCESS
);
2843 mutex_enter(&mpt
->m_mutex
);
2845 if (ptgt
->m_deviceinfo
&
2846 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE
|
2847 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE
)) {
2848 uchar_t
*inq89
= NULL
;
2849 int inq89_len
= 0x238;
2852 struct sata_id
*sid
= NULL
;
2853 char model
[SATA_ID_MODEL_LEN
+ 1];
2854 char fw
[SATA_ID_FW_LEN
+ 1];
2858 mutex_exit(&mpt
->m_mutex
);
2860 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
2861 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
2862 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
2864 inq89
= kmem_zalloc(inq89_len
, KM_SLEEP
);
2865 rval
= mptsas_inquiry(mpt
, ptgt
, 0, 0x89,
2866 inq89
, inq89_len
, &reallen
, 1);
/* Inquiry failed: free buffer, warn, but still succeed tgt_init. */
2869 if (inq89
!= NULL
) {
2870 kmem_free(inq89
, inq89_len
);
2873 mptsas_log(mpt
, CE_WARN
, "!mptsas request inquiry page "
2874 "0x89 for SATA target:%x failed!", ptgt
->m_devhdl
);
2875 return (DDI_SUCCESS
);
/* ATA IDENTIFY data starts at byte 60 of the VPD 0x89 payload. */
2877 sid
= (void *)(&inq89
[60]);
2879 swab(sid
->ai_model
, model
, SATA_ID_MODEL_LEN
);
2880 swab(sid
->ai_fw
, fw
, SATA_ID_FW_LEN
);
2882 model
[SATA_ID_MODEL_LEN
] = 0;
2883 fw
[SATA_ID_FW_LEN
] = 0;
2886 * split model into vid/pid
2888 for (i
= 0, pid
= model
; i
< SATA_ID_MODEL_LEN
; i
++, pid
++)
2889 if ((*pid
== ' ') || (*pid
== '\t'))
2891 if (i
< SATA_ID_MODEL_LEN
) {
2894 * terminate vid, establish pid
2899 * vid will stay "ATA ", the rule is same
2900 * as sata framework implementation.
2910 * override SCSA "inquiry-*" properties
2913 (void) scsi_device_prop_update_inqstring(sd
,
2914 INQUIRY_VENDOR_ID
, vid
, strlen(vid
));
2916 (void) scsi_device_prop_update_inqstring(sd
,
2917 INQUIRY_PRODUCT_ID
, pid
, strlen(pid
));
2918 (void) scsi_device_prop_update_inqstring(sd
,
2919 INQUIRY_REVISION_ID
, fw
, strlen(fw
));
2921 if (inq89
!= NULL
) {
2922 kmem_free(inq89
, inq89_len
);
2925 mutex_exit(&mpt
->m_mutex
);
2928 return (DDI_SUCCESS
);
2931 * tran_tgt_free(9E) - target device instance deallocation
2934 mptsas_scsi_tgt_free(dev_info_t
*hba_dip
, dev_info_t
*tgt_dip
,
2935 scsi_hba_tran_t
*hba_tran
, struct scsi_device
*sd
)
2938 _NOTE(ARGUNUSED(hba_dip
, tgt_dip
, hba_tran
, sd
))
2941 mptsas_tgt_private_t
*tgt_private
= hba_tran
->tran_tgt_private
;
2943 if (tgt_private
!= NULL
) {
2944 kmem_free(tgt_private
, sizeof (mptsas_tgt_private_t
));
2945 hba_tran
->tran_tgt_private
= NULL
;
/*
 * tran_start(9E) implementation: prepare the packet, then either fail,
 * stall (TRAN_BUSY), or hand the command to mptsas_accept_pkt() depending
 * on the target's dynamic-reconfiguration state.
 * NOTE(review): extraction-damaged — the rval declaration, the TRAN_BUSY
 * return in the DR-in-transition branch, and several closing braces are
 * missing from this view.
 */
2952 * Visible to the external world via the transport structure.
2957 * - transport the command to the addressed SCSI target/lun device
2958 * - normal operation is to schedule the command to be transported,
2959 * and return TRAN_ACCEPT if this is successful.
2960 * - if NO_INTR, tran_start must poll device for command completion
2963 mptsas_scsi_start(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
2966 _NOTE(ARGUNUSED(ap
))
2968 mptsas_t
*mpt
= PKT2MPT(pkt
);
2969 mptsas_cmd_t
*cmd
= PKT2CMD(pkt
);
2971 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
2973 NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt
));
2976 return (TRAN_FATAL_ERROR
);
2979 * prepare the pkt before taking mutex.
2981 rval
= mptsas_prepare_pkt(cmd
);
2982 if (rval
!= TRAN_ACCEPT
) {
2987 * Send the command to target/lun, however your HBA requires it.
2988 * If busy, return TRAN_BUSY; if there's some other formatting error
2989 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
2990 * return of TRAN_ACCEPT.
2992 * Remember that access to shared resources, including the mptsas_t
2993 * data structure and the HBA hardware registers, must be protected
2994 * with mutexes, here and everywhere.
2996 * Also remember that at interrupt time, you'll get an argument
2997 * to the interrupt handler which is a pointer to your mptsas_t
2998 * structure; you'll have to remember which commands are outstanding
2999 * and which scsi_pkt is the currently-running command so the
3000 * interrupt handler can refer to the pkt to set completion
3001 * status, call the target driver back through pkt_comp, etc.
3004 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
3005 if (ptgt
->m_dr_flag
== MPTSAS_DR_INTRANSITION
) {
3006 if (cmd
->cmd_pkt_flags
& FLAG_NOQUEUE
) {
3008 * commands should be allowed to retry by
3009 * returning TRAN_BUSY to stall the I/O's
3010 * which come from scsi_vhci since the device/
3011 * path is in unstable state now.
3013 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
3017 * The device is offline, just fail the
3018 * command by returning TRAN_FATAL_ERROR.
3020 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
3021 return (TRAN_FATAL_ERROR
);
3024 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
3025 rval
= mptsas_accept_pkt(mpt
, cmd
);
/*
 * mptsas_accept_pkt: second stage of command submission.  Ensures the
 * packet is prepared, resets a drained throttle, rejects commands whose
 * device handle is invalid (returning TRAN_BUSY during HBA reset, else
 * TRAN_FATAL_ERROR), and then either starts the command directly when
 * the target has headroom or queues it on the wait queue, polling when
 * FLAG_NOINTR is set.
 * NOTE(review): extraction-damaged — several return statements
 * (e.g. the TRAN_BUSY returns), the STAT_BUS_RESET argument, and many
 * closing braces are missing from this view; the lock hand-offs between
 * m_tgt_intr_mutex and m_mutex are order-sensitive.
 */
3031 mptsas_accept_pkt(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
3033 int rval
= TRAN_ACCEPT
;
3034 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
3036 NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd
));
3038 if ((cmd
->cmd_flags
& CFLAG_PREPARED
) == 0) {
3039 rval
= mptsas_prepare_pkt(cmd
);
3040 if (rval
!= TRAN_ACCEPT
) {
3041 cmd
->cmd_flags
&= ~CFLAG_TRANFLAG
;
3047 * reset the throttle if we were draining
3049 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
3050 if ((ptgt
->m_t_ncmds
== 0) &&
3051 (ptgt
->m_t_throttle
== DRAIN_THROTTLE
)) {
3052 NDBG23(("reset throttle"));
3053 ASSERT(ptgt
->m_reset_delay
== 0);
3054 mptsas_set_throttle(mpt
, ptgt
, MAX_THROTTLE
);
3058 * If device handle has already been invalidated, just
3059 * fail the command. In theory, command from scsi_vhci
3060 * client is impossible send down command with invalid
3061 * devhdl since devhdl is set after path offline, target
3062 * driver is not suppose to select a offlined path.
3064 if (ptgt
->m_devhdl
== MPTSAS_INVALID_DEVHDL
) {
3065 NDBG20(("rejecting command, it might because invalid devhdl "
3067 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
3068 mutex_enter(&mpt
->m_mutex
);
3070 * If HBA is being reset, the DevHandles are being
3071 * re-initialized, which means that they could be invalid
3072 * even if the target is still attached. Check if being reset
3073 * and if DevHandle is being re-initialized. If this is the
3074 * case, return BUSY so the I/O can be retried later.
3076 if (mpt
->m_in_reset
) {
3077 mptsas_set_pkt_reason(mpt
, cmd
, CMD_RESET
,
3079 if (cmd
->cmd_flags
& CFLAG_TXQ
) {
3080 mptsas_doneq_add(mpt
, cmd
);
3081 mptsas_doneq_empty(mpt
);
3082 mutex_exit(&mpt
->m_mutex
);
3085 mutex_exit(&mpt
->m_mutex
);
3089 mptsas_set_pkt_reason(mpt
, cmd
, CMD_DEV_GONE
, STAT_TERMINATED
);
3090 if (cmd
->cmd_flags
& CFLAG_TXQ
) {
3091 mptsas_doneq_add(mpt
, cmd
);
3092 mptsas_doneq_empty(mpt
);
3093 mutex_exit(&mpt
->m_mutex
);
3096 mutex_exit(&mpt
->m_mutex
);
3097 return (TRAN_FATAL_ERROR
);
3100 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
3102 * The first case is the normal case. mpt gets a command from the
3103 * target driver and starts it.
3104 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
3105 * commands is m_max_requests - 2.
3107 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
3108 if ((ptgt
->m_t_throttle
> HOLD_THROTTLE
) &&
3109 (ptgt
->m_t_ncmds
< ptgt
->m_t_throttle
) &&
3110 (ptgt
->m_reset_delay
== 0) &&
3111 (ptgt
->m_t_nwait
== 0) &&
3112 ((cmd
->cmd_pkt_flags
& FLAG_NOINTR
) == 0)) {
3113 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
3114 if (mptsas_save_cmd(mpt
, cmd
) == TRUE
) {
3115 (void) mptsas_start_cmd0(mpt
, cmd
);
3117 mutex_enter(&mpt
->m_mutex
);
3118 mptsas_waitq_add(mpt
, cmd
);
3119 mutex_exit(&mpt
->m_mutex
);
3123 * Add this pkt to the work queue
3125 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
3126 mutex_enter(&mpt
->m_mutex
);
3127 mptsas_waitq_add(mpt
, cmd
);
3129 if (cmd
->cmd_pkt_flags
& FLAG_NOINTR
) {
3130 (void) mptsas_poll(mpt
, cmd
, MPTSAS_POLL_TIME
);
3133 * Only flush the doneq if this is not a TM
3134 * cmd. For TM cmds the flushing of the
3135 * doneq will be done in those routines.
3137 if ((cmd
->cmd_flags
& CFLAG_TM_CMD
) == 0) {
3138 mptsas_doneq_empty(mpt
);
3141 mutex_exit(&mpt
->m_mutex
);
/*
 * mptsas_save_cmd: allocate a request slot (SMID) for "cmd" from the
 * per-CPU slot free-queue pairs.  The alloc queue is tried first; when
 * empty, the release queue is swapped in wholesale by splicing its list
 * head into the alloc queue; when both are empty the next queue pair is
 * tried, wrapping around.  On success the SMID is stored in cmd_slot,
 * per-target ncmds is bumped (unless CFLAG_CMDIOC), and the active
 * timeout is primed.
 * NOTE(review): extraction-damaged — the declarations of qn/qn_first/
 * slot, the FALSE return when all queues are exhausted, the slot
 * computation from "pe", and several braces are missing from this view.
 * The manual list_head splice below is extremely order-sensitive; do not
 * restructure without the upstream source.
 */
3147 mptsas_save_cmd(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
3149 mptsas_slots_t
*slots
;
3151 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
3152 mptsas_slot_free_e_t
*pe
;
3155 slots
= mpt
->m_active
;
3158 * Account for reserved TM request slot and reserved SMID of 0.
3160 ASSERT(slots
->m_n_slots
== (mpt
->m_max_requests
- 2));
/* Start with the queue pair hashed from the current CPU. */
3162 qn
= qn_first
= CPU
->cpu_seqid
& (mpt
->m_slot_freeq_pair_n
- 1);
3165 ASSERT(qn
< mpt
->m_slot_freeq_pair_n
);
3166 mutex_enter(&mpt
->m_slot_freeq_pairp
[qn
].m_slot_allocq
.s
.m_fq_mutex
);
3167 pe
= list_head(&mpt
->m_slot_freeq_pairp
[qn
].m_slot_allocq
.
3169 if (!pe
) { /* switch the allocq and releq */
3170 mutex_enter(&mpt
->m_slot_freeq_pairp
[qn
].m_slot_releq
.
3172 if (mpt
->m_slot_freeq_pairp
[qn
].m_slot_releq
.s
.m_fq_n
) {
3173 mpt
->m_slot_freeq_pairp
[qn
].
3174 m_slot_allocq
.s
.m_fq_n
=
3175 mpt
->m_slot_freeq_pairp
[qn
].
3176 m_slot_releq
.s
.m_fq_n
;
3177 mpt
->m_slot_freeq_pairp
[qn
].
3178 m_slot_allocq
.s
.m_fq_list
.list_head
.list_next
=
3179 mpt
->m_slot_freeq_pairp
[qn
].
3180 m_slot_releq
.s
.m_fq_list
.list_head
.list_next
;
3181 mpt
->m_slot_freeq_pairp
[qn
].
3182 m_slot_allocq
.s
.m_fq_list
.list_head
.list_prev
=
3183 mpt
->m_slot_freeq_pairp
[qn
].
3184 m_slot_releq
.s
.m_fq_list
.list_head
.list_prev
;
3185 mpt
->m_slot_freeq_pairp
[qn
].
3186 m_slot_releq
.s
.m_fq_list
.list_head
.list_prev
->
3188 &mpt
->m_slot_freeq_pairp
[qn
].
3189 m_slot_allocq
.s
.m_fq_list
.list_head
;
3190 mpt
->m_slot_freeq_pairp
[qn
].
3191 m_slot_releq
.s
.m_fq_list
.list_head
.list_next
->
3193 &mpt
->m_slot_freeq_pairp
[qn
].
3194 m_slot_allocq
.s
.m_fq_list
.list_head
;
/* Reset the drained release queue to an empty circular list. */
3196 mpt
->m_slot_freeq_pairp
[qn
].
3197 m_slot_releq
.s
.m_fq_list
.list_head
.list_next
=
3198 mpt
->m_slot_freeq_pairp
[qn
].
3199 m_slot_releq
.s
.m_fq_list
.list_head
.list_prev
=
3200 &mpt
->m_slot_freeq_pairp
[qn
].
3201 m_slot_releq
.s
.m_fq_list
.list_head
;
3202 mpt
->m_slot_freeq_pairp
[qn
].
3203 m_slot_releq
.s
.m_fq_n
= 0;
3205 mutex_exit(&mpt
->m_slot_freeq_pairp
[qn
].
3206 m_slot_releq
.s
.m_fq_mutex
);
3207 mutex_exit(&mpt
->m_slot_freeq_pairp
[qn
].
3208 m_slot_allocq
.s
.m_fq_mutex
);
/* Both queues empty: advance to the next pair (power-of-two wrap). */
3209 qn
= (qn
+ 1) & (mpt
->m_slot_freeq_pair_n
- 1);
3215 mutex_exit(&mpt
->m_slot_freeq_pairp
[qn
].
3216 m_slot_releq
.s
.m_fq_mutex
);
3217 pe
= list_head(&mpt
->m_slot_freeq_pairp
[qn
].
3218 m_slot_allocq
.s
.m_fq_list
);
3221 list_remove(&mpt
->m_slot_freeq_pairp
[qn
].
3222 m_slot_allocq
.s
.m_fq_list
, pe
);
3225 * Make sure SMID is not using reserved value of 0
3226 * and the TM request slot.
3228 ASSERT((slot
> 0) && (slot
<= slots
->m_n_slots
) &&
3229 mpt
->m_slot_freeq_pairp
[qn
].m_slot_allocq
.s
.m_fq_n
> 0);
3230 cmd
->cmd_slot
= slot
;
3231 mpt
->m_slot_freeq_pairp
[qn
].m_slot_allocq
.s
.m_fq_n
--;
3232 ASSERT(mpt
->m_slot_freeq_pairp
[qn
].m_slot_allocq
.s
.m_fq_n
>= 0);
3234 mutex_exit(&mpt
->m_slot_freeq_pairp
[qn
].m_slot_allocq
.s
.m_fq_mutex
);
3236 * only increment per target ncmds if this is not a
3237 * command that has no target associated with it (i.e. a
3238 * event acknowledgment)
3240 if ((cmd
->cmd_flags
& CFLAG_CMDIOC
) == 0) {
3241 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
3243 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
3245 cmd
->cmd_active_timeout
= cmd
->cmd_pkt
->pkt_time
;
3248 * If initial timeout is less than or equal to one tick, bump
3249 * the timeout by a tick so that command doesn't timeout before
3250 * its allotted time.
3252 if (cmd
->cmd_active_timeout
<= mptsas_scsi_watchdog_tick
) {
3253 cmd
->cmd_active_timeout
+= mptsas_scsi_watchdog_tick
;
/*
 * mptsas_prepare_pkt: reinitialize a (possibly resubmitted or reused)
 * scsi_pkt before transport — reset pkt_reason/pkt_statistics and the
 * status byte, recompute pkt_resid from the bound DMA count, sync
 * consistent outbound buffers for the device, and mark the command
 * PREPARED | IN_TRANSPORT.  Returns TRAN_ACCEPT.
 * NOTE(review): extraction-damaged — the left-hand side of the final
 * cmd_flags assignment and several closing braces are missing from this
 * view.
 */
3260 * the pkt may have been resubmitted or just reused so
3261 * initialize some fields and do some checks.
3264 mptsas_prepare_pkt(mptsas_cmd_t
*cmd
)
3266 struct scsi_pkt
*pkt
= CMD2PKT(cmd
);
3268 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd
));
3271 * Reinitialize some fields that need it; the packet may
3272 * have been resubmitted
3274 pkt
->pkt_reason
= CMD_CMPLT
;
3276 pkt
->pkt_statistics
= 0;
/* Capture the caller's flags and clear the status byte. */
3279 cmd
->cmd_pkt_flags
= pkt
->pkt_flags
;
3284 *(pkt
->pkt_scbp
) = 0;
3286 if (cmd
->cmd_flags
& CFLAG_DMAVALID
) {
3287 pkt
->pkt_resid
= cmd
->cmd_dmacount
;
3290 * consistent packets need to be sync'ed first
3291 * (only for data going out)
3293 if ((cmd
->cmd_flags
& CFLAG_CMDIOPB
) &&
3294 (cmd
->cmd_flags
& CFLAG_DMASEND
)) {
3295 (void) ddi_dma_sync(cmd
->cmd_dmahandle
, 0, 0,
3296 DDI_DMA_SYNC_FORDEV
);
3301 (cmd
->cmd_flags
& ~(CFLAG_TRANFLAG
)) |
3302 CFLAG_PREPARED
| CFLAG_IN_TRANSPORT
;
3304 return (TRAN_ACCEPT
);
3308 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3310 * One of three possibilities:
3311 * - allocate scsi_pkt
3312 * - allocate scsi_pkt and DMA resources
3313 * - allocate DMA resources to an already-allocated pkt
3315 static struct scsi_pkt
*
3316 mptsas_scsi_init_pkt(struct scsi_address
*ap
, struct scsi_pkt
*pkt
,
3317 struct buf
*bp
, int cmdlen
, int statuslen
, int tgtlen
, int flags
,
3318 int (*callback
)(), caddr_t arg
)
3320 mptsas_cmd_t
*cmd
, *new_cmd
;
3321 mptsas_t
*mpt
= ADDR2MPT(ap
);
3325 #endif /* __sparc */
3326 mptsas_target_t
*ptgt
= NULL
;
3328 mptsas_tgt_private_t
*tgt_private
;
3331 kf
= (callback
== SLEEP_FUNC
)? KM_SLEEP
: KM_NOSLEEP
;
3333 tgt_private
= (mptsas_tgt_private_t
*)ap
->a_hba_tran
->
3335 ASSERT(tgt_private
!= NULL
);
3336 if (tgt_private
== NULL
) {
3339 ptgt
= tgt_private
->t_private
;
3340 ASSERT(ptgt
!= NULL
);
3343 ap
->a_target
= ptgt
->m_devhdl
;
3344 ap
->a_lun
= tgt_private
->t_lun
;
3346 ASSERT(callback
== NULL_FUNC
|| callback
== SLEEP_FUNC
);
3347 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3348 statuslen
*= 100; tgtlen
*= 4;
3350 NDBG3(("mptsas_scsi_init_pkt:\n"
3351 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3352 ap
->a_target
, (void *)pkt
, (void *)bp
,
3353 cmdlen
, statuslen
, tgtlen
, flags
));
3356 * Allocate the new packet.
3359 ddi_dma_handle_t save_dma_handle
;
3360 ddi_dma_handle_t save_arq_dma_handle
;
3361 struct buf
*save_arq_bp
;
3362 ddi_dma_cookie_t save_arqcookie
;
3365 #endif /* __sparc */
3367 cmd
= kmem_cache_alloc(mpt
->m_kmem_cache
, kf
);
3370 save_dma_handle
= cmd
->cmd_dmahandle
;
3371 save_arq_dma_handle
= cmd
->cmd_arqhandle
;
3372 save_arq_bp
= cmd
->cmd_arq_buf
;
3373 save_arqcookie
= cmd
->cmd_arqcookie
;
3375 save_sg
= cmd
->cmd_sg
;
3376 #endif /* __sparc */
3377 bzero(cmd
, sizeof (*cmd
) + scsi_pkt_size());
3378 cmd
->cmd_dmahandle
= save_dma_handle
;
3379 cmd
->cmd_arqhandle
= save_arq_dma_handle
;
3380 cmd
->cmd_arq_buf
= save_arq_bp
;
3381 cmd
->cmd_arqcookie
= save_arqcookie
;
3383 cmd
->cmd_sg
= save_sg
;
3384 #endif /* __sparc */
3385 pkt
= (void *)((uchar_t
*)cmd
+
3386 sizeof (struct mptsas_cmd
));
3387 pkt
->pkt_ha_private
= (opaque_t
)cmd
;
3388 pkt
->pkt_address
= *ap
;
3389 pkt
->pkt_private
= (opaque_t
)cmd
->cmd_pkt_private
;
3390 pkt
->pkt_scbp
= (opaque_t
)&cmd
->cmd_scb
;
3391 pkt
->pkt_cdbp
= (opaque_t
)&cmd
->cmd_cdb
;
3392 cmd
->cmd_pkt
= (struct scsi_pkt
*)pkt
;
3393 cmd
->cmd_cdblen
= (uchar_t
)cmdlen
;
3394 cmd
->cmd_scblen
= statuslen
;
3395 cmd
->cmd_rqslen
= SENSE_LENGTH
;
3396 cmd
->cmd_tgt_addr
= ptgt
;
3400 if (failure
|| (cmdlen
> sizeof (cmd
->cmd_cdb
)) ||
3401 (tgtlen
> PKT_PRIV_LEN
) ||
3402 (statuslen
> EXTCMDS_STATUS_SIZE
)) {
3405 * if extern alloc fails, all will be
3406 * deallocated, including cmd
3408 failure
= mptsas_pkt_alloc_extern(mpt
, cmd
,
3409 cmdlen
, tgtlen
, statuslen
, kf
);
3413 * if extern allocation fails, it will
3414 * deallocate the new pkt as well
3428 /* grab cmd->cmd_cookiec here as oldcookiec */
3430 oldcookiec
= cmd
->cmd_cookiec
;
3431 #endif /* __sparc */
3434 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3435 * greater than 0 and we'll need to grab the next dma window
3438 * SLM-not doing extra command frame right now; may add later
3441 if (cmd
->cmd_nwin
> 0) {
3444 * Make sure we havn't gone past the the total number
3447 if (++cmd
->cmd_winindex
>= cmd
->cmd_nwin
) {
3450 if (ddi_dma_getwin(cmd
->cmd_dmahandle
, cmd
->cmd_winindex
,
3451 &cmd
->cmd_dma_offset
, &cmd
->cmd_dma_len
,
3452 &cmd
->cmd_cookie
, &cmd
->cmd_cookiec
) == DDI_FAILURE
) {
3455 goto get_dma_cookies
;
3459 if (flags
& PKT_XARQ
) {
3460 cmd
->cmd_flags
|= CFLAG_XARQ
;
3464 * DMA resource allocation. This version assumes your
3465 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3466 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3467 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3469 if (bp
&& (bp
->b_bcount
!= 0) &&
3470 (cmd
->cmd_flags
& CFLAG_DMAVALID
) == 0) {
3473 mptti_t
*dmap
; /* ptr to the S/G list */
3476 * Set up DMA memory and position to the next DMA segment.
3478 ASSERT(cmd
->cmd_dmahandle
!= NULL
);
3480 if (bp
->b_flags
& B_READ
) {
3481 dma_flags
= DDI_DMA_READ
;
3482 cmd
->cmd_flags
&= ~CFLAG_DMASEND
;
3484 dma_flags
= DDI_DMA_WRITE
;
3485 cmd
->cmd_flags
|= CFLAG_DMASEND
;
3487 if (flags
& PKT_CONSISTENT
) {
3488 cmd
->cmd_flags
|= CFLAG_CMDIOPB
;
3489 dma_flags
|= DDI_DMA_CONSISTENT
;
3492 if (flags
& PKT_DMA_PARTIAL
) {
3493 dma_flags
|= DDI_DMA_PARTIAL
;
3497 * workaround for byte hole issue on psycho and
3500 if ((bp
->b_flags
& B_READ
) && ((bp
->b_flags
&
3501 (B_PAGEIO
|B_REMAPPED
)) != B_PAGEIO
) &&
3502 ((uintptr_t)bp
->b_un
.b_addr
& 0x7)) {
3503 dma_flags
|= DDI_DMA_CONSISTENT
;
3506 rval
= ddi_dma_buf_bind_handle(cmd
->cmd_dmahandle
, bp
,
3507 dma_flags
, callback
, arg
,
3508 &cmd
->cmd_cookie
, &cmd
->cmd_cookiec
);
3509 if (rval
== DDI_DMA_PARTIAL_MAP
) {
3510 (void) ddi_dma_numwin(cmd
->cmd_dmahandle
,
3512 cmd
->cmd_winindex
= 0;
3513 (void) ddi_dma_getwin(cmd
->cmd_dmahandle
,
3514 cmd
->cmd_winindex
, &cmd
->cmd_dma_offset
,
3515 &cmd
->cmd_dma_len
, &cmd
->cmd_cookie
,
3517 } else if (rval
&& (rval
!= DDI_DMA_MAPPED
)) {
3519 case DDI_DMA_NORESOURCES
:
3522 case DDI_DMA_BADATTR
:
3523 case DDI_DMA_NOMAPPING
:
3524 bioerror(bp
, EFAULT
);
3526 case DDI_DMA_TOOBIG
:
3528 bioerror(bp
, EINVAL
);
3531 cmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
3533 mptsas_scsi_destroy_pkt(ap
, pkt
);
3535 return ((struct scsi_pkt
*)NULL
);
3539 cmd
->cmd_flags
|= CFLAG_DMAVALID
;
3540 ASSERT(cmd
->cmd_cookiec
> 0);
3542 if (cmd
->cmd_cookiec
> MPTSAS_MAX_CMD_SEGS
) {
3543 mptsas_log(mpt
, CE_NOTE
, "large cookiec received %d\n",
3545 bioerror(bp
, EINVAL
);
3547 mptsas_scsi_destroy_pkt(ap
, pkt
);
3549 return ((struct scsi_pkt
*)NULL
);
3553 * Allocate extra SGL buffer if needed.
3555 if ((cmd
->cmd_cookiec
> MPTSAS_MAX_FRAME_SGES64(mpt
)) &&
3556 (cmd
->cmd_extra_frames
== NULL
)) {
3557 if (mptsas_alloc_extra_sgl_frame(mpt
, cmd
) ==
3559 mptsas_log(mpt
, CE_WARN
, "MPT SGL mem alloc "
3561 bioerror(bp
, ENOMEM
);
3563 mptsas_scsi_destroy_pkt(ap
, pkt
);
3565 return ((struct scsi_pkt
*)NULL
);
3570 * Always use scatter-gather transfer
3571 * Use the loop below to store physical addresses of
3572 * DMA segments, from the DMA cookies, into your HBA's
3573 * scatter-gather list.
3574 * We need to ensure we have enough kmem alloc'd
3575 * for the sg entries since we are no longer using an
3576 * array inside mptsas_cmd_t.
3578 * We check cmd->cmd_cookiec against oldcookiec so
3579 * the scatter-gather list is correctly allocated
3582 if (oldcookiec
!= cmd
->cmd_cookiec
) {
3583 if (cmd
->cmd_sg
!= (mptti_t
*)NULL
) {
3584 kmem_free(cmd
->cmd_sg
, sizeof (mptti_t
) *
3590 if (cmd
->cmd_sg
== (mptti_t
*)NULL
) {
3591 cmd
->cmd_sg
= kmem_alloc((size_t)(sizeof (mptti_t
)*
3592 cmd
->cmd_cookiec
), kf
);
3594 if (cmd
->cmd_sg
== (mptti_t
*)NULL
) {
3595 mptsas_log(mpt
, CE_WARN
,
3596 "unable to kmem_alloc enough memory "
3597 "for scatter/gather list");
3599 * if we have an ENOMEM condition we need to behave
3600 * the same way as the rest of this routine
3603 bioerror(bp
, ENOMEM
);
3605 mptsas_scsi_destroy_pkt(ap
, pkt
);
3607 return ((struct scsi_pkt
*)NULL
);
3610 #endif /* __sparc */
3613 ASSERT(cmd
->cmd_cookie
.dmac_size
!= 0);
3616 * store the first segment into the S/G list
3618 dmap
->count
= cmd
->cmd_cookie
.dmac_size
;
3619 dmap
->addr
.address64
.Low
= (uint32_t)
3620 (cmd
->cmd_cookie
.dmac_laddress
& 0xffffffffull
);
3621 dmap
->addr
.address64
.High
= (uint32_t)
3622 (cmd
->cmd_cookie
.dmac_laddress
>> 32);
3625 * dmacount counts the size of the dma for this window
3626 * (if partial dma is being used). totaldmacount
3627 * keeps track of the total amount of dma we have
3628 * transferred for all the windows (needed to calculate
3629 * the resid value below).
3631 cmd
->cmd_dmacount
= cmd
->cmd_cookie
.dmac_size
;
3632 cmd
->cmd_totaldmacount
+= cmd
->cmd_cookie
.dmac_size
;
3635 * We already stored the first DMA scatter gather segment,
3636 * start at 1 if we need to store more.
3638 for (cnt
= 1; cnt
< cmd
->cmd_cookiec
; cnt
++) {
3640 * Get next DMA cookie
3642 ddi_dma_nextcookie(cmd
->cmd_dmahandle
,
3646 cmd
->cmd_dmacount
+= cmd
->cmd_cookie
.dmac_size
;
3647 cmd
->cmd_totaldmacount
+= cmd
->cmd_cookie
.dmac_size
;
3650 * store the segment parms into the S/G list
3652 dmap
->count
= cmd
->cmd_cookie
.dmac_size
;
3653 dmap
->addr
.address64
.Low
= (uint32_t)
3654 (cmd
->cmd_cookie
.dmac_laddress
& 0xffffffffull
);
3655 dmap
->addr
.address64
.High
= (uint32_t)
3656 (cmd
->cmd_cookie
.dmac_laddress
>> 32);
3660 * If this was partially allocated we set the resid
3661 * the amount of data NOT transferred in this window
3662 * If there is only one window, the resid will be 0
3664 pkt
->pkt_resid
= (bp
->b_bcount
- cmd
->cmd_totaldmacount
);
3665 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd
->cmd_dmacount
));
3671 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3674 * - also frees DMA resources if allocated
3675 * - implicit DMA synchonization
3678 mptsas_scsi_destroy_pkt(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
3680 mptsas_cmd_t
*cmd
= PKT2CMD(pkt
);
3681 mptsas_t
*mpt
= ADDR2MPT(ap
);
3683 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3684 ap
->a_target
, (void *)pkt
));
3686 if (cmd
->cmd_flags
& CFLAG_DMAVALID
) {
3687 (void) ddi_dma_unbind_handle(cmd
->cmd_dmahandle
);
3688 cmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
3692 kmem_free(cmd
->cmd_sg
, sizeof (mptti_t
) * cmd
->cmd_cookiec
);
3695 #endif /* __sparc */
3696 mptsas_free_extra_sgl_frame(mpt
, cmd
);
3698 if ((cmd
->cmd_flags
&
3699 (CFLAG_FREE
| CFLAG_CDBEXTERN
| CFLAG_PRIVEXTERN
|
3700 CFLAG_SCBEXTERN
)) == 0) {
3701 cmd
->cmd_flags
= CFLAG_FREE
;
3702 kmem_cache_free(mpt
->m_kmem_cache
, (void *)cmd
);
3704 mptsas_pkt_destroy_extern(mpt
, cmd
);
3709 * kmem cache constructor and destructor:
3710 * When constructing, we bzero the cmd and allocate the dma handle
3711 * When destructing, just free the dma handle
3714 mptsas_kmem_cache_constructor(void *buf
, void *cdrarg
, int kmflags
)
3716 mptsas_cmd_t
*cmd
= buf
;
3717 mptsas_t
*mpt
= cdrarg
;
3718 struct scsi_address ap
;
3720 ddi_dma_attr_t arq_dma_attr
;
3721 int (*callback
)(caddr_t
);
3723 callback
= (kmflags
== KM_SLEEP
)? DDI_DMA_SLEEP
: DDI_DMA_DONTWAIT
;
3725 NDBG4(("mptsas_kmem_cache_constructor"));
3727 ap
.a_hba_tran
= mpt
->m_tran
;
3732 * allocate a dma handle
3734 if ((ddi_dma_alloc_handle(mpt
->m_dip
, &mpt
->m_io_dma_attr
, callback
,
3735 NULL
, &cmd
->cmd_dmahandle
)) != DDI_SUCCESS
) {
3736 cmd
->cmd_dmahandle
= NULL
;
3740 cmd
->cmd_arq_buf
= scsi_alloc_consistent_buf(&ap
, (struct buf
*)NULL
,
3741 SENSE_LENGTH
, B_READ
, callback
, NULL
);
3742 if (cmd
->cmd_arq_buf
== NULL
) {
3743 ddi_dma_free_handle(&cmd
->cmd_dmahandle
);
3744 cmd
->cmd_dmahandle
= NULL
;
3749 * allocate a arq handle
3751 arq_dma_attr
= mpt
->m_msg_dma_attr
;
3752 arq_dma_attr
.dma_attr_sgllen
= 1;
3753 if ((ddi_dma_alloc_handle(mpt
->m_dip
, &arq_dma_attr
, callback
,
3754 NULL
, &cmd
->cmd_arqhandle
)) != DDI_SUCCESS
) {
3755 ddi_dma_free_handle(&cmd
->cmd_dmahandle
);
3756 scsi_free_consistent_buf(cmd
->cmd_arq_buf
);
3757 cmd
->cmd_dmahandle
= NULL
;
3758 cmd
->cmd_arqhandle
= NULL
;
3762 if (ddi_dma_buf_bind_handle(cmd
->cmd_arqhandle
,
3763 cmd
->cmd_arq_buf
, (DDI_DMA_READ
| DDI_DMA_CONSISTENT
),
3764 callback
, NULL
, &cmd
->cmd_arqcookie
, &cookiec
) != DDI_SUCCESS
) {
3765 ddi_dma_free_handle(&cmd
->cmd_dmahandle
);
3766 ddi_dma_free_handle(&cmd
->cmd_arqhandle
);
3767 scsi_free_consistent_buf(cmd
->cmd_arq_buf
);
3768 cmd
->cmd_dmahandle
= NULL
;
3769 cmd
->cmd_arqhandle
= NULL
;
3770 cmd
->cmd_arq_buf
= NULL
;
3774 * In sparc, the sgl length in most of the cases would be 1, so we
3775 * pre-allocate it in cache. On x86, the max number would be 256,
3776 * pre-allocate a maximum would waste a lot of memory especially
3777 * when many cmds are put onto waitq.
3780 cmd
->cmd_sg
= kmem_alloc((size_t)(sizeof (mptti_t
)*
3781 MPTSAS_MAX_CMD_SEGS
), KM_SLEEP
);
3782 #endif /* __sparc */
3788 mptsas_kmem_cache_destructor(void *buf
, void *cdrarg
)
3791 _NOTE(ARGUNUSED(cdrarg
))
3793 mptsas_cmd_t
*cmd
= buf
;
3795 NDBG4(("mptsas_kmem_cache_destructor"));
3797 if (cmd
->cmd_arqhandle
) {
3798 (void) ddi_dma_unbind_handle(cmd
->cmd_arqhandle
);
3799 ddi_dma_free_handle(&cmd
->cmd_arqhandle
);
3800 cmd
->cmd_arqhandle
= NULL
;
3802 if (cmd
->cmd_arq_buf
) {
3803 scsi_free_consistent_buf(cmd
->cmd_arq_buf
);
3804 cmd
->cmd_arq_buf
= NULL
;
3806 if (cmd
->cmd_dmahandle
) {
3807 ddi_dma_free_handle(&cmd
->cmd_dmahandle
);
3808 cmd
->cmd_dmahandle
= NULL
;
3812 kmem_free(cmd
->cmd_sg
, sizeof (mptti_t
)* MPTSAS_MAX_CMD_SEGS
);
3815 #endif /* __sparc */
3819 mptsas_cache_frames_constructor(void *buf
, void *cdrarg
, int kmflags
)
3821 mptsas_cache_frames_t
*p
= buf
;
3822 mptsas_t
*mpt
= cdrarg
;
3823 ddi_dma_attr_t frame_dma_attr
;
3824 size_t mem_size
, alloc_len
;
3825 ddi_dma_cookie_t cookie
;
3827 int (*callback
)(caddr_t
) = (kmflags
== KM_SLEEP
)
3828 ? DDI_DMA_SLEEP
: DDI_DMA_DONTWAIT
;
3830 frame_dma_attr
= mpt
->m_msg_dma_attr
;
3831 frame_dma_attr
.dma_attr_align
= 0x10;
3832 frame_dma_attr
.dma_attr_sgllen
= 1;
3834 if (ddi_dma_alloc_handle(mpt
->m_dip
, &frame_dma_attr
, callback
, NULL
,
3835 &p
->m_dma_hdl
) != DDI_SUCCESS
) {
3836 mptsas_log(mpt
, CE_WARN
, "Unable to allocate dma handle for"
3838 return (DDI_FAILURE
);
3841 mem_size
= (mpt
->m_max_request_frames
- 1) * mpt
->m_req_frame_size
;
3843 if (ddi_dma_mem_alloc(p
->m_dma_hdl
, mem_size
, &mpt
->m_dev_acc_attr
,
3844 DDI_DMA_CONSISTENT
, callback
, NULL
, (caddr_t
*)&p
->m_frames_addr
,
3845 &alloc_len
, &p
->m_acc_hdl
) != DDI_SUCCESS
) {
3846 ddi_dma_free_handle(&p
->m_dma_hdl
);
3847 p
->m_dma_hdl
= NULL
;
3848 mptsas_log(mpt
, CE_WARN
, "Unable to allocate dma memory for"
3850 return (DDI_FAILURE
);
3853 if (ddi_dma_addr_bind_handle(p
->m_dma_hdl
, NULL
, p
->m_frames_addr
,
3854 alloc_len
, DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
, callback
, NULL
,
3855 &cookie
, &ncookie
) != DDI_DMA_MAPPED
) {
3856 (void) ddi_dma_mem_free(&p
->m_acc_hdl
);
3857 ddi_dma_free_handle(&p
->m_dma_hdl
);
3858 p
->m_dma_hdl
= NULL
;
3859 mptsas_log(mpt
, CE_WARN
, "Unable to bind DMA resources for"
3861 return (DDI_FAILURE
);
3865 * Store the SGL memory address. This chip uses this
3866 * address to dma to and from the driver. The second
3867 * address is the address mpt uses to fill in the SGL.
3869 p
->m_phys_addr
= cookie
.dmac_address
;
3871 return (DDI_SUCCESS
);
3875 mptsas_cache_frames_destructor(void *buf
, void *cdrarg
)
3878 _NOTE(ARGUNUSED(cdrarg
))
3880 mptsas_cache_frames_t
*p
= buf
;
3881 if (p
->m_dma_hdl
!= NULL
) {
3882 (void) ddi_dma_unbind_handle(p
->m_dma_hdl
);
3883 (void) ddi_dma_mem_free(&p
->m_acc_hdl
);
3884 ddi_dma_free_handle(&p
->m_dma_hdl
);
3885 p
->m_phys_addr
= NULL
;
3886 p
->m_frames_addr
= NULL
;
3887 p
->m_dma_hdl
= NULL
;
3888 p
->m_acc_hdl
= NULL
;
3894 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
3895 * for non-standard length cdb, pkt_private, status areas
3896 * if allocation fails, then deallocate all external space and the pkt
3900 mptsas_pkt_alloc_extern(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
,
3901 int cmdlen
, int tgtlen
, int statuslen
, int kf
)
3903 caddr_t cdbp
, scbp
, tgt
;
3904 int (*callback
)(caddr_t
) = (kf
== KM_SLEEP
) ?
3905 DDI_DMA_SLEEP
: DDI_DMA_DONTWAIT
;
3906 struct scsi_address ap
;
3908 ddi_dma_attr_t ext_arq_dma_attr
;
3911 NDBG3(("mptsas_pkt_alloc_extern: "
3912 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
3913 (void *)cmd
, cmdlen
, tgtlen
, statuslen
, kf
));
3915 tgt
= cdbp
= scbp
= NULL
;
3916 cmd
->cmd_scblen
= statuslen
;
3917 cmd
->cmd_privlen
= (uchar_t
)tgtlen
;
3919 if (cmdlen
> sizeof (cmd
->cmd_cdb
)) {
3920 if ((cdbp
= kmem_zalloc((size_t)cmdlen
, kf
)) == NULL
) {
3923 cmd
->cmd_pkt
->pkt_cdbp
= (opaque_t
)cdbp
;
3924 cmd
->cmd_flags
|= CFLAG_CDBEXTERN
;
3926 if (tgtlen
> PKT_PRIV_LEN
) {
3927 if ((tgt
= kmem_zalloc((size_t)tgtlen
, kf
)) == NULL
) {
3930 cmd
->cmd_flags
|= CFLAG_PRIVEXTERN
;
3931 cmd
->cmd_pkt
->pkt_private
= tgt
;
3933 if (statuslen
> EXTCMDS_STATUS_SIZE
) {
3934 if ((scbp
= kmem_zalloc((size_t)statuslen
, kf
)) == NULL
) {
3937 cmd
->cmd_flags
|= CFLAG_SCBEXTERN
;
3938 cmd
->cmd_pkt
->pkt_scbp
= (opaque_t
)scbp
;
3940 /* allocate sense data buf for DMA */
3942 senselength
= statuslen
- MPTSAS_GET_ITEM_OFF(
3943 struct scsi_arq_status
, sts_sensedata
);
3944 cmd
->cmd_rqslen
= (uchar_t
)senselength
;
3946 ap
.a_hba_tran
= mpt
->m_tran
;
3950 cmd
->cmd_ext_arq_buf
= scsi_alloc_consistent_buf(&ap
,
3951 (struct buf
*)NULL
, senselength
, B_READ
,
3954 if (cmd
->cmd_ext_arq_buf
== NULL
) {
3958 * allocate a extern arq handle and bind the buf
3960 ext_arq_dma_attr
= mpt
->m_msg_dma_attr
;
3961 ext_arq_dma_attr
.dma_attr_sgllen
= 1;
3962 if ((ddi_dma_alloc_handle(mpt
->m_dip
,
3963 &ext_arq_dma_attr
, callback
,
3964 NULL
, &cmd
->cmd_ext_arqhandle
)) != DDI_SUCCESS
) {
3968 if (ddi_dma_buf_bind_handle(cmd
->cmd_ext_arqhandle
,
3969 cmd
->cmd_ext_arq_buf
, (DDI_DMA_READ
| DDI_DMA_CONSISTENT
),
3970 callback
, NULL
, &cmd
->cmd_ext_arqcookie
,
3975 cmd
->cmd_flags
|= CFLAG_EXTARQBUFVALID
;
3979 mptsas_pkt_destroy_extern(mpt
, cmd
);
3984 * deallocate external pkt space and deallocate the pkt
3987 mptsas_pkt_destroy_extern(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
3989 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd
));
3991 if (cmd
->cmd_flags
& CFLAG_FREE
) {
3992 mptsas_log(mpt
, CE_PANIC
,
3993 "mptsas_pkt_destroy_extern: freeing free packet");
3997 if (cmd
->cmd_flags
& CFLAG_CDBEXTERN
) {
3998 kmem_free(cmd
->cmd_pkt
->pkt_cdbp
, (size_t)cmd
->cmd_cdblen
);
4000 if (cmd
->cmd_flags
& CFLAG_SCBEXTERN
) {
4001 kmem_free(cmd
->cmd_pkt
->pkt_scbp
, (size_t)cmd
->cmd_scblen
);
4002 if (cmd
->cmd_flags
& CFLAG_EXTARQBUFVALID
) {
4003 (void) ddi_dma_unbind_handle(cmd
->cmd_ext_arqhandle
);
4005 if (cmd
->cmd_ext_arqhandle
) {
4006 ddi_dma_free_handle(&cmd
->cmd_ext_arqhandle
);
4007 cmd
->cmd_ext_arqhandle
= NULL
;
4009 if (cmd
->cmd_ext_arq_buf
)
4010 scsi_free_consistent_buf(cmd
->cmd_ext_arq_buf
);
4012 if (cmd
->cmd_flags
& CFLAG_PRIVEXTERN
) {
4013 kmem_free(cmd
->cmd_pkt
->pkt_private
, (size_t)cmd
->cmd_privlen
);
4015 cmd
->cmd_flags
= CFLAG_FREE
;
4016 kmem_cache_free(mpt
->m_kmem_cache
, (void *)cmd
);
4020 * tran_sync_pkt(9E) - explicit DMA synchronization
4024 mptsas_scsi_sync_pkt(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
4026 mptsas_cmd_t
*cmd
= PKT2CMD(pkt
);
4028 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4029 ap
->a_target
, (void *)pkt
));
4031 if (cmd
->cmd_dmahandle
) {
4032 (void) ddi_dma_sync(cmd
->cmd_dmahandle
, 0, 0,
4033 (cmd
->cmd_flags
& CFLAG_DMASEND
) ?
4034 DDI_DMA_SYNC_FORDEV
: DDI_DMA_SYNC_FORCPU
);
4039 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4043 mptsas_scsi_dmafree(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
4045 mptsas_cmd_t
*cmd
= PKT2CMD(pkt
);
4046 mptsas_t
*mpt
= ADDR2MPT(ap
);
4048 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4049 ap
->a_target
, (void *)pkt
));
4051 if (cmd
->cmd_flags
& CFLAG_DMAVALID
) {
4052 (void) ddi_dma_unbind_handle(cmd
->cmd_dmahandle
);
4053 cmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
4056 if (cmd
->cmd_flags
& CFLAG_EXTARQBUFVALID
) {
4057 (void) ddi_dma_unbind_handle(cmd
->cmd_ext_arqhandle
);
4058 cmd
->cmd_flags
&= ~CFLAG_EXTARQBUFVALID
;
4061 mptsas_free_extra_sgl_frame(mpt
, cmd
);
4065 mptsas_pkt_comp(struct scsi_pkt
*pkt
, mptsas_cmd_t
*cmd
)
4067 if ((cmd
->cmd_flags
& CFLAG_CMDIOPB
) &&
4068 (!(cmd
->cmd_flags
& CFLAG_DMASEND
))) {
4069 (void) ddi_dma_sync(cmd
->cmd_dmahandle
, 0, 0,
4070 DDI_DMA_SYNC_FORCPU
);
4072 (*pkt
->pkt_comp
)(pkt
);
4076 mptsas_sge_setup(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
, uint32_t *control
,
4077 pMpi2SCSIIORequest_t frame
, ddi_acc_handle_t acc_hdl
)
4082 pMpi2SGESimple64_t sge
;
4083 pMpi2SGEChain64_t sgechain
;
4084 ASSERT(cmd
->cmd_flags
& CFLAG_DMAVALID
);
4087 * Save the number of entries in the DMA
4088 * Scatter/Gather list
4090 cookiec
= cmd
->cmd_cookiec
;
4092 NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec
));
4095 * Set read/write bit in control.
4097 if (cmd
->cmd_flags
& CFLAG_DMASEND
) {
4098 *control
|= MPI2_SCSIIO_CONTROL_WRITE
;
4100 *control
|= MPI2_SCSIIO_CONTROL_READ
;
4103 ddi_put32(acc_hdl
, &frame
->DataLength
, cmd
->cmd_dmacount
);
4106 * We have 2 cases here. First where we can fit all the
4107 * SG elements into the main frame, and the case
4109 * If we have more cookies than we can attach to a frame
4110 * we will need to use a chain element to point
4111 * a location of memory where the rest of the S/G
4114 if (cookiec
<= MPTSAS_MAX_FRAME_SGES64(mpt
)) {
4116 sge
= (pMpi2SGESimple64_t
)(&frame
->SGL
);
4119 &sge
->Address
.Low
, dmap
->addr
.address64
.Low
);
4121 &sge
->Address
.High
, dmap
->addr
.address64
.High
);
4122 ddi_put32(acc_hdl
, &sge
->FlagsLength
,
4124 flags
= ddi_get32(acc_hdl
, &sge
->FlagsLength
);
4125 flags
|= ((uint32_t)
4126 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
4127 MPI2_SGE_FLAGS_SYSTEM_ADDRESS
|
4128 MPI2_SGE_FLAGS_64_BIT_ADDRESSING
) <<
4129 MPI2_SGE_FLAGS_SHIFT
);
4132 * If this is the last cookie, we set the flags
4137 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4138 | MPI2_SGE_FLAGS_END_OF_BUFFER
4139 | MPI2_SGE_FLAGS_END_OF_LIST
) <<
4140 MPI2_SGE_FLAGS_SHIFT
);
4142 if (cmd
->cmd_flags
& CFLAG_DMASEND
) {
4143 flags
|= (MPI2_SGE_FLAGS_HOST_TO_IOC
<<
4144 MPI2_SGE_FLAGS_SHIFT
);
4146 flags
|= (MPI2_SGE_FLAGS_IOC_TO_HOST
<<
4147 MPI2_SGE_FLAGS_SHIFT
);
4149 ddi_put32(acc_hdl
, &sge
->FlagsLength
, flags
);
4155 * Hereby we start to deal with multiple frames.
4156 * The process is as follows:
4157 * 1. Determine how many frames are needed for SGL element
4158 * storage; Note that all frames are stored in contiguous
4159 * memory space and in 64-bit DMA mode each element is
4160 * 3 double-words (12 bytes) long.
4161 * 2. Fill up the main frame. We need to do this separately
4162 * since it contains the SCSI IO request header and needs
4163 * dedicated processing. Note that the last 4 double-words
4164 * of the SCSI IO header is for SGL element storage
4165 * (MPI2_SGE_IO_UNION).
4166 * 3. Fill the chain element in the main frame, so the DMA
4167 * engine can use the following frames.
4168 * 4. Enter a loop to fill the remaining frames. Note that the
4169 * last frame contains no chain element. The remaining
4170 * frames go into the mpt SGL buffer allocated on the fly,
4171 * not immediately following the main message frame, as in
4173 * Some restrictions:
4174 * 1. For 64-bit DMA, the simple element and chain element
4175 * are both of 3 double-words (12 bytes) in size, even
4176 * though all frames are stored in the first 4G of mem
4177 * range and the higher 32-bits of the address are always 0.
4178 * 2. On some controllers (like the 1064/1068), a frame can
4179 * hold SGL elements with the last 1 or 2 double-words
4180 * (4 or 8 bytes) un-used. On these controllers, we should
4181 * recognize that there's not enough room for another SGL
4182 * element and move the sge pointer to the next frame.
4184 int i
, j
, k
, l
, frames
, sgemax
;
4187 uint16_t chainlength
;
4188 mptsas_cache_frames_t
*p
;
4191 * Sgemax is the number of SGE's that will fit
4192 * each extra frame and frames is total
4193 * number of frames we'll need. 1 sge entry per
4194 * frame is reseverd for the chain element thus the -1 below.
4196 sgemax
= ((mpt
->m_req_frame_size
/ sizeof (MPI2_SGE_SIMPLE64
))
4198 temp
= (cookiec
- (MPTSAS_MAX_FRAME_SGES64(mpt
) - 1)) / sgemax
;
4201 * A little check to see if we need to round up the number
4204 if ((cookiec
- (MPTSAS_MAX_FRAME_SGES64(mpt
) - 1)) - (temp
*
4206 frames
= (temp
+ 1);
4211 sge
= (pMpi2SGESimple64_t
)(&frame
->SGL
);
4214 * First fill in the main frame
4216 for (j
= 1; j
< MPTSAS_MAX_FRAME_SGES64(mpt
); j
++) {
4217 ddi_put32(acc_hdl
, &sge
->Address
.Low
,
4218 dmap
->addr
.address64
.Low
);
4219 ddi_put32(acc_hdl
, &sge
->Address
.High
,
4220 dmap
->addr
.address64
.High
);
4221 ddi_put32(acc_hdl
, &sge
->FlagsLength
, dmap
->count
);
4222 flags
= ddi_get32(acc_hdl
, &sge
->FlagsLength
);
4223 flags
|= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
4224 MPI2_SGE_FLAGS_SYSTEM_ADDRESS
|
4225 MPI2_SGE_FLAGS_64_BIT_ADDRESSING
) <<
4226 MPI2_SGE_FLAGS_SHIFT
);
4229 * If this is the last SGE of this frame
4230 * we set the end of list flag
4232 if (j
== (MPTSAS_MAX_FRAME_SGES64(mpt
) - 1)) {
4233 flags
|= ((uint32_t)
4234 (MPI2_SGE_FLAGS_LAST_ELEMENT
) <<
4235 MPI2_SGE_FLAGS_SHIFT
);
4237 if (cmd
->cmd_flags
& CFLAG_DMASEND
) {
4239 (MPI2_SGE_FLAGS_HOST_TO_IOC
<<
4240 MPI2_SGE_FLAGS_SHIFT
);
4243 (MPI2_SGE_FLAGS_IOC_TO_HOST
<<
4244 MPI2_SGE_FLAGS_SHIFT
);
4246 ddi_put32(acc_hdl
, &sge
->FlagsLength
, flags
);
4252 * Fill in the chain element in the main frame.
4253 * About calculation on ChainOffset:
4254 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4255 * in the end reserved for SGL element storage
4256 * (MPI2_SGE_IO_UNION); we should count it in our
4257 * calculation. See its definition in the header file.
4258 * 2. Constant j is the counter of the current SGL element
4259 * that will be processed, and (j - 1) is the number of
4260 * SGL elements that have been processed (stored in the
4262 * 3. ChainOffset value should be in units of double-words (4
4263 * bytes) so the last value should be divided by 4.
4265 ddi_put8(acc_hdl
, &frame
->ChainOffset
,
4266 (sizeof (MPI2_SCSI_IO_REQUEST
) -
4267 sizeof (MPI2_SGE_IO_UNION
) +
4268 (j
- 1) * sizeof (MPI2_SGE_SIMPLE64
)) >> 2);
4269 sgechain
= (pMpi2SGEChain64_t
)sge
;
4270 chainflags
= (MPI2_SGE_FLAGS_CHAIN_ELEMENT
|
4271 MPI2_SGE_FLAGS_SYSTEM_ADDRESS
|
4272 MPI2_SGE_FLAGS_64_BIT_ADDRESSING
);
4273 ddi_put8(acc_hdl
, &sgechain
->Flags
, chainflags
);
4276 * The size of the next frame is the accurate size of space
4277 * (in bytes) used to store the SGL elements. j is the counter
4278 * of SGL elements. (j - 1) is the number of SGL elements that
4279 * have been processed (stored in frames).
4282 chainlength
= mpt
->m_req_frame_size
/
4283 sizeof (MPI2_SGE_SIMPLE64
) *
4284 sizeof (MPI2_SGE_SIMPLE64
);
4286 chainlength
= ((cookiec
- (j
- 1)) *
4287 sizeof (MPI2_SGE_SIMPLE64
));
4290 p
= cmd
->cmd_extra_frames
;
4292 ddi_put16(acc_hdl
, &sgechain
->Length
, chainlength
);
4293 ddi_put32(acc_hdl
, &sgechain
->Address
.Low
,
4295 /* SGL is allocated in the first 4G mem range */
4296 ddi_put32(acc_hdl
, &sgechain
->Address
.High
, 0);
4299 * If there are more than 2 frames left we have to
4300 * fill in the next chain offset to the location of
4301 * the chain element in the next frame.
4302 * sgemax is the number of simple elements in an extra
4303 * frame. Note that the value NextChainOffset should be
4304 * in double-words (4 bytes).
4307 ddi_put8(acc_hdl
, &sgechain
->NextChainOffset
,
4308 (sgemax
* sizeof (MPI2_SGE_SIMPLE64
)) >> 2);
4310 ddi_put8(acc_hdl
, &sgechain
->NextChainOffset
, 0);
4314 * Jump to next frame;
4315 * Starting here, chain buffers go into the per command SGL.
4316 * This buffer is allocated when chain buffers are needed.
4318 sge
= (pMpi2SGESimple64_t
)p
->m_frames_addr
;
4322 * Start filling in frames with SGE's. If we
4323 * reach the end of frame and still have SGE's
4324 * to fill we need to add a chain element and
4325 * use another frame. j will be our counter
4326 * for what cookie we are at and i will be
4327 * the total cookiec. k is the current frame
4329 for (k
= 1; k
<= frames
; k
++) {
4330 for (l
= 1; (l
<= (sgemax
+ 1)) && (j
<= i
); j
++, l
++) {
4333 * If we have reached the end of frame
4334 * and we have more SGE's to fill in
4335 * we have to fill the final entry
4336 * with a chain element and then
4337 * continue to the next frame
4339 if ((l
== (sgemax
+ 1)) && (k
!= frames
)) {
4340 sgechain
= (pMpi2SGEChain64_t
)sge
;
4343 MPI2_SGE_FLAGS_CHAIN_ELEMENT
|
4344 MPI2_SGE_FLAGS_SYSTEM_ADDRESS
|
4345 MPI2_SGE_FLAGS_64_BIT_ADDRESSING
);
4346 ddi_put8(p
->m_acc_hdl
,
4347 &sgechain
->Flags
, chainflags
);
4349 * k is the frame counter and (k + 1)
4350 * is the number of the next frame.
4351 * Note that frames are in contiguous
4354 ddi_put32(p
->m_acc_hdl
,
4355 &sgechain
->Address
.Low
,
4357 (mpt
->m_req_frame_size
* k
)));
4358 ddi_put32(p
->m_acc_hdl
,
4359 &sgechain
->Address
.High
, 0);
4362 * If there are more than 2 frames left
4363 * we have to next chain offset to
4364 * the location of the chain element
4365 * in the next frame and fill in the
4366 * length of the next chain
4368 if ((frames
- k
) >= 2) {
4369 ddi_put8(p
->m_acc_hdl
,
4370 &sgechain
->NextChainOffset
,
4372 sizeof (MPI2_SGE_SIMPLE64
))
4374 ddi_put16(p
->m_acc_hdl
,
4376 mpt
->m_req_frame_size
/
4377 sizeof (MPI2_SGE_SIMPLE64
) *
4378 sizeof (MPI2_SGE_SIMPLE64
));
4381 * This is the last frame. Set
4382 * the NextChainOffset to 0 and
4383 * Length is the total size of
4384 * all remaining simple elements
4386 ddi_put8(p
->m_acc_hdl
,
4387 &sgechain
->NextChainOffset
,
4389 ddi_put16(p
->m_acc_hdl
,
4392 sizeof (MPI2_SGE_SIMPLE64
));
4395 /* Jump to the next frame */
4396 sge
= (pMpi2SGESimple64_t
)
4397 ((char *)p
->m_frames_addr
+
4398 (int)mpt
->m_req_frame_size
* k
);
4403 ddi_put32(p
->m_acc_hdl
,
4405 dmap
->addr
.address64
.Low
);
4406 ddi_put32(p
->m_acc_hdl
,
4408 dmap
->addr
.address64
.High
);
4409 ddi_put32(p
->m_acc_hdl
,
4410 &sge
->FlagsLength
, dmap
->count
);
4411 flags
= ddi_get32(p
->m_acc_hdl
,
4413 flags
|= ((uint32_t)(
4414 MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
4415 MPI2_SGE_FLAGS_SYSTEM_ADDRESS
|
4416 MPI2_SGE_FLAGS_64_BIT_ADDRESSING
) <<
4417 MPI2_SGE_FLAGS_SHIFT
);
4420 * If we are at the end of the frame and
4421 * there is another frame to fill in
4422 * we set the last simple element as last
4425 if ((l
== sgemax
) && (k
!= frames
)) {
4426 flags
|= ((uint32_t)
4427 (MPI2_SGE_FLAGS_LAST_ELEMENT
) <<
4428 MPI2_SGE_FLAGS_SHIFT
);
4432 * If this is the final cookie we
4433 * indicate it by setting the flags
4436 flags
|= ((uint32_t)
4437 (MPI2_SGE_FLAGS_LAST_ELEMENT
|
4438 MPI2_SGE_FLAGS_END_OF_BUFFER
|
4439 MPI2_SGE_FLAGS_END_OF_LIST
) <<
4440 MPI2_SGE_FLAGS_SHIFT
);
4442 if (cmd
->cmd_flags
& CFLAG_DMASEND
) {
4444 (MPI2_SGE_FLAGS_HOST_TO_IOC
<<
4445 MPI2_SGE_FLAGS_SHIFT
);
4448 (MPI2_SGE_FLAGS_IOC_TO_HOST
<<
4449 MPI2_SGE_FLAGS_SHIFT
);
4451 ddi_put32(p
->m_acc_hdl
,
4452 &sge
->FlagsLength
, flags
);
4459 * Sync DMA with the chain buffers that were just created
4461 (void) ddi_dma_sync(p
->m_dma_hdl
, 0, 0, DDI_DMA_SYNC_FORDEV
);
4466 * Interrupt handling
4467 * Utility routine. Poll for status of a command sent to HBA
4468 * without interrupts (a FLAG_NOINTR command).
4471 mptsas_poll(mptsas_t
*mpt
, mptsas_cmd_t
*poll_cmd
, int polltime
)
4475 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd
));
4478 * In order to avoid using m_mutex in ISR(a new separate mutex
4479 * m_intr_mutex is introduced) and keep the same lock logic,
4480 * the m_intr_mutex should be used to protect the getting and
4481 * setting of the ReplyDescriptorIndex.
4483 * Since the m_intr_mutex would be released during processing the poll
4484 * cmd, so we should set the poll flag earlier here to make sure the
4485 * polled cmd be handled in this thread/context. A side effect is other
4486 * cmds during the period between the flag set and reset are also
4487 * handled in this thread and not the ISR. Since the poll cmd is not
4488 * so common, so the performance degradation in this case is not a big
4491 mutex_enter(&mpt
->m_intr_mutex
);
4492 mpt
->m_polled_intr
= 1;
4493 mutex_exit(&mpt
->m_intr_mutex
);
4495 if ((poll_cmd
->cmd_flags
& CFLAG_TM_CMD
) == 0) {
4496 mptsas_restart_hba(mpt
);
4500 * Wait, using drv_usecwait(), long enough for the command to
4501 * reasonably return from the target if the target isn't
4502 * "dead". A polled command may well be sent from scsi_poll, and
4503 * there are retries built in to scsi_poll if the transport
4504 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4505 * and retries the transport up to scsi_poll_busycnt times
4507 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4508 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4510 * limit the waiting to avoid a hang in the event that the
4511 * cmd never gets started but we are still receiving interrupts
4513 while (!(poll_cmd
->cmd_flags
& CFLAG_FINISHED
)) {
4514 if (mptsas_wait_intr(mpt
, polltime
) == FALSE
) {
4515 NDBG5(("mptsas_poll: command incomplete"));
4521 mutex_enter(&mpt
->m_intr_mutex
);
4522 mpt
->m_polled_intr
= 0;
4523 mutex_exit(&mpt
->m_intr_mutex
);
4525 if (rval
== FALSE
) {
4528 * this isn't supposed to happen, the hba must be wedged
4529 * Mark this cmd as a timeout.
4531 mptsas_set_pkt_reason(mpt
, poll_cmd
, CMD_TIMEOUT
,
4532 (STAT_TIMEOUT
|STAT_ABORTED
));
4534 if (poll_cmd
->cmd_queued
== FALSE
) {
4536 NDBG5(("mptsas_poll: not on waitq"));
4538 poll_cmd
->cmd_pkt
->pkt_state
|=
4539 (STATE_GOT_BUS
|STATE_GOT_TARGET
|STATE_SENT_CMD
);
4542 /* find and remove it from the waitq */
4543 NDBG5(("mptsas_poll: delete from waitq"));
4544 mptsas_waitq_delete(mpt
, poll_cmd
);
4548 mptsas_fma_check(mpt
, poll_cmd
);
4549 NDBG5(("mptsas_poll: done"));
4554 * Used for polling cmds and TM function
4557 mptsas_wait_intr(mptsas_t
*mpt
, int polltime
)
4560 pMpi2ReplyDescriptorsUnion_t reply_desc_union
;
4561 Mpi2ReplyDescriptorsUnion_t reply_desc_union_v
;
4565 NDBG5(("mptsas_wait_intr"));
4569 * Get the current interrupt mask and disable interrupts. When
4570 * re-enabling ints, set mask to saved value.
4572 int_mask
= ddi_get32(mpt
->m_datap
, &mpt
->m_reg
->HostInterruptMask
);
4573 MPTSAS_DISABLE_INTR(mpt
);
4576 * Keep polling for at least (polltime * 1000) seconds
4578 for (cnt
= 0; cnt
< polltime
; cnt
++) {
4579 mutex_enter(&mpt
->m_intr_mutex
);
4580 (void) ddi_dma_sync(mpt
->m_dma_post_queue_hdl
, 0, 0,
4581 DDI_DMA_SYNC_FORCPU
);
4583 reply_desc_union
= (pMpi2ReplyDescriptorsUnion_t
)
4584 MPTSAS_GET_NEXT_REPLY(mpt
, mpt
->m_post_index
);
4586 if (ddi_get32(mpt
->m_acc_post_queue_hdl
,
4587 &reply_desc_union
->Words
.Low
) == 0xFFFFFFFF ||
4588 ddi_get32(mpt
->m_acc_post_queue_hdl
,
4589 &reply_desc_union
->Words
.High
) == 0xFFFFFFFF) {
4590 mutex_exit(&mpt
->m_intr_mutex
);
4595 reply_type
= ddi_get8(mpt
->m_acc_post_queue_hdl
,
4596 &reply_desc_union
->Default
.ReplyFlags
);
4597 reply_type
&= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK
;
4598 reply_desc_union_v
.Default
.ReplyFlags
= reply_type
;
4599 if (reply_type
== MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS
) {
4600 reply_desc_union_v
.SCSIIOSuccess
.SMID
=
4601 ddi_get16(mpt
->m_acc_post_queue_hdl
,
4602 &reply_desc_union
->SCSIIOSuccess
.SMID
);
4603 } else if (reply_type
==
4604 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY
) {
4605 reply_desc_union_v
.AddressReply
.ReplyFrameAddress
=
4606 ddi_get32(mpt
->m_acc_post_queue_hdl
,
4607 &reply_desc_union
->AddressReply
.ReplyFrameAddress
);
4608 reply_desc_union_v
.AddressReply
.SMID
=
4609 ddi_get16(mpt
->m_acc_post_queue_hdl
,
4610 &reply_desc_union
->AddressReply
.SMID
);
4613 * Clear the reply descriptor for re-use and increment
4616 ddi_put64(mpt
->m_acc_post_queue_hdl
,
4617 &((uint64_t *)(void *)mpt
->m_post_queue
)[mpt
->m_post_index
],
4618 0xFFFFFFFFFFFFFFFF);
4619 (void) ddi_dma_sync(mpt
->m_dma_post_queue_hdl
, 0, 0,
4620 DDI_DMA_SYNC_FORDEV
);
4622 if (++mpt
->m_post_index
== mpt
->m_post_queue_depth
) {
4623 mpt
->m_post_index
= 0;
4627 * Update the global reply index
4629 ddi_put32(mpt
->m_datap
,
4630 &mpt
->m_reg
->ReplyPostHostIndex
, mpt
->m_post_index
);
4631 mutex_exit(&mpt
->m_intr_mutex
);
4634 * The reply is valid, process it according to its
4637 mptsas_process_intr(mpt
, &reply_desc_union_v
);
4641 * Re-enable interrupts and quit.
4643 ddi_put32(mpt
->m_datap
, &mpt
->m_reg
->HostInterruptMask
,
4650 * Clear polling flag, re-enable interrupts and quit.
4652 ddi_put32(mpt
->m_datap
, &mpt
->m_reg
->HostInterruptMask
, int_mask
);
4657 * For fastpath, the m_intr_mutex should be held from the begining to the end,
4658 * so we only treat those cmds that need not release m_intr_mutex(even just for
4659 * a moment) as candidate for fast processing. otherwise, we don't handle them
4660 * and just return, then in ISR, those cmds would be handled later with m_mutex
4661 * held and m_intr_mutex not held.
4664 mptsas_handle_io_fastpath(mptsas_t
*mpt
,
4667 mptsas_slots_t
*slots
= mpt
->m_active
;
4668 mptsas_cmd_t
*cmd
= NULL
;
4669 struct scsi_pkt
*pkt
;
4672 * This is a success reply so just complete the IO. First, do a sanity
4673 * check on the SMID. The final slot is used for TM requests, which
4674 * would not come into this reply handler.
4676 if ((SMID
== 0) || (SMID
> slots
->m_n_slots
)) {
4677 mptsas_log(mpt
, CE_WARN
, "?Received invalid SMID of %d\n",
4679 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
4683 cmd
= slots
->m_slot
[SMID
];
4686 * print warning and return if the slot is empty
4689 mptsas_log(mpt
, CE_WARN
, "?NULL command for successful SCSI IO "
4690 "in slot %d", SMID
);
4695 pkt
->pkt_state
|= (STATE_GOT_BUS
| STATE_GOT_TARGET
| STATE_SENT_CMD
|
4697 if (cmd
->cmd_flags
& CFLAG_DMAVALID
) {
4698 pkt
->pkt_state
|= STATE_XFERRED_DATA
;
4703 * If the cmd is a IOC, or a passthrough, then we don't process it in
4704 * fastpath, and later it would be handled by mptsas_process_intr()
4705 * with m_mutex protected.
4707 if (cmd
->cmd_flags
& (CFLAG_PASSTHRU
| CFLAG_CMDIOC
)) {
4710 mptsas_remove_cmd0(mpt
, cmd
);
4713 if (cmd
->cmd_flags
& CFLAG_RETRY
) {
4715 * The target returned QFULL or busy, do not add tihs
4716 * pkt to the doneq since the hba will retry
4719 * The pkt has already been resubmitted in
4720 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4721 * Remove this cmd_flag here.
4723 cmd
->cmd_flags
&= ~CFLAG_RETRY
;
4725 mptsas_doneq_add0(mpt
, cmd
);
4729 * In fastpath, the cmd should only be a context reply, so just check
4730 * the post queue of the reply descriptor and the dmahandle of the cmd
4731 * is enough. No sense data in this case and no need to check the dma
4732 * handle where sense data dma info is saved, the dma handle of the
4733 * reply frame, and the dma handle of the reply free queue.
4734 * For the dma handle of the request queue. Check fma here since we
4735 * are sure the request must have already been sent/DMAed correctly.
4736 * otherwise checking in mptsas_scsi_start() is not correct since
4737 * at that time the dma may not start.
4739 if ((mptsas_check_dma_handle(mpt
->m_dma_req_frame_hdl
) !=
4741 (mptsas_check_dma_handle(mpt
->m_dma_post_queue_hdl
) !=
4743 ddi_fm_service_impact(mpt
->m_dip
,
4744 DDI_SERVICE_UNAFFECTED
);
4745 pkt
->pkt_reason
= CMD_TRAN_ERR
;
4746 pkt
->pkt_statistics
= 0;
4748 if (cmd
->cmd_dmahandle
&&
4749 (mptsas_check_dma_handle(cmd
->cmd_dmahandle
) != DDI_SUCCESS
)) {
4750 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
4751 pkt
->pkt_reason
= CMD_TRAN_ERR
;
4752 pkt
->pkt_statistics
= 0;
4754 if ((cmd
->cmd_extra_frames
&&
4755 ((mptsas_check_dma_handle(cmd
->cmd_extra_frames
->m_dma_hdl
) !=
4757 (mptsas_check_acc_handle(cmd
->cmd_extra_frames
->m_acc_hdl
) !=
4759 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
4760 pkt
->pkt_reason
= CMD_TRAN_ERR
;
4761 pkt
->pkt_statistics
= 0;
4768 mptsas_handle_scsi_io_success(mptsas_t
*mpt
,
4769 pMpi2ReplyDescriptorsUnion_t reply_desc
)
4771 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success
;
4773 mptsas_slots_t
*slots
= mpt
->m_active
;
4774 mptsas_cmd_t
*cmd
= NULL
;
4775 struct scsi_pkt
*pkt
;
4777 scsi_io_success
= (pMpi2SCSIIOSuccessReplyDescriptor_t
)reply_desc
;
4778 SMID
= scsi_io_success
->SMID
;
4781 * This is a success reply so just complete the IO. First, do a sanity
4782 * check on the SMID. The final slot is used for TM requests, which
4783 * would not come into this reply handler.
4785 if ((SMID
== 0) || (SMID
> slots
->m_n_slots
)) {
4786 mptsas_log(mpt
, CE_WARN
, "?Received invalid SMID of %d\n",
4788 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
4792 cmd
= slots
->m_slot
[SMID
];
4795 * print warning and return if the slot is empty
4798 mptsas_log(mpt
, CE_WARN
, "?NULL command for successful SCSI IO "
4799 "in slot %d", SMID
);
4804 pkt
->pkt_state
|= (STATE_GOT_BUS
| STATE_GOT_TARGET
| STATE_SENT_CMD
|
4806 if (cmd
->cmd_flags
& CFLAG_DMAVALID
) {
4807 pkt
->pkt_state
|= STATE_XFERRED_DATA
;
4811 if (cmd
->cmd_flags
& CFLAG_PASSTHRU
) {
4812 cmd
->cmd_flags
|= CFLAG_FINISHED
;
4813 cv_broadcast(&mpt
->m_passthru_cv
);
4816 mptsas_remove_cmd(mpt
, cmd
);
4819 if (cmd
->cmd_flags
& CFLAG_RETRY
) {
4821 * The target returned QFULL or busy, do not add tihs
4822 * pkt to the doneq since the hba will retry
4825 * The pkt has already been resubmitted in
4826 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4827 * Remove this cmd_flag here.
4829 cmd
->cmd_flags
&= ~CFLAG_RETRY
;
4831 mptsas_doneq_add(mpt
, cmd
);
4836 mptsas_handle_address_reply(mptsas_t
*mpt
,
4837 pMpi2ReplyDescriptorsUnion_t reply_desc
)
4839 pMpi2AddressReplyDescriptor_t address_reply
;
4840 pMPI2DefaultReply_t reply
;
4841 mptsas_fw_diagnostic_buffer_t
*pBuffer
;
4842 uint32_t reply_addr
;
4843 uint16_t SMID
, iocstatus
;
4844 mptsas_slots_t
*slots
= mpt
->m_active
;
4845 mptsas_cmd_t
*cmd
= NULL
;
4846 uint8_t function
, buffer_type
;
4847 m_replyh_arg_t
*args
;
4850 ASSERT(mutex_owned(&mpt
->m_mutex
));
4852 address_reply
= (pMpi2AddressReplyDescriptor_t
)reply_desc
;
4854 reply_addr
= address_reply
->ReplyFrameAddress
;
4855 SMID
= address_reply
->SMID
;
4857 * If reply frame is not in the proper range we should ignore this
4858 * message and exit the interrupt handler.
4860 if ((reply_addr
< mpt
->m_reply_frame_dma_addr
) ||
4861 (reply_addr
>= (mpt
->m_reply_frame_dma_addr
+
4862 (mpt
->m_reply_frame_size
* mpt
->m_max_replies
))) ||
4863 ((reply_addr
- mpt
->m_reply_frame_dma_addr
) %
4864 mpt
->m_reply_frame_size
!= 0)) {
4865 mptsas_log(mpt
, CE_WARN
, "?Received invalid reply frame "
4866 "address 0x%x\n", reply_addr
);
4867 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
4871 (void) ddi_dma_sync(mpt
->m_dma_reply_frame_hdl
, 0, 0,
4872 DDI_DMA_SYNC_FORCPU
);
4873 reply
= (pMPI2DefaultReply_t
)(mpt
->m_reply_frame
+ (reply_addr
-
4874 mpt
->m_reply_frame_dma_addr
));
4875 function
= ddi_get8(mpt
->m_acc_reply_frame_hdl
, &reply
->Function
);
4878 * don't get slot information and command for events since these values
4881 if ((function
!= MPI2_FUNCTION_EVENT_NOTIFICATION
) &&
4882 (function
!= MPI2_FUNCTION_DIAG_BUFFER_POST
)) {
4884 * This could be a TM reply, which use the last allocated SMID,
4885 * so allow for that.
4887 if ((SMID
== 0) || (SMID
> (slots
->m_n_slots
+ 1))) {
4888 mptsas_log(mpt
, CE_WARN
, "?Received invalid SMID of "
4890 ddi_fm_service_impact(mpt
->m_dip
,
4891 DDI_SERVICE_UNAFFECTED
);
4895 cmd
= slots
->m_slot
[SMID
];
4898 * print warning and return if the slot is empty
4901 mptsas_log(mpt
, CE_WARN
, "?NULL command for address "
4902 "reply in slot %d", SMID
);
4905 if ((cmd
->cmd_flags
& CFLAG_PASSTHRU
) ||
4906 (cmd
->cmd_flags
& CFLAG_CONFIG
) ||
4907 (cmd
->cmd_flags
& CFLAG_FW_DIAG
)) {
4908 cmd
->cmd_rfm
= reply_addr
;
4909 cmd
->cmd_flags
|= CFLAG_FINISHED
;
4910 cv_broadcast(&mpt
->m_passthru_cv
);
4911 cv_broadcast(&mpt
->m_config_cv
);
4912 cv_broadcast(&mpt
->m_fw_diag_cv
);
4914 } else if (!(cmd
->cmd_flags
& CFLAG_FW_CMD
)) {
4915 mptsas_remove_cmd(mpt
, cmd
);
4917 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID
));
4920 * Depending on the function, we need to handle
4921 * the reply frame (and cmd) differently.
4924 case MPI2_FUNCTION_SCSI_IO_REQUEST
:
4925 mptsas_check_scsi_io_error(mpt
, (pMpi2SCSIIOReply_t
)reply
, cmd
);
4927 case MPI2_FUNCTION_SCSI_TASK_MGMT
:
4928 cmd
->cmd_rfm
= reply_addr
;
4929 mptsas_check_task_mgt(mpt
, (pMpi2SCSIManagementReply_t
)reply
,
4932 case MPI2_FUNCTION_FW_DOWNLOAD
:
4933 cmd
->cmd_flags
|= CFLAG_FINISHED
;
4934 cv_signal(&mpt
->m_fw_cv
);
4936 case MPI2_FUNCTION_EVENT_NOTIFICATION
:
4937 reply_frame_no
= (reply_addr
- mpt
->m_reply_frame_dma_addr
) /
4938 mpt
->m_reply_frame_size
;
4939 args
= &mpt
->m_replyh_args
[reply_frame_no
];
4940 args
->mpt
= (void *)mpt
;
4941 args
->rfm
= reply_addr
;
4944 * Record the event if its type is enabled in
4945 * this mpt instance by ioctl.
4947 mptsas_record_event(args
);
4950 * Handle time critical events
4951 * NOT_RESPONDING/ADDED only now
4953 if (mptsas_handle_event_sync(args
) == DDI_SUCCESS
) {
4955 * Would not return main process,
4956 * just let taskq resolve ack action
4957 * and ack would be sent in taskq thread
4959 NDBG20(("send mptsas_handle_event_sync success"));
4961 if ((ddi_taskq_dispatch(mpt
->m_event_taskq
, mptsas_handle_event
,
4962 (void *)args
, DDI_NOSLEEP
)) != DDI_SUCCESS
) {
4963 mptsas_log(mpt
, CE_WARN
, "No memory available"
4964 "for dispatch taskq");
4966 * Return the reply frame to the free queue.
4968 ddi_put32(mpt
->m_acc_free_queue_hdl
,
4969 &((uint32_t *)(void *)
4970 mpt
->m_free_queue
)[mpt
->m_free_index
], reply_addr
);
4971 (void) ddi_dma_sync(mpt
->m_dma_free_queue_hdl
, 0, 0,
4972 DDI_DMA_SYNC_FORDEV
);
4973 if (++mpt
->m_free_index
== mpt
->m_free_queue_depth
) {
4974 mpt
->m_free_index
= 0;
4977 ddi_put32(mpt
->m_datap
,
4978 &mpt
->m_reg
->ReplyFreeHostIndex
, mpt
->m_free_index
);
4981 case MPI2_FUNCTION_DIAG_BUFFER_POST
:
4983 * If SMID is 0, this implies that the reply is due to a
4984 * release function with a status that the buffer has been
4985 * released. Set the buffer flags accordingly.
4988 iocstatus
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
4990 buffer_type
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
4991 &(((pMpi2DiagBufferPostReply_t
)reply
)->BufferType
));
4992 if (iocstatus
== MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
) {
4994 &mpt
->m_fw_diag_buffer_list
[buffer_type
];
4995 pBuffer
->valid_data
= TRUE
;
4996 pBuffer
->owned_by_firmware
= FALSE
;
4997 pBuffer
->immediate
= FALSE
;
5001 * Normal handling of diag post reply with SMID.
5003 cmd
= slots
->m_slot
[SMID
];
5006 * print warning and return if the slot is empty
5009 mptsas_log(mpt
, CE_WARN
, "?NULL command for "
5010 "address reply in slot %d", SMID
);
5013 cmd
->cmd_rfm
= reply_addr
;
5014 cmd
->cmd_flags
|= CFLAG_FINISHED
;
5015 cv_broadcast(&mpt
->m_fw_diag_cv
);
5019 mptsas_log(mpt
, CE_WARN
, "Unknown function 0x%x ", function
);
5024 * Return the reply frame to the free queue.
5026 ddi_put32(mpt
->m_acc_free_queue_hdl
,
5027 &((uint32_t *)(void *)mpt
->m_free_queue
)[mpt
->m_free_index
],
5029 (void) ddi_dma_sync(mpt
->m_dma_free_queue_hdl
, 0, 0,
5030 DDI_DMA_SYNC_FORDEV
);
5031 if (++mpt
->m_free_index
== mpt
->m_free_queue_depth
) {
5032 mpt
->m_free_index
= 0;
5034 ddi_put32(mpt
->m_datap
, &mpt
->m_reg
->ReplyFreeHostIndex
,
5037 if (cmd
->cmd_flags
& CFLAG_FW_CMD
)
5040 if (cmd
->cmd_flags
& CFLAG_RETRY
) {
5042 * The target returned QFULL or busy, do not add tihs
5043 * pkt to the doneq since the hba will retry
5046 * The pkt has already been resubmitted in
5047 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5048 * Remove this cmd_flag here.
5050 cmd
->cmd_flags
&= ~CFLAG_RETRY
;
5052 mptsas_doneq_add(mpt
, cmd
);
5057 mptsas_check_scsi_io_error(mptsas_t
*mpt
, pMpi2SCSIIOReply_t reply
,
5060 uint8_t scsi_status
, scsi_state
;
5061 uint16_t ioc_status
;
5062 uint32_t xferred
, sensecount
, responsedata
, loginfo
= 0;
5063 struct scsi_pkt
*pkt
;
5064 struct scsi_arq_status
*arqstat
;
5066 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
5067 uint8_t *sensedata
= NULL
;
5069 if ((cmd
->cmd_flags
& (CFLAG_SCBEXTERN
| CFLAG_EXTARQBUFVALID
)) ==
5070 (CFLAG_SCBEXTERN
| CFLAG_EXTARQBUFVALID
)) {
5071 bp
= cmd
->cmd_ext_arq_buf
;
5073 bp
= cmd
->cmd_arq_buf
;
5076 scsi_status
= ddi_get8(mpt
->m_acc_reply_frame_hdl
, &reply
->SCSIStatus
);
5077 ioc_status
= ddi_get16(mpt
->m_acc_reply_frame_hdl
, &reply
->IOCStatus
);
5078 scsi_state
= ddi_get8(mpt
->m_acc_reply_frame_hdl
, &reply
->SCSIState
);
5079 xferred
= ddi_get32(mpt
->m_acc_reply_frame_hdl
, &reply
->TransferCount
);
5080 sensecount
= ddi_get32(mpt
->m_acc_reply_frame_hdl
, &reply
->SenseCount
);
5081 responsedata
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
5082 &reply
->ResponseInfo
);
5084 if (ioc_status
& MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE
) {
5085 loginfo
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
5086 &reply
->IOCLogInfo
);
5087 mptsas_log(mpt
, CE_NOTE
,
5088 "?Log info 0x%x received for target %d.\n"
5089 "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5090 loginfo
, Tgt(cmd
), scsi_status
, ioc_status
,
5094 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5095 scsi_status
, ioc_status
, scsi_state
));
5098 *(pkt
->pkt_scbp
) = scsi_status
;
5100 if (loginfo
== 0x31170000) {
5102 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5103 * 0x31170000 comes, that means the device missing delay
5104 * is in progressing, the command need retry later.
5106 *(pkt
->pkt_scbp
) = STATUS_BUSY
;
5110 if ((scsi_state
& MPI2_SCSI_STATE_NO_SCSI_STATUS
) &&
5111 ((ioc_status
& MPI2_IOCSTATUS_MASK
) ==
5112 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE
)) {
5113 pkt
->pkt_reason
= CMD_INCOMPLETE
;
5114 pkt
->pkt_state
|= STATE_GOT_BUS
;
5115 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
5116 if (ptgt
->m_reset_delay
== 0) {
5117 mptsas_set_throttle(mpt
, ptgt
,
5120 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
5124 if (scsi_state
& MPI2_SCSI_STATE_RESPONSE_INFO_VALID
) {
5125 responsedata
&= 0x000000FF;
5126 if (responsedata
& MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF
) {
5127 mptsas_log(mpt
, CE_NOTE
, "Do not support the TLR\n");
5128 pkt
->pkt_reason
= CMD_TLR_OFF
;
5134 switch (scsi_status
) {
5135 case MPI2_SCSI_STATUS_CHECK_CONDITION
:
5136 pkt
->pkt_resid
= (cmd
->cmd_dmacount
- xferred
);
5137 arqstat
= (void*)(pkt
->pkt_scbp
);
5138 arqstat
->sts_rqpkt_status
= *((struct scsi_status
*)
5140 pkt
->pkt_state
|= (STATE_GOT_BUS
| STATE_GOT_TARGET
|
5141 STATE_SENT_CMD
| STATE_GOT_STATUS
| STATE_ARQ_DONE
);
5142 if (cmd
->cmd_flags
& CFLAG_XARQ
) {
5143 pkt
->pkt_state
|= STATE_XARQ_DONE
;
5145 if (pkt
->pkt_resid
!= cmd
->cmd_dmacount
) {
5146 pkt
->pkt_state
|= STATE_XFERRED_DATA
;
5148 arqstat
->sts_rqpkt_reason
= pkt
->pkt_reason
;
5149 arqstat
->sts_rqpkt_state
= pkt
->pkt_state
;
5150 arqstat
->sts_rqpkt_state
|= STATE_XFERRED_DATA
;
5151 arqstat
->sts_rqpkt_statistics
= pkt
->pkt_statistics
;
5152 sensedata
= (uint8_t *)&arqstat
->sts_sensedata
;
5154 bcopy((uchar_t
*)bp
->b_un
.b_addr
, sensedata
,
5155 ((cmd
->cmd_rqslen
>= sensecount
) ? sensecount
:
5157 arqstat
->sts_rqpkt_resid
= (cmd
->cmd_rqslen
- sensecount
);
5158 cmd
->cmd_flags
|= CFLAG_CMDARQ
;
5160 * Set proper status for pkt if autosense was valid
5162 if (scsi_state
& MPI2_SCSI_STATE_AUTOSENSE_VALID
) {
5163 struct scsi_status zero_status
= { 0 };
5164 arqstat
->sts_rqpkt_status
= zero_status
;
5168 * ASC=0x47 is parity error
5169 * ASC=0x48 is initiator detected error received
5171 if ((scsi_sense_key(sensedata
) == KEY_ABORTED_COMMAND
) &&
5172 ((scsi_sense_asc(sensedata
) == 0x47) ||
5173 (scsi_sense_asc(sensedata
) == 0x48))) {
5174 mptsas_log(mpt
, CE_NOTE
, "Aborted_command!");
5178 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5179 * ASC/ASCQ=0x25/0x00 means invalid lun
5181 if (((scsi_sense_key(sensedata
) == KEY_UNIT_ATTENTION
) &&
5182 (scsi_sense_asc(sensedata
) == 0x3F) &&
5183 (scsi_sense_ascq(sensedata
) == 0x0E)) ||
5184 ((scsi_sense_key(sensedata
) == KEY_ILLEGAL_REQUEST
) &&
5185 (scsi_sense_asc(sensedata
) == 0x25) &&
5186 (scsi_sense_ascq(sensedata
) == 0x00))) {
5187 mptsas_topo_change_list_t
*topo_node
= NULL
;
5189 topo_node
= kmem_zalloc(
5190 sizeof (mptsas_topo_change_list_t
),
5192 if (topo_node
== NULL
) {
5193 mptsas_log(mpt
, CE_NOTE
, "No memory"
5194 "resource for handle SAS dynamic"
5198 topo_node
->mpt
= mpt
;
5199 topo_node
->event
= MPTSAS_DR_EVENT_RECONFIG_TARGET
;
5200 topo_node
->un
.phymask
= ptgt
->m_phymask
;
5201 topo_node
->devhdl
= ptgt
->m_devhdl
;
5202 topo_node
->object
= (void *)ptgt
;
5203 topo_node
->flags
= MPTSAS_TOPO_FLAG_LUN_ASSOCIATED
;
5205 if ((ddi_taskq_dispatch(mpt
->m_dr_taskq
,
5208 DDI_NOSLEEP
)) != DDI_SUCCESS
) {
5209 mptsas_log(mpt
, CE_NOTE
, "mptsas start taskq"
5210 "for handle SAS dynamic reconfigure"
5215 case MPI2_SCSI_STATUS_GOOD
:
5216 switch (ioc_status
& MPI2_IOCSTATUS_MASK
) {
5217 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE
:
5218 pkt
->pkt_reason
= CMD_DEV_GONE
;
5219 pkt
->pkt_state
|= STATE_GOT_BUS
;
5220 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
5221 if (ptgt
->m_reset_delay
== 0) {
5222 mptsas_set_throttle(mpt
, ptgt
, DRAIN_THROTTLE
);
5224 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
5225 NDBG31(("lost disk for target%d, command:%x",
5226 Tgt(cmd
), pkt
->pkt_cdbp
[0]));
5228 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN
:
5229 NDBG31(("data overrun: xferred=%d", xferred
));
5230 NDBG31(("dmacount=%d", cmd
->cmd_dmacount
));
5231 pkt
->pkt_reason
= CMD_DATA_OVR
;
5232 pkt
->pkt_state
|= (STATE_GOT_BUS
| STATE_GOT_TARGET
5233 | STATE_SENT_CMD
| STATE_GOT_STATUS
5234 | STATE_XFERRED_DATA
);
5237 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH
:
5238 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN
:
5239 NDBG31(("data underrun: xferred=%d", xferred
));
5240 NDBG31(("dmacount=%d", cmd
->cmd_dmacount
));
5241 pkt
->pkt_state
|= (STATE_GOT_BUS
| STATE_GOT_TARGET
5242 | STATE_SENT_CMD
| STATE_GOT_STATUS
);
5243 pkt
->pkt_resid
= (cmd
->cmd_dmacount
- xferred
);
5244 if (pkt
->pkt_resid
!= cmd
->cmd_dmacount
) {
5245 pkt
->pkt_state
|= STATE_XFERRED_DATA
;
5248 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED
:
5249 mptsas_set_pkt_reason(mpt
,
5250 cmd
, CMD_RESET
, STAT_BUS_RESET
);
5252 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED
:
5253 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED
:
5254 mptsas_set_pkt_reason(mpt
,
5255 cmd
, CMD_RESET
, STAT_DEV_RESET
);
5257 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR
:
5258 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR
:
5259 pkt
->pkt_state
|= (STATE_GOT_BUS
| STATE_GOT_TARGET
);
5260 mptsas_set_pkt_reason(mpt
,
5261 cmd
, CMD_TERMINATED
, STAT_TERMINATED
);
5263 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES
:
5264 case MPI2_IOCSTATUS_BUSY
:
5266 * set throttles to drain
5268 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
5269 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_FIRST
);
5270 while (ptgt
!= NULL
) {
5271 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
5272 mptsas_set_throttle(mpt
, ptgt
, DRAIN_THROTTLE
);
5273 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
5275 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
5276 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
5282 cmd
->cmd_flags
|= CFLAG_RETRY
;
5283 cmd
->cmd_pkt_flags
|= FLAG_HEAD
;
5285 mutex_exit(&mpt
->m_mutex
);
5286 (void) mptsas_accept_pkt(mpt
, cmd
);
5287 mutex_enter(&mpt
->m_mutex
);
5290 mptsas_log(mpt
, CE_WARN
,
5291 "unknown ioc_status = %x\n", ioc_status
);
5292 mptsas_log(mpt
, CE_CONT
, "scsi_state = %x, transfer "
5293 "count = %x, scsi_status = %x", scsi_state
,
5294 xferred
, scsi_status
);
5298 case MPI2_SCSI_STATUS_TASK_SET_FULL
:
5299 mptsas_handle_qfull(mpt
, cmd
);
5301 case MPI2_SCSI_STATUS_BUSY
:
5302 NDBG31(("scsi_status busy received"));
5304 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT
:
5305 NDBG31(("scsi_status reservation conflict received"));
5308 mptsas_log(mpt
, CE_WARN
, "scsi_status=%x, ioc_status=%x\n",
5309 scsi_status
, ioc_status
);
5310 mptsas_log(mpt
, CE_WARN
,
5311 "mptsas_process_intr: invalid scsi status\n");
5317 mptsas_check_task_mgt(mptsas_t
*mpt
, pMpi2SCSIManagementReply_t reply
,
5321 uint16_t ioc_status
;
5323 uint16_t dev_handle
;
5324 struct scsi_pkt
*pkt
= CMD2PKT(cmd
);
5326 task_type
= ddi_get8(mpt
->m_acc_reply_frame_hdl
, &reply
->TaskType
);
5327 ioc_status
= ddi_get16(mpt
->m_acc_reply_frame_hdl
, &reply
->IOCStatus
);
5328 log_info
= ddi_get32(mpt
->m_acc_reply_frame_hdl
, &reply
->IOCLogInfo
);
5329 dev_handle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
, &reply
->DevHandle
);
5331 if (ioc_status
!= MPI2_IOCSTATUS_SUCCESS
) {
5332 mptsas_log(mpt
, CE_WARN
, "mptsas_check_task_mgt: Task 0x%x "
5333 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5334 task_type
, ioc_status
, log_info
, dev_handle
);
5335 pkt
->pkt_reason
= CMD_INCOMPLETE
;
5339 switch (task_type
) {
5340 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK
:
5341 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET
:
5342 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK
:
5343 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA
:
5344 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET
:
5345 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION
:
5347 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET
:
5348 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET
:
5349 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
:
5351 * Check for invalid DevHandle of 0 in case application
5352 * sends bad command. DevHandle of 0 could cause problems.
5354 if (dev_handle
== 0) {
5355 mptsas_log(mpt
, CE_WARN
, "!Can't flush target with"
5356 " DevHandle of 0.");
5358 mptsas_flush_target(mpt
, dev_handle
, Lun(cmd
),
5363 mptsas_log(mpt
, CE_WARN
, "Unknown task management type %d.",
5365 mptsas_log(mpt
, CE_WARN
, "ioc status = %x", ioc_status
);
5371 mptsas_doneq_thread(mptsas_doneq_thread_arg_t
*arg
)
5373 mptsas_t
*mpt
= arg
->mpt
;
5374 uint64_t t
= arg
->t
;
5376 struct scsi_pkt
*pkt
;
5377 mptsas_doneq_thread_list_t
*item
= &mpt
->m_doneq_thread_id
[t
];
5379 mutex_enter(&item
->mutex
);
5380 while (item
->flag
& MPTSAS_DONEQ_THREAD_ACTIVE
) {
5382 cv_wait(&item
->cv
, &item
->mutex
);
5385 if ((cmd
= mptsas_doneq_thread_rm(mpt
, t
)) != NULL
) {
5386 cmd
->cmd_flags
|= CFLAG_COMPLETED
;
5389 mutex_exit(&item
->mutex
);
5391 mptsas_pkt_comp(pkt
, cmd
);
5393 mutex_enter(&item
->mutex
);
5395 mutex_exit(&item
->mutex
);
5396 mutex_enter(&mpt
->m_doneq_mutex
);
5397 mpt
->m_doneq_thread_n
--;
5398 cv_broadcast(&mpt
->m_doneq_thread_cv
);
5399 mutex_exit(&mpt
->m_doneq_mutex
);
5403 * mpt interrupt handler.
5406 mptsas_intr(caddr_t arg1
, caddr_t arg2
)
5408 mptsas_t
*mpt
= (void *)arg1
;
5409 pMpi2ReplyDescriptorsUnion_t reply_desc_union
;
5410 uchar_t did_reply
= FALSE
;
5415 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1
, (void *)arg2
));
5419 * To avoid using m_mutex in the ISR(ISR referes not only mptsas_intr,
5420 * but all of the recursive called functions in it. the same below),
5421 * separate mutexs are introduced to protect the elements shown in ISR.
5422 * 3 type of mutex are involved here:
5423 * a)per instance mutex m_intr_mutex.
5424 * b)per target mutex m_tgt_intr_mutex.
5425 * c)mutex that protect the free slot.
5427 * a)per instance mutex m_intr_mutex:
5428 * used to protect m_options, m_power, m_waitq, etc that would be
5429 * checked/modified in ISR; protect the getting and setting the reply
5430 * descriptor index; protect the m_slots[];
5432 * b)per target mutex m_tgt_intr_mutex:
5433 * used to protect per target element which has relationship to ISR.
5434 * contention for the new per target mutex is just as high as it in
5437 * c)mutexs that protect the free slots:
5438 * those mutexs are introduced to minimize the mutex contentions
5439 * between the IO request threads where free slots are allocated
5440 * for sending cmds and ISR where slots holding outstanding cmds
5441 * are returned to the free pool.
5442 * the idea is like this:
5443 * 1) Partition all of the free slot into NCPU groups. For example,
5444 * In system where we have 15 slots, and 4 CPU, then slot s1,s5,s9,s13
5445 * are marked belonging to CPU1, s2,s6,s10,s14 to CPU2, s3,s7,s11,s15
5446 * to CPU3, and s4,s8,s12 to CPU4.
5447 * 2) In each of the group, an alloc/release queue pair is created,
5448 * and both the allocq and the releaseq have a dedicated mutex.
5449 * 3) When init, all of the slots in a CPU group are inserted into the
5450 * allocq of its CPU's pair.
5452 * mptsas_scsi_start()
5454 * cpuid = the cpu NO of the cpu where this thread is running on
5456 * mutex_enter(&allocq[cpuid]);
5457 * if (get free slot = success) {
5458 * remove the slot from the allocq
5459 * mutex_exit(&allocq[cpuid]);
5461 * } else { // exchange allocq and releaseq and try again
5462 * mutex_enter(&releq[cpuid]);
5463 * exchange the allocq and releaseq of this pair;
5464 * mutex_exit(&releq[cpuid]);
5465 * if (try to get free slot again = success) {
5466 * remove the slot from the allocq
5467 * mutex_exit(&allocq[cpuid]);
5472 * if (all CPU groups tried)
5473 * mutex_exit(&allocq[cpuid]);
5480 * cpuid = the CPU group id where the slot sending the
5482 * mutex_enter(&releq[cpuid]);
5483 * remove the slot from the releaseq
5484 * mutex_exit(&releq[cpuid]);
5486 * This way, only when the queue pair doing exchange have mutex
5489 * For mutex m_intr_mutex and m_tgt_intr_mutex, there are 2 scenarios:
5491 * a)If the elements are only checked but not modified in the ISR, then
5492 * only the places where those elements are modifed(outside of ISR)
5493 * need to be protected by the new introduced mutex.
5494 * For example, data A is only read/checked in ISR, then we need do
5498 * mutex_enter(&new_mutex);
5500 * mutex_exit(&new_mutex);
5501 * //the new_mutex here is either the m_tgt_intr_mutex or
5502 * //the m_intr_mutex.
5506 * mutex_enter(&m_mutex); //the stock driver already did this
5507 * mutex_enter(&new_mutex);
5509 * mutex_exit(&new_mutex);
5510 * mutex_exit(&m_mutex); //the stock driver already did this
5513 * // read(A) in non-ISR is not required to be protected by new
5514 * // mutex since 'A' has already been protected by m_mutex
5515 * // outside of the ISR
5518 * Those fields in mptsas_target_t/ptgt which are only read in ISR
5519 * fall into this catergory. So they, together with the fields which
5520 * are never read in ISR, are not necessary to be protected by
5521 * m_tgt_intr_mutex, don't bother.
5522 * checking of m_waitq also falls into this catergory. so all of the
5523 * place outside of ISR where the m_waitq is modified, such as in
5524 * mptsas_waitq_add(), mptsas_waitq_delete(), mptsas_waitq_rm(),
5525 * m_intr_mutex should be used.
5527 * b)If the elements are modified in the ISR, then each place where
5528 * those elements are referred(outside of ISR) need to be protected
5529 * by the new introduced mutex. Of course, if those elements only
5530 * appear in the non-key code path, that is, they don't affect
5531 * performance, then the m_mutex can still be used as before.
5532 * For example, data B is modified in key code path in ISR, and data C
5533 * is modified in non-key code path in ISR, then we can do like this:
5536 * mutex_enter(&new_mutex);
5538 * mutex_exit(&new_mutex);
5539 * if (seldom happen) {
5540 * mutex_enter(&m_mutex);
5542 * mutex_exit(&m_mutex);
5544 * //the new_mutex here is either the m_tgt_intr_mutex or
5545 * //the m_intr_mutex.
5549 * mutex_enter(&new_mutex);
5551 * mutex_exit(&new_mutex);
5553 * mutex_enter(&new_mutex);
5555 * mutex_exit(&new_mutex);
5556 * // both write(B) and read(B) in non-ISR is required to be
5557 * // protected by new mutex outside of the ISR
5559 * mutex_enter(&m_mutex); //the stock driver already did this
5562 * mutex_exit(&m_mutex); //the stock driver already did this
5563 * // both write(C) and read(C) in non-ISR have been already
5564 * // been protected by m_mutex outside of the ISR
5567 * For example, ptgt->m_t_ncmds fall into 'B' of this catergory, and
5568 * elements shown in address reply, restart_hba, passthrough, IOC
5569 * fall into 'C' of this catergory.
5571 * In any case where mutexs are nested, make sure in the following
5573 * m_mutex -> m_intr_mutex -> m_tgt_intr_mutex
5574 * m_intr_mutex -> m_tgt_intr_mutex
5575 * m_mutex -> m_intr_mutex
5576 * m_mutex -> m_tgt_intr_mutex
5579 * Make sure at any time, getting the ReplyDescriptor by m_post_index
5580 * and setting m_post_index to the ReplyDescriptorIndex register are
5581 * atomic. Since m_mutex is not used for this purpose in ISR, the new
5582 * mutex m_intr_mutex must play this role. So mptsas_poll(), where this
5583 * kind of getting/setting is also performed, must use m_intr_mutex.
5584 * Note, since context reply in ISR/process_intr is the only code path
5585 * which affect performance, a fast path is introduced to only handle
5586 * the read/write IO having context reply. For other IOs such as
5587 * passthrough and IOC with context reply and all address reply, we
5588 * use the as-is process_intr() to handle them. In order to keep the
5589 * same semantics in process_intr(), make sure any new mutex is not held
5590 * before enterring it.
5593 mutex_enter(&mpt
->m_intr_mutex
);
5596 * If interrupts are shared by two channels then check whether this
5597 * interrupt is genuinely for this channel by making sure first the
5598 * chip is in high power state.
5600 if ((mpt
->m_options
& MPTSAS_OPT_PM
) &&
5601 (mpt
->m_power_level
!= PM_LEVEL_D0
)) {
5602 mutex_exit(&mpt
->m_intr_mutex
);
5603 return (DDI_INTR_UNCLAIMED
);
5607 * If polling, interrupt was triggered by some shared interrupt because
5608 * IOC interrupts are disabled during polling, so polling routine will
5609 * handle any replies. Considering this, if polling is happening,
5610 * return with interrupt unclaimed.
5612 if (mpt
->m_polled_intr
) {
5613 mutex_exit(&mpt
->m_intr_mutex
);
5614 mptsas_log(mpt
, CE_WARN
, "mpt_sas: Unclaimed interrupt");
5615 return (DDI_INTR_UNCLAIMED
);
5619 * Read the istat register.
5621 if ((INTPENDING(mpt
)) != 0) {
5623 * read fifo until empty.
5629 (void) ddi_dma_sync(mpt
->m_dma_post_queue_hdl
, 0, 0,
5630 DDI_DMA_SYNC_FORCPU
);
5631 reply_desc_union
= (pMpi2ReplyDescriptorsUnion_t
)
5632 MPTSAS_GET_NEXT_REPLY(mpt
, mpt
->m_post_index
);
5634 if (ddi_get32(mpt
->m_acc_post_queue_hdl
,
5635 &reply_desc_union
->Words
.Low
) == 0xFFFFFFFF ||
5636 ddi_get32(mpt
->m_acc_post_queue_hdl
,
5637 &reply_desc_union
->Words
.High
) == 0xFFFFFFFF) {
5642 * The reply is valid, process it according to its
5643 * type. Also, set a flag for updating the reply index
5644 * after they've all been processed.
5648 reply_type
= ddi_get8(mpt
->m_acc_post_queue_hdl
,
5649 &reply_desc_union
->Default
.ReplyFlags
);
5650 reply_type
&= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK
;
5651 mpt
->m_reply
[i
].Default
.ReplyFlags
= reply_type
;
5653 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS
) {
5654 SMID
= ddi_get16(mpt
->m_acc_post_queue_hdl
,
5655 &reply_desc_union
->SCSIIOSuccess
.SMID
);
5656 if (mptsas_handle_io_fastpath(mpt
, SMID
) !=
5658 mpt
->m_reply
[i
].SCSIIOSuccess
.SMID
=
5662 } else if (reply_type
==
5663 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY
) {
5664 mpt
->m_reply
[i
].AddressReply
.ReplyFrameAddress
=
5665 ddi_get32(mpt
->m_acc_post_queue_hdl
,
5666 &reply_desc_union
->AddressReply
.
5668 mpt
->m_reply
[i
].AddressReply
.SMID
=
5669 ddi_get16(mpt
->m_acc_post_queue_hdl
,
5670 &reply_desc_union
->AddressReply
.SMID
);
5674 * Clear the reply descriptor for re-use and increment
5677 ddi_put64(mpt
->m_acc_post_queue_hdl
,
5678 &((uint64_t *)(void *)mpt
->m_post_queue
)
5679 [mpt
->m_post_index
], 0xFFFFFFFFFFFFFFFF);
5680 (void) ddi_dma_sync(mpt
->m_dma_post_queue_hdl
, 0, 0,
5681 DDI_DMA_SYNC_FORDEV
);
5684 * Increment post index and roll over if needed.
5686 if (++mpt
->m_post_index
== mpt
->m_post_queue_depth
) {
5687 mpt
->m_post_index
= 0;
5689 if (i
>= MPI_ADDRESS_COALSCE_MAX
)
5694 * Update the global reply index if at least one reply was
5698 ddi_put32(mpt
->m_datap
,
5699 &mpt
->m_reg
->ReplyPostHostIndex
, mpt
->m_post_index
);
5702 * For fma, only check the PIO is required and enough
5703 * here. Those cases where fastpath is not hit, the
5704 * mptsas_fma_check() check all of the types of
5705 * fma. That is not necessary and sometimes not
5706 * correct. fma check should only be done after
5707 * the PIO and/or dma is performed.
5709 if ((mptsas_check_acc_handle(mpt
->m_datap
) !=
5711 ddi_fm_service_impact(mpt
->m_dip
,
5712 DDI_SERVICE_UNAFFECTED
);
5717 mutex_exit(&mpt
->m_intr_mutex
);
5718 return (DDI_INTR_UNCLAIMED
);
5720 NDBG1(("mptsas_intr complete"));
5721 mutex_exit(&mpt
->m_intr_mutex
);
5724 * Since most of the cmds(read and write IO with success return.)
5725 * have already been processed in fast path in which the m_mutex
5726 * is not held, handling here the address reply and other context reply
5727 * such as passthrough and IOC cmd with m_mutex held should be a big
5728 * issue for performance.
5729 * If holding m_mutex to process these cmds was still an obvious issue,
5730 * we can process them in a taskq.
5732 for (j
= 0; j
< i
; j
++) {
5733 mutex_enter(&mpt
->m_mutex
);
5734 mptsas_process_intr(mpt
, &mpt
->m_reply
[j
]);
5735 mutex_exit(&mpt
->m_mutex
);
5739 * If no helper threads are created, process the doneq in ISR. If
5740 * helpers are created, use the doneq length as a metric to measure the
5741 * load on the interrupt CPU. If it is long enough, which indicates the
5742 * load is heavy, then we deliver the IO completions to the helpers.
5743 * This measurement has some limitations, although it is simple and
5744 * straightforward and works well for most of the cases at present.
5746 if (!mpt
->m_doneq_thread_n
) {
5747 mptsas_doneq_empty(mpt
);
5750 mutex_enter(&mpt
->m_intr_mutex
);
5751 if (mpt
->m_doneq_len
<= mpt
->m_doneq_length_threshold
)
5753 mutex_exit(&mpt
->m_intr_mutex
);
5755 mptsas_deliver_doneq_thread(mpt
);
5757 mptsas_doneq_empty(mpt
);
5762 * If there are queued cmd, start them now.
5764 mutex_enter(&mpt
->m_intr_mutex
);
5765 if (mpt
->m_waitq
!= NULL
) {
5766 mutex_exit(&mpt
->m_intr_mutex
);
5767 mutex_enter(&mpt
->m_mutex
);
5768 mptsas_restart_hba(mpt
);
5769 mutex_exit(&mpt
->m_mutex
);
5770 return (DDI_INTR_CLAIMED
);
5772 mutex_exit(&mpt
->m_intr_mutex
);
5773 return (DDI_INTR_CLAIMED
);
5777 * In ISR, the successfully completed read and write IO are processed in a
5778 * fast path. This function is only used to handle non-fastpath IO, including
5779 * all of the address reply, and the context reply for IOC cmd, passthrough,
5781 * This function is also used to process polled cmd.
5784 mptsas_process_intr(mptsas_t
*mpt
,
5785 pMpi2ReplyDescriptorsUnion_t reply_desc_union
)
5790 * The reply is valid, process it according to its
5791 * type. Also, set a flag for updated the reply index
5792 * after they've all been processed.
5794 reply_type
= reply_desc_union
->Default
.ReplyFlags
;
5795 if (reply_type
== MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS
) {
5796 mptsas_handle_scsi_io_success(mpt
, reply_desc_union
);
5797 } else if (reply_type
== MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY
) {
5798 mptsas_handle_address_reply(mpt
, reply_desc_union
);
5800 mptsas_log(mpt
, CE_WARN
, "?Bad reply type %x", reply_type
);
5801 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
5806 * handle qfull condition
5809 mptsas_handle_qfull(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
5811 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
5813 if ((++cmd
->cmd_qfull_retries
> ptgt
->m_qfull_retries
) ||
5814 (ptgt
->m_qfull_retries
== 0)) {
5816 * We have exhausted the retries on QFULL, or,
5817 * the target driver has indicated that it
5818 * wants to handle QFULL itself by setting
5819 * qfull-retries capability to 0. In either case
5820 * we want the target driver's QFULL handling
5821 * to kick in. We do this by having pkt_reason
5822 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5824 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
5825 mptsas_set_throttle(mpt
, ptgt
, DRAIN_THROTTLE
);
5826 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
5828 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
5829 if (ptgt
->m_reset_delay
== 0) {
5830 ptgt
->m_t_throttle
=
5831 max((ptgt
->m_t_ncmds
- 2), 0);
5833 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
5835 cmd
->cmd_pkt_flags
|= FLAG_HEAD
;
5836 cmd
->cmd_flags
&= ~(CFLAG_TRANFLAG
);
5837 cmd
->cmd_flags
|= CFLAG_RETRY
;
5839 mutex_exit(&mpt
->m_mutex
);
5840 (void) mptsas_accept_pkt(mpt
, cmd
);
5841 mutex_enter(&mpt
->m_mutex
);
5844 * when target gives queue full status with no commands
5845 * outstanding (m_t_ncmds == 0), throttle is set to 0
5846 * (HOLD_THROTTLE), and the queue full handling start
5847 * (see psarc/1994/313); if there are commands outstanding,
5848 * throttle is set to (m_t_ncmds - 2)
5850 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
5851 if (ptgt
->m_t_throttle
== HOLD_THROTTLE
) {
5853 * By setting throttle to QFULL_THROTTLE, we
5854 * avoid submitting new commands and in
5855 * mptsas_restart_cmd find out slots which need
5856 * their throttles to be cleared.
5858 mptsas_set_throttle(mpt
, ptgt
, QFULL_THROTTLE
);
5859 if (mpt
->m_restart_cmd_timeid
== 0) {
5860 mpt
->m_restart_cmd_timeid
=
5861 timeout(mptsas_restart_cmd
, mpt
,
5862 ptgt
->m_qfull_retry_interval
);
5865 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
5870 mptsas_physport_to_phymask(mptsas_t
*mpt
, uint8_t physport
)
5872 mptsas_phymask_t phy_mask
= 0;
5875 NDBG20(("mptsas%d physport_to_phymask enter", mpt
->m_instance
));
5877 ASSERT(mutex_owned(&mpt
->m_mutex
));
5880 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5882 if (physport
== 0xFF) {
5886 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
5887 if (mpt
->m_phy_info
[i
].attached_devhdl
&&
5888 (mpt
->m_phy_info
[i
].phy_mask
!= 0) &&
5889 (mpt
->m_phy_info
[i
].port_num
== physport
)) {
5890 phy_mask
= mpt
->m_phy_info
[i
].phy_mask
;
5894 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5895 mpt
->m_instance
, physport
, phy_mask
));
5900 * mpt free device handle after device gone, by use of passthrough
5903 mptsas_free_devhdl(mptsas_t
*mpt
, uint16_t devhdl
)
5905 Mpi2SasIoUnitControlRequest_t req
;
5906 Mpi2SasIoUnitControlReply_t rep
;
5909 ASSERT(mutex_owned(&mpt
->m_mutex
));
5912 * Need to compose a SAS IO Unit Control request message
5913 * and call mptsas_do_passthru() function
5915 bzero(&req
, sizeof (req
));
5916 bzero(&rep
, sizeof (rep
));
5918 req
.Function
= MPI2_FUNCTION_SAS_IO_UNIT_CONTROL
;
5919 req
.Operation
= MPI2_SAS_OP_REMOVE_DEVICE
;
5920 req
.DevHandle
= LE_16(devhdl
);
5922 ret
= mptsas_do_passthru(mpt
, (uint8_t *)&req
, (uint8_t *)&rep
, NULL
,
5923 sizeof (req
), sizeof (rep
), NULL
, 0, NULL
, 0, 60, FKIOCTL
);
5925 cmn_err(CE_WARN
, "mptsas_free_devhdl: passthru SAS IO Unit "
5926 "Control error %d", ret
);
5927 return (DDI_FAILURE
);
5930 /* do passthrough success, check the ioc status */
5931 if (LE_16(rep
.IOCStatus
) != MPI2_IOCSTATUS_SUCCESS
) {
5932 cmn_err(CE_WARN
, "mptsas_free_devhdl: passthru SAS IO Unit "
5933 "Control IOCStatus %d", LE_16(rep
.IOCStatus
));
5934 return (DDI_FAILURE
);
5937 return (DDI_SUCCESS
);
5941 mptsas_update_phymask(mptsas_t
*mpt
)
5943 mptsas_phymask_t mask
= 0, phy_mask
;
5944 char *phy_mask_name
;
5945 uint8_t current_port
;
5948 NDBG20(("mptsas%d update phymask ", mpt
->m_instance
));
5950 ASSERT(mutex_owned(&mpt
->m_mutex
));
5952 (void) mptsas_get_sas_io_unit_page(mpt
);
5954 phy_mask_name
= kmem_zalloc(MPTSAS_MAX_PHYS
, KM_SLEEP
);
5956 for (i
= 0; i
< mpt
->m_num_phys
; i
++) {
5959 if (mpt
->m_phy_info
[i
].attached_devhdl
== 0)
5962 bzero(phy_mask_name
, sizeof (phy_mask_name
));
5964 current_port
= mpt
->m_phy_info
[i
].port_num
;
5966 if ((mask
& (1 << i
)) != 0)
5969 for (j
= 0; j
< mpt
->m_num_phys
; j
++) {
5970 if (mpt
->m_phy_info
[j
].attached_devhdl
&&
5971 (mpt
->m_phy_info
[j
].port_num
== current_port
)) {
5972 phy_mask
|= (1 << j
);
5975 mask
= mask
| phy_mask
;
5977 for (j
= 0; j
< mpt
->m_num_phys
; j
++) {
5978 if ((phy_mask
>> j
) & 0x01) {
5979 mpt
->m_phy_info
[j
].phy_mask
= phy_mask
;
5983 (void) sprintf(phy_mask_name
, "%x", phy_mask
);
5985 mutex_exit(&mpt
->m_mutex
);
5987 * register a iport, if the port has already been existed
5988 * SCSA will do nothing and just return.
5990 (void) scsi_hba_iport_register(mpt
->m_dip
, phy_mask_name
);
5991 mutex_enter(&mpt
->m_mutex
);
5993 kmem_free(phy_mask_name
, MPTSAS_MAX_PHYS
);
5994 NDBG20(("mptsas%d update phymask return", mpt
->m_instance
));
5998 * mptsas_handle_dr is a task handler for DR, the DR action includes:
5999 * 1. Directly attched Device Added/Removed.
6000 * 2. Expander Device Added/Removed.
6001 * 3. Indirectly Attached Device Added/Expander.
6002 * 4. LUNs of a existing device status change.
6003 * 5. RAID volume created/deleted.
6004 * 6. Member of RAID volume is released because of RAID deletion.
6005 * 7. Physical disks are removed because of RAID creation.
6008 mptsas_handle_dr(void *args
) {
6009 mptsas_topo_change_list_t
*topo_node
= NULL
;
6010 mptsas_topo_change_list_t
*save_node
= NULL
;
6012 dev_info_t
*parent
= NULL
;
6013 mptsas_phymask_t phymask
= 0;
6014 char *phy_mask_name
;
6015 uint8_t flags
= 0, physport
= 0xff;
6016 uint8_t port_update
= 0;
6019 topo_node
= (mptsas_topo_change_list_t
*)args
;
6021 mpt
= topo_node
->mpt
;
6022 event
= topo_node
->event
;
6023 flags
= topo_node
->flags
;
6025 phy_mask_name
= kmem_zalloc(MPTSAS_MAX_PHYS
, KM_SLEEP
);
6027 NDBG20(("mptsas%d handle_dr enter", mpt
->m_instance
));
6030 case MPTSAS_DR_EVENT_RECONFIG_TARGET
:
6031 if ((flags
== MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE
) ||
6032 (flags
== MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE
) ||
6033 (flags
== MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED
)) {
6035 * Direct attached or expander attached device added
6036 * into system or a Phys Disk that is being unhidden.
6041 case MPTSAS_DR_EVENT_RECONFIG_SMP
:
6043 * New expander added into system, it must be the head
6044 * of topo_change_list_t
6053 * All cases port_update == 1 may cause initiator port form change
6055 mutex_enter(&mpt
->m_mutex
);
6056 if (mpt
->m_port_chng
&& port_update
) {
6058 * mpt->m_port_chng flag indicates some PHYs of initiator
6059 * port have changed to online. So when expander added or
6060 * directly attached device online event come, we force to
6061 * update port information by issueing SAS IO Unit Page and
6064 (void) mptsas_update_phymask(mpt
);
6065 mpt
->m_port_chng
= 0;
6068 mutex_exit(&mpt
->m_mutex
);
6071 if (parent
== NULL
) {
6072 physport
= topo_node
->un
.physport
;
6073 event
= topo_node
->event
;
6074 flags
= topo_node
->flags
;
6075 if (event
& (MPTSAS_DR_EVENT_OFFLINE_TARGET
|
6076 MPTSAS_DR_EVENT_OFFLINE_SMP
)) {
6078 * For all offline events, phymask is known
6080 phymask
= topo_node
->un
.phymask
;
6083 if (event
& MPTSAS_TOPO_FLAG_REMOVE_HANDLE
) {
6084 goto handle_topo_change
;
6086 if (flags
& MPTSAS_TOPO_FLAG_LUN_ASSOCIATED
) {
6087 phymask
= topo_node
->un
.phymask
;
6092 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED
) &&
6093 (event
== MPTSAS_DR_EVENT_RECONFIG_TARGET
)) {
6095 * There is no any field in IR_CONFIG_CHANGE
6096 * event indicate physport/phynum, let's get
6097 * parent after SAS Device Page0 request.
6099 goto handle_topo_change
;
6102 mutex_enter(&mpt
->m_mutex
);
6103 if (flags
== MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE
) {
6105 * If the direct attached device added or a
6106 * phys disk is being unhidden, argument
6107 * physport actually is PHY#, so we have to get
6108 * phymask according PHY#.
6110 physport
= mpt
->m_phy_info
[physport
].port_num
;
6114 * Translate physport to phymask so that we can search
6117 phymask
= mptsas_physport_to_phymask(mpt
,
6119 mutex_exit(&mpt
->m_mutex
);
6122 bzero(phy_mask_name
, MPTSAS_MAX_PHYS
);
6124 * For RAID topology change node, write the iport name
6127 if (flags
& MPTSAS_TOPO_FLAG_RAID_ASSOCIATED
) {
6128 (void) sprintf(phy_mask_name
, "v0");
6131 * phymask can bo 0 if the drive has been
6132 * pulled by the time an add event is
6133 * processed. If phymask is 0, just skip this
6134 * event and continue.
6137 mutex_enter(&mpt
->m_mutex
);
6138 save_node
= topo_node
;
6139 topo_node
= topo_node
->next
;
6141 kmem_free(save_node
,
6142 sizeof (mptsas_topo_change_list_t
));
6143 mutex_exit(&mpt
->m_mutex
);
6148 (void) sprintf(phy_mask_name
, "%x", phymask
);
6150 parent
= scsi_hba_iport_find(mpt
->m_dip
,
6152 if (parent
== NULL
) {
6153 mptsas_log(mpt
, CE_WARN
, "Failed to find an "
6154 "iport, should not happen!");
6162 mutex_enter(&mpt
->m_mutex
);
6164 mptsas_handle_topo_change(topo_node
, parent
);
6165 save_node
= topo_node
;
6166 topo_node
= topo_node
->next
;
6168 kmem_free(save_node
, sizeof (mptsas_topo_change_list_t
));
6169 mutex_exit(&mpt
->m_mutex
);
6171 if ((flags
== MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE
) ||
6172 (flags
== MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED
) ||
6173 (flags
== MPTSAS_TOPO_FLAG_RAID_ASSOCIATED
)) {
6175 * If direct attached device associated, make sure
6176 * reset the parent before start the next one. But
6177 * all devices associated with expander shares the
6178 * parent. Also, reset parent if this is for RAID.
6184 kmem_free(phy_mask_name
, MPTSAS_MAX_PHYS
);
6188 mptsas_handle_topo_change(mptsas_topo_change_list_t
*topo_node
,
6191 mptsas_target_t
*ptgt
= NULL
;
6192 mptsas_smp_t
*psmp
= NULL
;
6193 mptsas_t
*mpt
= (void *)topo_node
->mpt
;
6195 uint16_t attached_devhdl
;
6196 uint64_t sas_wwn
= 0;
6198 uint32_t page_address
;
6202 int circ
= 0, circ1
= 0;
6203 char attached_wwnstr
[MPTSAS_WWN_STRLEN
];
6205 NDBG20(("mptsas%d handle_topo_change enter", mpt
->m_instance
));
6207 ASSERT(mutex_owned(&mpt
->m_mutex
));
6209 switch (topo_node
->event
) {
6210 case MPTSAS_DR_EVENT_RECONFIG_TARGET
:
6212 char *phy_mask_name
;
6213 mptsas_phymask_t phymask
= 0;
6215 if (topo_node
->flags
== MPTSAS_TOPO_FLAG_RAID_ASSOCIATED
) {
6217 * Get latest RAID info.
6219 (void) mptsas_get_raid_info(mpt
);
6220 ptgt
= mptsas_search_by_devhdl(
6221 &mpt
->m_active
->m_tgttbl
, topo_node
->devhdl
);
6225 ptgt
= (void *)topo_node
->object
;
6230 * If a Phys Disk was deleted, RAID info needs to be
6231 * updated to reflect the new topology.
6233 (void) mptsas_get_raid_info(mpt
);
6236 * Get sas device page 0 by DevHandle to make sure if
6237 * SSP/SATA end device exist.
6239 page_address
= (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE
&
6240 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) |
6243 rval
= mptsas_get_target_device_info(mpt
, page_address
,
6245 if (rval
== DEV_INFO_WRONG_DEVICE_TYPE
) {
6246 mptsas_log(mpt
, CE_NOTE
,
6247 "mptsas_handle_topo_change: target %d is "
6248 "not a SAS/SATA device. \n",
6250 } else if (rval
== DEV_INFO_FAIL_ALLOC
) {
6251 mptsas_log(mpt
, CE_NOTE
,
6252 "mptsas_handle_topo_change: could not "
6253 "allocate memory. \n");
6256 * If rval is DEV_INFO_PHYS_DISK than there is nothing
6257 * else to do, just leave.
6259 if (rval
!= DEV_INFO_SUCCESS
) {
6264 ASSERT(ptgt
->m_devhdl
== topo_node
->devhdl
);
6266 mutex_exit(&mpt
->m_mutex
);
6267 flags
= topo_node
->flags
;
6269 if (flags
== MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED
) {
6270 phymask
= ptgt
->m_phymask
;
6271 phy_mask_name
= kmem_zalloc(MPTSAS_MAX_PHYS
, KM_SLEEP
);
6272 (void) sprintf(phy_mask_name
, "%x", phymask
);
6273 parent
= scsi_hba_iport_find(mpt
->m_dip
,
6275 kmem_free(phy_mask_name
, MPTSAS_MAX_PHYS
);
6276 if (parent
== NULL
) {
6277 mptsas_log(mpt
, CE_WARN
, "Failed to find a "
6278 "iport for PD, should not happen!");
6279 mutex_enter(&mpt
->m_mutex
);
6284 if (flags
== MPTSAS_TOPO_FLAG_RAID_ASSOCIATED
) {
6285 ndi_devi_enter(parent
, &circ1
);
6286 (void) mptsas_config_raid(parent
, topo_node
->devhdl
,
6288 ndi_devi_exit(parent
, circ1
);
6291 * hold nexus for bus configure
6293 ndi_devi_enter(scsi_vhci_dip
, &circ
);
6294 ndi_devi_enter(parent
, &circ1
);
6295 rval
= mptsas_config_target(parent
, ptgt
);
6297 * release nexus for bus configure
6299 ndi_devi_exit(parent
, circ1
);
6300 ndi_devi_exit(scsi_vhci_dip
, circ
);
6303 * Add parent's props for SMHBA support
6305 if (flags
== MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE
) {
6306 bzero(attached_wwnstr
,
6307 sizeof (attached_wwnstr
));
6308 (void) sprintf(attached_wwnstr
, "w%016"PRIx64
,
6310 if (ddi_prop_update_string(DDI_DEV_T_NONE
,
6312 SCSI_ADDR_PROP_ATTACHED_PORT
,
6314 != DDI_PROP_SUCCESS
) {
6315 (void) ddi_prop_remove(DDI_DEV_T_NONE
,
6317 SCSI_ADDR_PROP_ATTACHED_PORT
);
6318 mptsas_log(mpt
, CE_WARN
, "Failed to"
6319 "attached-port props");
6322 if (ddi_prop_update_int(DDI_DEV_T_NONE
, parent
,
6323 MPTSAS_NUM_PHYS
, 1) !=
6325 (void) ddi_prop_remove(DDI_DEV_T_NONE
,
6326 parent
, MPTSAS_NUM_PHYS
);
6327 mptsas_log(mpt
, CE_WARN
, "Failed to"
6328 " create num-phys props");
6333 * Update PHY info for smhba
6335 mutex_enter(&mpt
->m_mutex
);
6336 if (mptsas_smhba_phy_init(mpt
)) {
6337 mutex_exit(&mpt
->m_mutex
);
6338 mptsas_log(mpt
, CE_WARN
, "mptsas phy"
6342 mutex_exit(&mpt
->m_mutex
);
6343 mptsas_smhba_set_phy_props(mpt
,
6344 ddi_get_name_addr(parent
), parent
,
6345 1, &attached_devhdl
);
6346 if (ddi_prop_update_int(DDI_DEV_T_NONE
, parent
,
6347 MPTSAS_VIRTUAL_PORT
, 0) !=
6349 (void) ddi_prop_remove(DDI_DEV_T_NONE
,
6350 parent
, MPTSAS_VIRTUAL_PORT
);
6351 mptsas_log(mpt
, CE_WARN
,
6352 "mptsas virtual-port"
6353 "port prop update failed");
6358 mutex_enter(&mpt
->m_mutex
);
6360 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6361 "phymask:%x.", mpt
->m_instance
, ptgt
->m_devhdl
,
6365 case MPTSAS_DR_EVENT_OFFLINE_TARGET
:
6367 mptsas_hash_table_t
*tgttbl
= &mpt
->m_active
->m_tgttbl
;
6368 devhdl
= topo_node
->devhdl
;
6369 ptgt
= mptsas_search_by_devhdl(tgttbl
, devhdl
);
6373 sas_wwn
= ptgt
->m_sas_wwn
;
6374 phy
= ptgt
->m_phynum
;
6376 addr
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
6379 (void) sprintf(addr
, "w%016"PRIx64
, sas_wwn
);
6381 (void) sprintf(addr
, "p%x", phy
);
6383 ASSERT(ptgt
->m_devhdl
== devhdl
);
6385 if ((topo_node
->flags
== MPTSAS_TOPO_FLAG_RAID_ASSOCIATED
) ||
6386 (topo_node
->flags
==
6387 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED
)) {
6389 * Get latest RAID info if RAID volume status changes
6390 * or Phys Disk status changes
6392 (void) mptsas_get_raid_info(mpt
);
6395 * Abort all outstanding command on the device
6397 rval
= mptsas_do_scsi_reset(mpt
, devhdl
);
6399 NDBG20(("mptsas%d handle_topo_change to reset target "
6400 "before offline devhdl:%x, phymask:%x, rval:%x",
6401 mpt
->m_instance
, ptgt
->m_devhdl
, ptgt
->m_phymask
,
6405 mutex_exit(&mpt
->m_mutex
);
6407 ndi_devi_enter(scsi_vhci_dip
, &circ
);
6408 ndi_devi_enter(parent
, &circ1
);
6409 rval
= mptsas_offline_target(parent
, addr
);
6410 ndi_devi_exit(parent
, circ1
);
6411 ndi_devi_exit(scsi_vhci_dip
, circ
);
6412 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6413 "phymask:%x, rval:%x", mpt
->m_instance
,
6414 ptgt
->m_devhdl
, ptgt
->m_phymask
, rval
));
6416 kmem_free(addr
, SCSI_MAXNAMELEN
);
6419 * Clear parent's props for SMHBA support
6421 flags
= topo_node
->flags
;
6422 if (flags
== MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE
) {
6423 bzero(attached_wwnstr
, sizeof (attached_wwnstr
));
6424 if (ddi_prop_update_string(DDI_DEV_T_NONE
, parent
,
6425 SCSI_ADDR_PROP_ATTACHED_PORT
, attached_wwnstr
) !=
6427 (void) ddi_prop_remove(DDI_DEV_T_NONE
, parent
,
6428 SCSI_ADDR_PROP_ATTACHED_PORT
);
6429 mptsas_log(mpt
, CE_WARN
, "mptsas attached port "
6430 "prop update failed");
6433 if (ddi_prop_update_int(DDI_DEV_T_NONE
, parent
,
6434 MPTSAS_NUM_PHYS
, 0) !=
6436 (void) ddi_prop_remove(DDI_DEV_T_NONE
, parent
,
6438 mptsas_log(mpt
, CE_WARN
, "mptsas num phys "
6439 "prop update failed");
6442 if (ddi_prop_update_int(DDI_DEV_T_NONE
, parent
,
6443 MPTSAS_VIRTUAL_PORT
, 1) !=
6445 (void) ddi_prop_remove(DDI_DEV_T_NONE
, parent
,
6446 MPTSAS_VIRTUAL_PORT
);
6447 mptsas_log(mpt
, CE_WARN
, "mptsas virtual port "
6448 "prop update failed");
6453 mutex_enter(&mpt
->m_mutex
);
6454 if (mptsas_set_led_status(mpt
, ptgt
, 0) != DDI_SUCCESS
) {
6455 NDBG14(("mptsas: clear LED for tgt %x failed",
6458 if (rval
== DDI_SUCCESS
) {
6459 mptsas_tgt_free(&mpt
->m_active
->m_tgttbl
,
6460 ptgt
->m_sas_wwn
, ptgt
->m_phymask
);
6464 * clean DR_INTRANSITION flag to allow I/O down to
6465 * PHCI driver since failover finished.
6466 * Invalidate the devhdl
6468 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
6469 ptgt
->m_devhdl
= MPTSAS_INVALID_DEVHDL
;
6470 ptgt
->m_tgt_unconfigured
= 0;
6471 ptgt
->m_dr_flag
= MPTSAS_DR_INACTIVE
;
6472 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
6476 * Send SAS IO Unit Control to free the dev handle
6478 if ((flags
== MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE
) ||
6479 (flags
== MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE
)) {
6480 rval
= mptsas_free_devhdl(mpt
, devhdl
);
6482 NDBG20(("mptsas%d handle_topo_change to remove "
6483 "devhdl:%x, rval:%x", mpt
->m_instance
, devhdl
,
6489 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE
:
6491 devhdl
= topo_node
->devhdl
;
6493 * If this is the remove handle event, do a reset first.
6495 if (topo_node
->event
== MPTSAS_TOPO_FLAG_REMOVE_HANDLE
) {
6496 rval
= mptsas_do_scsi_reset(mpt
, devhdl
);
6498 NDBG20(("mpt%d reset target before remove "
6499 "devhdl:%x, rval:%x", mpt
->m_instance
,
6505 * Send SAS IO Unit Control to free the dev handle
6507 rval
= mptsas_free_devhdl(mpt
, devhdl
);
6508 NDBG20(("mptsas%d handle_topo_change to remove "
6509 "devhdl:%x, rval:%x", mpt
->m_instance
, devhdl
,
6513 case MPTSAS_DR_EVENT_RECONFIG_SMP
:
6517 mptsas_hash_table_t
*smptbl
= &mpt
->m_active
->m_smptbl
;
6519 devhdl
= topo_node
->devhdl
;
6521 page_address
= (MPI2_SAS_EXPAND_PGAD_FORM_HNDL
&
6522 MPI2_SAS_EXPAND_PGAD_FORM_MASK
) | (uint32_t)devhdl
;
6523 rval
= mptsas_get_sas_expander_page0(mpt
, page_address
, &smp
);
6524 if (rval
!= DDI_SUCCESS
) {
6525 mptsas_log(mpt
, CE_WARN
, "failed to online smp, "
6526 "handle %x", devhdl
);
6530 psmp
= mptsas_smp_alloc(smptbl
, &smp
);
6535 mutex_exit(&mpt
->m_mutex
);
6536 ndi_devi_enter(parent
, &circ1
);
6537 (void) mptsas_online_smp(parent
, psmp
, &smpdip
);
6538 ndi_devi_exit(parent
, circ1
);
6540 mutex_enter(&mpt
->m_mutex
);
6543 case MPTSAS_DR_EVENT_OFFLINE_SMP
:
6545 mptsas_hash_table_t
*smptbl
= &mpt
->m_active
->m_smptbl
;
6546 devhdl
= topo_node
->devhdl
;
6549 psmp
= mptsas_search_by_devhdl(smptbl
, devhdl
);
6553 * The mptsas_smp_t data is released only if the dip is offlined
6556 mutex_exit(&mpt
->m_mutex
);
6558 ndi_devi_enter(parent
, &circ1
);
6559 rval
= mptsas_offline_smp(parent
, psmp
, NDI_DEVI_REMOVE
);
6560 ndi_devi_exit(parent
, circ1
);
6562 dev_info
= psmp
->m_deviceinfo
;
6563 if ((dev_info
& DEVINFO_DIRECT_ATTACHED
) ==
6564 DEVINFO_DIRECT_ATTACHED
) {
6565 if (ddi_prop_update_int(DDI_DEV_T_NONE
, parent
,
6566 MPTSAS_VIRTUAL_PORT
, 1) !=
6568 (void) ddi_prop_remove(DDI_DEV_T_NONE
, parent
,
6569 MPTSAS_VIRTUAL_PORT
);
6570 mptsas_log(mpt
, CE_WARN
, "mptsas virtual port "
6571 "prop update failed");
6575 * Check whether the smp connected to the iport,
6577 if (ddi_prop_update_int(DDI_DEV_T_NONE
, parent
,
6578 MPTSAS_NUM_PHYS
, 0) !=
6580 (void) ddi_prop_remove(DDI_DEV_T_NONE
, parent
,
6582 mptsas_log(mpt
, CE_WARN
, "mptsas num phys"
6583 "prop update failed");
6587 * Clear parent's attached-port props
6589 bzero(attached_wwnstr
, sizeof (attached_wwnstr
));
6590 if (ddi_prop_update_string(DDI_DEV_T_NONE
, parent
,
6591 SCSI_ADDR_PROP_ATTACHED_PORT
, attached_wwnstr
) !=
6593 (void) ddi_prop_remove(DDI_DEV_T_NONE
, parent
,
6594 SCSI_ADDR_PROP_ATTACHED_PORT
);
6595 mptsas_log(mpt
, CE_WARN
, "mptsas attached port "
6596 "prop update failed");
6601 mutex_enter(&mpt
->m_mutex
);
6602 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6603 "rval:%x", mpt
->m_instance
, psmp
->m_devhdl
, rval
));
6604 if (rval
== DDI_SUCCESS
) {
6605 mptsas_smp_free(smptbl
, psmp
->m_sasaddr
,
6608 psmp
->m_devhdl
= MPTSAS_INVALID_DEVHDL
;
6611 bzero(attached_wwnstr
, sizeof (attached_wwnstr
));
6621 * Record the event if its type is enabled in mpt instance by ioctl.
6624 mptsas_record_event(void *args
)
6626 m_replyh_arg_t
*replyh_arg
;
6627 pMpi2EventNotificationReply_t eventreply
;
6628 uint32_t event
, rfm
;
6631 uint16_t event_data_len
;
6632 boolean_t sendAEN
= FALSE
;
6634 replyh_arg
= (m_replyh_arg_t
*)args
;
6635 rfm
= replyh_arg
->rfm
;
6636 mpt
= replyh_arg
->mpt
;
6638 eventreply
= (pMpi2EventNotificationReply_t
)
6639 (mpt
->m_reply_frame
+ (rfm
- mpt
->m_reply_frame_dma_addr
));
6640 event
= ddi_get16(mpt
->m_acc_reply_frame_hdl
, &eventreply
->Event
);
6644 * Generate a system event to let anyone who cares know that a
6645 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6646 * event mask is set to.
6648 if (event
== MPI2_EVENT_LOG_ENTRY_ADDED
) {
6653 * Record the event only if it is not masked. Determine which dword
6654 * and bit of event mask to test.
6656 i
= (uint8_t)(event
/ 32);
6657 j
= (uint8_t)(event
% 32);
6658 if ((i
< 4) && ((1 << j
) & mpt
->m_event_mask
[i
])) {
6659 i
= mpt
->m_event_index
;
6660 mpt
->m_events
[i
].Type
= event
;
6661 mpt
->m_events
[i
].Number
= ++mpt
->m_event_number
;
6662 bzero(mpt
->m_events
[i
].Data
, MPTSAS_MAX_EVENT_DATA_LENGTH
* 4);
6663 event_data_len
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
6664 &eventreply
->EventDataLength
);
6666 if (event_data_len
> 0) {
6668 * Limit data to size in m_event entry
6670 if (event_data_len
> MPTSAS_MAX_EVENT_DATA_LENGTH
) {
6671 event_data_len
= MPTSAS_MAX_EVENT_DATA_LENGTH
;
6673 for (j
= 0; j
< event_data_len
; j
++) {
6674 mpt
->m_events
[i
].Data
[j
] =
6675 ddi_get32(mpt
->m_acc_reply_frame_hdl
,
6676 &(eventreply
->EventData
[j
]));
6680 * check for index wrap-around
6682 if (++i
== MPTSAS_EVENT_QUEUE_SIZE
) {
6685 mpt
->m_event_index
= (uint8_t)i
;
6688 * Set flag to send the event.
6695 * Generate a system event if flag is set to let anyone who cares know
6696 * that an event has occurred.
6699 (void) ddi_log_sysevent(mpt
->m_dip
, DDI_VENDOR_LSI
, "MPT_SAS",
6700 "SAS", NULL
, NULL
, DDI_NOSLEEP
);
6704 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6706 * handle sync events from ioc in interrupt
6708 * DDI_SUCCESS: The event is handled by this func
6709 * DDI_FAILURE: Event is not handled
6712 mptsas_handle_event_sync(void *args
)
6714 m_replyh_arg_t
*replyh_arg
;
6715 pMpi2EventNotificationReply_t eventreply
;
6716 uint32_t event
, rfm
;
6720 replyh_arg
= (m_replyh_arg_t
*)args
;
6721 rfm
= replyh_arg
->rfm
;
6722 mpt
= replyh_arg
->mpt
;
6724 ASSERT(mutex_owned(&mpt
->m_mutex
));
6726 eventreply
= (pMpi2EventNotificationReply_t
)
6727 (mpt
->m_reply_frame
+ (rfm
- mpt
->m_reply_frame_dma_addr
));
6728 event
= ddi_get16(mpt
->m_acc_reply_frame_hdl
, &eventreply
->Event
);
6730 if (iocstatus
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
6731 &eventreply
->IOCStatus
)) {
6732 if (iocstatus
== MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE
) {
6733 mptsas_log(mpt
, CE_WARN
,
6734 "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6735 "IOCLogInfo=0x%x", iocstatus
,
6736 ddi_get32(mpt
->m_acc_reply_frame_hdl
,
6737 &eventreply
->IOCLogInfo
));
6739 mptsas_log(mpt
, CE_WARN
,
6740 "mptsas_handle_event_sync: IOCStatus=0x%x, "
6741 "IOCLogInfo=0x%x", iocstatus
,
6742 ddi_get32(mpt
->m_acc_reply_frame_hdl
,
6743 &eventreply
->IOCLogInfo
));
6748 * figure out what kind of event we got and handle accordingly
6751 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST
:
6753 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list
;
6754 uint8_t num_entries
, expstatus
, phy
;
6755 uint8_t phystatus
, physport
, state
, i
;
6756 uint8_t start_phy_num
, link_rate
;
6757 uint16_t dev_handle
, reason_code
;
6758 uint16_t enc_handle
, expd_handle
;
6759 char string
[80], curr
[80], prev
[80];
6760 mptsas_topo_change_list_t
*topo_head
= NULL
;
6761 mptsas_topo_change_list_t
*topo_tail
= NULL
;
6762 mptsas_topo_change_list_t
*topo_node
= NULL
;
6763 mptsas_target_t
*ptgt
;
6765 mptsas_hash_table_t
*tgttbl
, *smptbl
;
6766 uint8_t flags
= 0, exp_flag
;
6767 smhba_info_t
*pSmhba
= NULL
;
6769 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6771 tgttbl
= &mpt
->m_active
->m_tgttbl
;
6772 smptbl
= &mpt
->m_active
->m_smptbl
;
6774 sas_topo_change_list
= (pMpi2EventDataSasTopologyChangeList_t
)
6775 eventreply
->EventData
;
6777 enc_handle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
6778 &sas_topo_change_list
->EnclosureHandle
);
6779 expd_handle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
6780 &sas_topo_change_list
->ExpanderDevHandle
);
6781 num_entries
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
6782 &sas_topo_change_list
->NumEntries
);
6783 start_phy_num
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
6784 &sas_topo_change_list
->StartPhyNum
);
6785 expstatus
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
6786 &sas_topo_change_list
->ExpStatus
);
6787 physport
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
6788 &sas_topo_change_list
->PhysicalPort
);
6792 flags
= MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED
;
6793 switch (expstatus
) {
6794 case MPI2_EVENT_SAS_TOPO_ES_ADDED
:
6795 (void) sprintf(string
, " added");
6797 * New expander device added
6799 mpt
->m_port_chng
= 1;
6800 topo_node
= kmem_zalloc(
6801 sizeof (mptsas_topo_change_list_t
),
6803 topo_node
->mpt
= mpt
;
6804 topo_node
->event
= MPTSAS_DR_EVENT_RECONFIG_SMP
;
6805 topo_node
->un
.physport
= physport
;
6806 topo_node
->devhdl
= expd_handle
;
6807 topo_node
->flags
= flags
;
6808 topo_node
->object
= NULL
;
6809 if (topo_head
== NULL
) {
6810 topo_head
= topo_tail
= topo_node
;
6812 topo_tail
->next
= topo_node
;
6813 topo_tail
= topo_node
;
6816 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING
:
6817 (void) sprintf(string
, " not responding, "
6819 psmp
= mptsas_search_by_devhdl(smptbl
,
6824 topo_node
= kmem_zalloc(
6825 sizeof (mptsas_topo_change_list_t
),
6827 topo_node
->mpt
= mpt
;
6828 topo_node
->un
.phymask
= psmp
->m_phymask
;
6829 topo_node
->event
= MPTSAS_DR_EVENT_OFFLINE_SMP
;
6830 topo_node
->devhdl
= expd_handle
;
6831 topo_node
->flags
= flags
;
6832 topo_node
->object
= NULL
;
6833 if (topo_head
== NULL
) {
6834 topo_head
= topo_tail
= topo_node
;
6836 topo_tail
->next
= topo_node
;
6837 topo_tail
= topo_node
;
6840 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING
:
6842 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING
:
6843 (void) sprintf(string
, " not responding, "
6844 "delaying removal");
6850 flags
= MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE
;
6853 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6854 enc_handle
, expd_handle
, string
));
6855 for (i
= 0; i
< num_entries
; i
++) {
6856 phy
= i
+ start_phy_num
;
6857 phystatus
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
6858 &sas_topo_change_list
->PHY
[i
].PhyStatus
);
6859 dev_handle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
6860 &sas_topo_change_list
->PHY
[i
].AttachedDevHandle
);
6861 reason_code
= phystatus
& MPI2_EVENT_SAS_TOPO_RC_MASK
;
6863 * Filter out processing of Phy Vacant Status unless
6864 * the reason code is "Not Responding". Process all
6865 * other combinations of Phy Status and Reason Codes.
6868 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT
) &&
6870 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING
)) {
6876 switch (reason_code
) {
6877 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED
:
6879 NDBG20(("mptsas%d phy %d physical_port %d "
6880 "dev_handle %d added", mpt
->m_instance
, phy
,
6881 physport
, dev_handle
));
6882 link_rate
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
6883 &sas_topo_change_list
->PHY
[i
].LinkRate
);
6884 state
= (link_rate
&
6885 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK
) >>
6886 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT
;
6888 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED
:
6889 (void) sprintf(curr
, "is disabled");
6891 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED
:
6892 (void) sprintf(curr
, "is offline, "
6893 "failed speed negotiation");
6895 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE
:
6896 (void) sprintf(curr
, "SATA OOB "
6899 case SMP_RESET_IN_PROGRESS
:
6900 (void) sprintf(curr
, "SMP reset in "
6903 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5
:
6904 (void) sprintf(curr
, "is online at "
6907 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0
:
6908 (void) sprintf(curr
, "is online at 3.0 "
6911 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0
:
6912 (void) sprintf(curr
, "is online at 6.0 "
6916 (void) sprintf(curr
, "state is "
6921 * New target device added into the system.
6922 * Set association flag according to if an
6923 * expander is used or not.
6926 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE
;
6928 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED
) {
6931 topo_node
= kmem_zalloc(
6932 sizeof (mptsas_topo_change_list_t
),
6934 topo_node
->mpt
= mpt
;
6936 MPTSAS_DR_EVENT_RECONFIG_TARGET
;
6937 if (expd_handle
== 0) {
6939 * Per MPI 2, if expander dev handle
6940 * is 0, it's a directly attached
6941 * device. So driver use PHY to decide
6942 * which iport is associated
6945 mpt
->m_port_chng
= 1;
6947 topo_node
->un
.physport
= physport
;
6948 topo_node
->devhdl
= dev_handle
;
6949 topo_node
->flags
= flags
;
6950 topo_node
->object
= NULL
;
6951 if (topo_head
== NULL
) {
6952 topo_head
= topo_tail
= topo_node
;
6954 topo_tail
->next
= topo_node
;
6955 topo_tail
= topo_node
;
6959 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING
:
6961 NDBG20(("mptsas%d phy %d physical_port %d "
6962 "dev_handle %d removed", mpt
->m_instance
,
6963 phy
, physport
, dev_handle
));
6965 * Set association flag according to if an
6966 * expander is used or not.
6969 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE
;
6971 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED
) {
6975 * Target device is removed from the system
6976 * Before the device is really offline from
6979 ptgt
= mptsas_search_by_devhdl(tgttbl
,
6982 * If ptgt is NULL here, it means that the
6983 * DevHandle is not in the hash table. This is
6984 * reasonable sometimes. For example, if a
6985 * disk was pulled, then added, then pulled
6986 * again, the disk will not have been put into
6987 * the hash table because the add event will
6988 * have an invalid phymask. BUT, this does not
6989 * mean that the DevHandle is invalid. The
6990 * controller will still have a valid DevHandle
6991 * that must be removed. To do this, use the
6992 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6995 topo_node
= kmem_zalloc(
6996 sizeof (mptsas_topo_change_list_t
),
6998 topo_node
->mpt
= mpt
;
6999 topo_node
->un
.phymask
= 0;
7001 MPTSAS_TOPO_FLAG_REMOVE_HANDLE
;
7002 topo_node
->devhdl
= dev_handle
;
7003 topo_node
->flags
= flags
;
7004 topo_node
->object
= NULL
;
7005 if (topo_head
== NULL
) {
7006 topo_head
= topo_tail
=
7009 topo_tail
->next
= topo_node
;
7010 topo_tail
= topo_node
;
7016 * Update DR flag immediately avoid I/O failure
7017 * before failover finish. Pay attention to the
7018 * mutex protect, we need grab the per target
7019 * mutex during set m_dr_flag because the
7020 * m_mutex would not be held all the time in
7021 * mptsas_scsi_start().
7023 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
7024 ptgt
->m_dr_flag
= MPTSAS_DR_INTRANSITION
;
7025 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
7027 topo_node
= kmem_zalloc(
7028 sizeof (mptsas_topo_change_list_t
),
7030 topo_node
->mpt
= mpt
;
7031 topo_node
->un
.phymask
= ptgt
->m_phymask
;
7033 MPTSAS_DR_EVENT_OFFLINE_TARGET
;
7034 topo_node
->devhdl
= dev_handle
;
7035 topo_node
->flags
= flags
;
7036 topo_node
->object
= NULL
;
7037 if (topo_head
== NULL
) {
7038 topo_head
= topo_tail
= topo_node
;
7040 topo_tail
->next
= topo_node
;
7041 topo_tail
= topo_node
;
7045 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED
:
7046 link_rate
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7047 &sas_topo_change_list
->PHY
[i
].LinkRate
);
7048 state
= (link_rate
&
7049 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK
) >>
7050 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT
;
7051 pSmhba
= &mpt
->m_phy_info
[i
].smhba_info
;
7052 pSmhba
->negotiated_link_rate
= state
;
7054 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED
:
7055 (void) sprintf(curr
, "is disabled");
7056 mptsas_smhba_log_sysevent(mpt
,
7059 &mpt
->m_phy_info
[i
].smhba_info
);
7060 mpt
->m_phy_info
[i
].smhba_info
.
7061 negotiated_link_rate
7064 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED
:
7065 (void) sprintf(curr
, "is offline, "
7066 "failed speed negotiation");
7067 mptsas_smhba_log_sysevent(mpt
,
7070 &mpt
->m_phy_info
[i
].smhba_info
);
7072 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE
:
7073 (void) sprintf(curr
, "SATA OOB "
7076 case SMP_RESET_IN_PROGRESS
:
7077 (void) sprintf(curr
, "SMP reset in "
7080 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5
:
7081 (void) sprintf(curr
, "is online at "
7083 if ((expd_handle
== 0) &&
7084 (enc_handle
== 1)) {
7085 mpt
->m_port_chng
= 1;
7087 mptsas_smhba_log_sysevent(mpt
,
7090 &mpt
->m_phy_info
[i
].smhba_info
);
7092 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0
:
7093 (void) sprintf(curr
, "is online at 3.0 "
7095 if ((expd_handle
== 0) &&
7096 (enc_handle
== 1)) {
7097 mpt
->m_port_chng
= 1;
7099 mptsas_smhba_log_sysevent(mpt
,
7102 &mpt
->m_phy_info
[i
].smhba_info
);
7104 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0
:
7105 (void) sprintf(curr
, "is online at "
7107 if ((expd_handle
== 0) &&
7108 (enc_handle
== 1)) {
7109 mpt
->m_port_chng
= 1;
7111 mptsas_smhba_log_sysevent(mpt
,
7114 &mpt
->m_phy_info
[i
].smhba_info
);
7117 (void) sprintf(curr
, "state is "
7122 state
= (link_rate
&
7123 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK
) >>
7124 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT
;
7126 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED
:
7127 (void) sprintf(prev
, ", was disabled");
7129 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED
:
7130 (void) sprintf(prev
, ", was offline, "
7131 "failed speed negotiation");
7133 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE
:
7134 (void) sprintf(prev
, ", was SATA OOB "
7137 case SMP_RESET_IN_PROGRESS
:
7138 (void) sprintf(prev
, ", was SMP reset "
7141 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5
:
7142 (void) sprintf(prev
, ", was online at "
7145 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0
:
7146 (void) sprintf(prev
, ", was online at "
7149 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0
:
7150 (void) sprintf(prev
, ", was online at "
7156 (void) sprintf(&string
[strlen(string
)], "link "
7159 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE
:
7161 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING
:
7162 (void) sprintf(&string
[strlen(string
)],
7163 "target not responding, delaying "
7167 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7168 mpt
->m_instance
, phy
, dev_handle
, string
, curr
,
7171 if (topo_head
!= NULL
) {
7173 * Launch DR taskq to handle topology change
7175 if ((ddi_taskq_dispatch(mpt
->m_dr_taskq
,
7176 mptsas_handle_dr
, (void *)topo_head
,
7177 DDI_NOSLEEP
)) != DDI_SUCCESS
) {
7178 mptsas_log(mpt
, CE_NOTE
, "mptsas start taskq "
7179 "for handle SAS DR event failed. \n");
7184 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST
:
7186 Mpi2EventDataIrConfigChangeList_t
*irChangeList
;
7187 mptsas_topo_change_list_t
*topo_head
= NULL
;
7188 mptsas_topo_change_list_t
*topo_tail
= NULL
;
7189 mptsas_topo_change_list_t
*topo_node
= NULL
;
7190 mptsas_target_t
*ptgt
;
7191 mptsas_hash_table_t
*tgttbl
;
7192 uint8_t num_entries
, i
, reason
;
7193 uint16_t volhandle
, diskhandle
;
7195 irChangeList
= (pMpi2EventDataIrConfigChangeList_t
)
7196 eventreply
->EventData
;
7197 num_entries
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7198 &irChangeList
->NumElements
);
7200 tgttbl
= &mpt
->m_active
->m_tgttbl
;
7202 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7205 for (i
= 0; i
< num_entries
; i
++) {
7206 reason
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7207 &irChangeList
->ConfigElement
[i
].ReasonCode
);
7208 volhandle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7209 &irChangeList
->ConfigElement
[i
].VolDevHandle
);
7210 diskhandle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7211 &irChangeList
->ConfigElement
[i
].PhysDiskDevHandle
);
7214 case MPI2_EVENT_IR_CHANGE_RC_ADDED
:
7215 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED
:
7217 NDBG20(("mptsas %d volume added\n",
7220 topo_node
= kmem_zalloc(
7221 sizeof (mptsas_topo_change_list_t
),
7224 topo_node
->mpt
= mpt
;
7226 MPTSAS_DR_EVENT_RECONFIG_TARGET
;
7227 topo_node
->un
.physport
= 0xff;
7228 topo_node
->devhdl
= volhandle
;
7230 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED
;
7231 topo_node
->object
= NULL
;
7232 if (topo_head
== NULL
) {
7233 topo_head
= topo_tail
= topo_node
;
7235 topo_tail
->next
= topo_node
;
7236 topo_tail
= topo_node
;
7240 case MPI2_EVENT_IR_CHANGE_RC_REMOVED
:
7241 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED
:
7243 NDBG20(("mptsas %d volume deleted\n",
7245 ptgt
= mptsas_search_by_devhdl(tgttbl
,
7251 * Clear any flags related to volume
7253 (void) mptsas_delete_volume(mpt
, volhandle
);
7256 * Update DR flag immediately avoid I/O failure
7258 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
7259 ptgt
->m_dr_flag
= MPTSAS_DR_INTRANSITION
;
7260 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
7262 topo_node
= kmem_zalloc(
7263 sizeof (mptsas_topo_change_list_t
),
7265 topo_node
->mpt
= mpt
;
7266 topo_node
->un
.phymask
= ptgt
->m_phymask
;
7268 MPTSAS_DR_EVENT_OFFLINE_TARGET
;
7269 topo_node
->devhdl
= volhandle
;
7271 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED
;
7272 topo_node
->object
= (void *)ptgt
;
7273 if (topo_head
== NULL
) {
7274 topo_head
= topo_tail
= topo_node
;
7276 topo_tail
->next
= topo_node
;
7277 topo_tail
= topo_node
;
7281 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED
:
7282 case MPI2_EVENT_IR_CHANGE_RC_HIDE
:
7284 ptgt
= mptsas_search_by_devhdl(tgttbl
,
7290 * Update DR flag immediately avoid I/O failure
7292 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
7293 ptgt
->m_dr_flag
= MPTSAS_DR_INTRANSITION
;
7294 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
7296 topo_node
= kmem_zalloc(
7297 sizeof (mptsas_topo_change_list_t
),
7299 topo_node
->mpt
= mpt
;
7300 topo_node
->un
.phymask
= ptgt
->m_phymask
;
7302 MPTSAS_DR_EVENT_OFFLINE_TARGET
;
7303 topo_node
->devhdl
= diskhandle
;
7305 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED
;
7306 topo_node
->object
= (void *)ptgt
;
7307 if (topo_head
== NULL
) {
7308 topo_head
= topo_tail
= topo_node
;
7310 topo_tail
->next
= topo_node
;
7311 topo_tail
= topo_node
;
7315 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE
:
7316 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED
:
7319 * The physical drive is released by a IR
7320 * volume. But we cannot get the the physport
7321 * or phynum from the event data, so we only
7322 * can get the physport/phynum after SAS
7323 * Device Page0 request for the devhdl.
7325 topo_node
= kmem_zalloc(
7326 sizeof (mptsas_topo_change_list_t
),
7328 topo_node
->mpt
= mpt
;
7329 topo_node
->un
.phymask
= 0;
7331 MPTSAS_DR_EVENT_RECONFIG_TARGET
;
7332 topo_node
->devhdl
= diskhandle
;
7334 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED
;
7335 topo_node
->object
= NULL
;
7336 mpt
->m_port_chng
= 1;
7337 if (topo_head
== NULL
) {
7338 topo_head
= topo_tail
= topo_node
;
7340 topo_tail
->next
= topo_node
;
7341 topo_tail
= topo_node
;
7350 if (topo_head
!= NULL
) {
7352 * Launch DR taskq to handle topology change
7354 if ((ddi_taskq_dispatch(mpt
->m_dr_taskq
,
7355 mptsas_handle_dr
, (void *)topo_head
,
7356 DDI_NOSLEEP
)) != DDI_SUCCESS
) {
7357 mptsas_log(mpt
, CE_NOTE
, "mptsas start taskq "
7358 "for handle SAS DR event failed. \n");
7364 return (DDI_FAILURE
);
7367 return (DDI_SUCCESS
);
7371 * handle events from ioc
7374 mptsas_handle_event(void *args
)
7376 m_replyh_arg_t
*replyh_arg
;
7377 pMpi2EventNotificationReply_t eventreply
;
7378 uint32_t event
, iocloginfo
, rfm
;
7384 replyh_arg
= (m_replyh_arg_t
*)args
;
7385 rfm
= replyh_arg
->rfm
;
7386 mpt
= replyh_arg
->mpt
;
7388 mutex_enter(&mpt
->m_mutex
);
7390 eventreply
= (pMpi2EventNotificationReply_t
)
7391 (mpt
->m_reply_frame
+ (rfm
- mpt
->m_reply_frame_dma_addr
));
7392 event
= ddi_get16(mpt
->m_acc_reply_frame_hdl
, &eventreply
->Event
);
7394 if (iocstatus
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7395 &eventreply
->IOCStatus
)) {
7396 if (iocstatus
== MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE
) {
7397 mptsas_log(mpt
, CE_WARN
,
7398 "!mptsas_handle_event: IOCStatus=0x%x, "
7399 "IOCLogInfo=0x%x", iocstatus
,
7400 ddi_get32(mpt
->m_acc_reply_frame_hdl
,
7401 &eventreply
->IOCLogInfo
));
7403 mptsas_log(mpt
, CE_WARN
,
7404 "mptsas_handle_event: IOCStatus=0x%x, "
7405 "IOCLogInfo=0x%x", iocstatus
,
7406 ddi_get32(mpt
->m_acc_reply_frame_hdl
,
7407 &eventreply
->IOCLogInfo
));
7412 * figure out what kind of event we got and handle accordingly
7415 case MPI2_EVENT_LOG_ENTRY_ADDED
:
7417 case MPI2_EVENT_LOG_DATA
:
7418 iocloginfo
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
7419 &eventreply
->IOCLogInfo
);
7420 NDBG20(("mptsas %d log info %x received.\n", mpt
->m_instance
,
7423 case MPI2_EVENT_STATE_CHANGE
:
7424 NDBG20(("mptsas%d state change.", mpt
->m_instance
));
7426 case MPI2_EVENT_HARD_RESET_RECEIVED
:
7427 NDBG20(("mptsas%d event change.", mpt
->m_instance
));
7429 case MPI2_EVENT_SAS_DISCOVERY
:
7431 MPI2_EVENT_DATA_SAS_DISCOVERY
*sasdiscovery
;
7436 (pMpi2EventDataSasDiscovery_t
)eventreply
->EventData
;
7438 rc
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7439 &sasdiscovery
->ReasonCode
);
7440 port
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7441 &sasdiscovery
->PhysicalPort
);
7442 status
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
7443 &sasdiscovery
->DiscoveryStatus
);
7447 case MPI2_EVENT_SAS_DISC_RC_STARTED
:
7448 (void) sprintf(string
, "STARTING");
7450 case MPI2_EVENT_SAS_DISC_RC_COMPLETED
:
7451 (void) sprintf(string
, "COMPLETED");
7454 (void) sprintf(string
, "UNKNOWN");
7458 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string
,
7463 case MPI2_EVENT_EVENT_CHANGE
:
7464 NDBG20(("mptsas%d event change.", mpt
->m_instance
));
7466 case MPI2_EVENT_TASK_SET_FULL
:
7468 pMpi2EventDataTaskSetFull_t taskfull
;
7470 taskfull
= (pMpi2EventDataTaskSetFull_t
)eventreply
->EventData
;
7472 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7473 mpt
->m_instance
, ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7474 &taskfull
->CurrentDepth
)));
7477 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST
:
7480 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7481 * in mptsas_handle_event_sync() of interrupt context
7485 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
:
7487 pMpi2EventDataSasEnclDevStatusChange_t encstatus
;
7491 encstatus
= (pMpi2EventDataSasEnclDevStatusChange_t
)
7492 eventreply
->EventData
;
7494 rc
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7495 &encstatus
->ReasonCode
);
7497 case MPI2_EVENT_SAS_ENCL_RC_ADDED
:
7498 (void) sprintf(string
, "added");
7500 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING
:
7501 (void) sprintf(string
, ", not responding");
7506 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7507 mpt
->m_instance
, ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7508 &encstatus
->EnclosureHandle
), string
));
7513 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7514 * mptsas_handle_event_sync,in here just send ack message.
7516 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE
:
7518 pMpi2EventDataSasDeviceStatusChange_t statuschange
;
7522 uint32_t wwn_lo
, wwn_hi
;
7524 statuschange
= (pMpi2EventDataSasDeviceStatusChange_t
)
7525 eventreply
->EventData
;
7526 rc
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7527 &statuschange
->ReasonCode
);
7528 wwn_lo
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
7529 (uint32_t *)(void *)&statuschange
->SASAddress
);
7530 wwn_hi
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
7531 (uint32_t *)(void *)&statuschange
->SASAddress
+ 1);
7532 wwn
= ((uint64_t)wwn_hi
<< 32) | wwn_lo
;
7533 devhdl
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7534 &statuschange
->DevHandle
);
7536 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64
,
7540 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA
:
7541 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7542 ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7543 &statuschange
->ASC
),
7544 ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7545 &statuschange
->ASCQ
)));
7548 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED
:
7549 NDBG20(("Device not supported"));
7552 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET
:
7553 NDBG20(("IOC internally generated the Target Reset "
7554 "for devhdl:%x", devhdl
));
7557 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET
:
7558 NDBG20(("IOC's internally generated Target Reset "
7559 "completed for devhdl:%x", devhdl
));
7562 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL
:
7563 NDBG20(("IOC internally generated Abort Task"));
7566 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL
:
7567 NDBG20(("IOC's internally generated Abort Task "
7571 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL
:
7572 NDBG20(("IOC internally generated Abort Task Set"));
7575 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL
:
7576 NDBG20(("IOC internally generated Clear Task Set"));
7579 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL
:
7580 NDBG20(("IOC internally generated Query Task"));
7583 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION
:
7584 NDBG20(("Device sent an Asynchronous Notification"));
7592 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST
:
7595 * IR TOPOLOGY CHANGE LIST Event has already been handled
7596 * in mpt_handle_event_sync() of interrupt context
7600 case MPI2_EVENT_IR_OPERATION_STATUS
:
7602 Mpi2EventDataIrOperationStatus_t
*irOpStatus
;
7603 char reason_str
[80];
7604 uint8_t rc
, percent
;
7607 irOpStatus
= (pMpi2EventDataIrOperationStatus_t
)
7608 eventreply
->EventData
;
7609 rc
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7610 &irOpStatus
->RAIDOperation
);
7611 percent
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7612 &irOpStatus
->PercentComplete
);
7613 handle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7614 &irOpStatus
->VolDevHandle
);
7617 case MPI2_EVENT_IR_RAIDOP_RESYNC
:
7618 (void) sprintf(reason_str
, "resync");
7620 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION
:
7621 (void) sprintf(reason_str
, "online capacity "
7624 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK
:
7625 (void) sprintf(reason_str
, "consistency check");
7628 (void) sprintf(reason_str
, "unknown reason %x",
7632 NDBG20(("mptsas%d raid operational status: (%s)"
7633 "\thandle(0x%04x), percent complete(%d)\n",
7634 mpt
->m_instance
, reason_str
, handle
, percent
));
7637 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE
:
7639 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast
;
7643 sas_broadcast
= (pMpi2EventDataSasBroadcastPrimitive_t
)
7644 eventreply
->EventData
;
7646 phy_num
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7647 &sas_broadcast
->PhyNum
);
7648 primitive
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7649 &sas_broadcast
->Primitive
);
7651 switch (primitive
) {
7652 case MPI2_EVENT_PRIMITIVE_CHANGE
:
7653 mptsas_smhba_log_sysevent(mpt
,
7654 ESC_SAS_HBA_PORT_BROADCAST
,
7655 SAS_PORT_BROADCAST_CHANGE
,
7656 &mpt
->m_phy_info
[phy_num
].smhba_info
);
7658 case MPI2_EVENT_PRIMITIVE_SES
:
7659 mptsas_smhba_log_sysevent(mpt
,
7660 ESC_SAS_HBA_PORT_BROADCAST
,
7661 SAS_PORT_BROADCAST_SES
,
7662 &mpt
->m_phy_info
[phy_num
].smhba_info
);
7664 case MPI2_EVENT_PRIMITIVE_EXPANDER
:
7665 mptsas_smhba_log_sysevent(mpt
,
7666 ESC_SAS_HBA_PORT_BROADCAST
,
7667 SAS_PORT_BROADCAST_D01_4
,
7668 &mpt
->m_phy_info
[phy_num
].smhba_info
);
7670 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT
:
7671 mptsas_smhba_log_sysevent(mpt
,
7672 ESC_SAS_HBA_PORT_BROADCAST
,
7673 SAS_PORT_BROADCAST_D04_7
,
7674 &mpt
->m_phy_info
[phy_num
].smhba_info
);
7676 case MPI2_EVENT_PRIMITIVE_RESERVED3
:
7677 mptsas_smhba_log_sysevent(mpt
,
7678 ESC_SAS_HBA_PORT_BROADCAST
,
7679 SAS_PORT_BROADCAST_D16_7
,
7680 &mpt
->m_phy_info
[phy_num
].smhba_info
);
7682 case MPI2_EVENT_PRIMITIVE_RESERVED4
:
7683 mptsas_smhba_log_sysevent(mpt
,
7684 ESC_SAS_HBA_PORT_BROADCAST
,
7685 SAS_PORT_BROADCAST_D29_7
,
7686 &mpt
->m_phy_info
[phy_num
].smhba_info
);
7688 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED
:
7689 mptsas_smhba_log_sysevent(mpt
,
7690 ESC_SAS_HBA_PORT_BROADCAST
,
7691 SAS_PORT_BROADCAST_D24_0
,
7692 &mpt
->m_phy_info
[phy_num
].smhba_info
);
7694 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED
:
7695 mptsas_smhba_log_sysevent(mpt
,
7696 ESC_SAS_HBA_PORT_BROADCAST
,
7697 SAS_PORT_BROADCAST_D27_4
,
7698 &mpt
->m_phy_info
[phy_num
].smhba_info
);
7701 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7703 mpt
->m_instance
, primitive
));
7706 NDBG20(("mptsas%d sas broadcast primitive: "
7707 "\tprimitive(0x%04x), phy(%d) complete\n",
7708 mpt
->m_instance
, primitive
, phy_num
));
7711 case MPI2_EVENT_IR_VOLUME
:
7713 Mpi2EventDataIrVolume_t
*irVolume
;
7717 mptsas_slots_t
*slots
= mpt
->m_active
;
7718 uint8_t found
= FALSE
;
7720 irVolume
= (pMpi2EventDataIrVolume_t
)eventreply
->EventData
;
7721 state
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
7722 &irVolume
->NewValue
);
7723 devhandle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7724 &irVolume
->VolDevHandle
);
7726 NDBG20(("EVENT_IR_VOLUME event is received"));
7729 * Get latest RAID info and then find the DevHandle for this
7730 * event in the configuration. If the DevHandle is not found
7731 * just exit the event.
7733 (void) mptsas_get_raid_info(mpt
);
7734 for (config
= 0; (config
< slots
->m_num_raid_configs
) &&
7735 (!found
); config
++) {
7736 for (vol
= 0; vol
< MPTSAS_MAX_RAIDVOLS
; vol
++) {
7737 if (slots
->m_raidconfig
[config
].m_raidvol
[vol
].
7738 m_raidhandle
== devhandle
) {
7748 switch (irVolume
->ReasonCode
) {
7749 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED
:
7752 slots
->m_raidconfig
[config
].m_raidvol
[vol
].m_settings
=
7755 i
= state
& MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING
;
7756 mptsas_log(mpt
, CE_NOTE
, " Volume %d settings changed"
7757 ", auto-config of hot-swap drives is %s"
7758 ", write caching is %s"
7759 ", hot-spare pool mask is %02x\n",
7761 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7762 ? "disabled" : "enabled",
7763 i
== MPI2_RAIDVOL0_SETTING_UNCHANGED
7764 ? "controlled by member disks" :
7765 i
== MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7767 i
== MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7770 (state
>> 16) & 0xff);
7773 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED
:
7775 slots
->m_raidconfig
[config
].m_raidvol
[vol
].m_state
=
7778 mptsas_log(mpt
, CE_NOTE
,
7779 "Volume %d is now %s\n", vol
,
7780 state
== MPI2_RAID_VOL_STATE_OPTIMAL
7782 state
== MPI2_RAID_VOL_STATE_DEGRADED
7784 state
== MPI2_RAID_VOL_STATE_ONLINE
7786 state
== MPI2_RAID_VOL_STATE_INITIALIZING
7788 state
== MPI2_RAID_VOL_STATE_FAILED
7790 state
== MPI2_RAID_VOL_STATE_MISSING
7795 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED
:
7797 slots
->m_raidconfig
[config
].m_raidvol
[vol
].
7798 m_statusflags
= state
;
7800 mptsas_log(mpt
, CE_NOTE
,
7801 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7803 state
& MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7804 ? ", enabled" : ", disabled",
7805 state
& MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7806 ? ", quiesced" : "",
7807 state
& MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7808 ? ", inactive" : ", active",
7810 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7811 ? ", bad block table is full" : "",
7813 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7814 ? ", resync in progress" : "",
7815 state
& MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7816 ? ", background initialization in progress" : "",
7818 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7819 ? ", capacity expansion in progress" : "",
7821 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7822 ? ", consistency check in progress" : "",
7823 state
& MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7824 ? ", data scrub in progress" : "");
7832 case MPI2_EVENT_IR_PHYSICAL_DISK
:
7834 Mpi2EventDataIrPhysicalDisk_t
*irPhysDisk
;
7835 uint16_t devhandle
, enchandle
, slot
;
7836 uint32_t status
, state
;
7837 uint8_t physdisknum
, reason
;
7839 irPhysDisk
= (Mpi2EventDataIrPhysicalDisk_t
*)
7840 eventreply
->EventData
;
7841 physdisknum
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7842 &irPhysDisk
->PhysDiskNum
);
7843 devhandle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7844 &irPhysDisk
->PhysDiskDevHandle
);
7845 enchandle
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7846 &irPhysDisk
->EnclosureHandle
);
7847 slot
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
7849 state
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
7850 &irPhysDisk
->NewValue
);
7851 reason
= ddi_get8(mpt
->m_acc_reply_frame_hdl
,
7852 &irPhysDisk
->ReasonCode
);
7854 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7857 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED
:
7858 mptsas_log(mpt
, CE_NOTE
,
7859 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7860 "for enclosure with handle 0x%x is now in hot "
7862 physdisknum
, devhandle
, slot
, enchandle
,
7863 (state
>> 16) & 0xff);
7866 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED
:
7868 mptsas_log(mpt
, CE_NOTE
,
7869 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7870 "for enclosure with handle 0x%x is now "
7871 "%s%s%s%s%s\n", physdisknum
, devhandle
, slot
,
7873 status
& MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7874 ? ", inactive" : ", active",
7875 status
& MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7876 ? ", out of sync" : "",
7877 status
& MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7878 ? ", quiesced" : "",
7880 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7881 ? ", write cache enabled" : "",
7882 status
& MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7883 ? ", capacity expansion target" : "");
7886 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED
:
7887 mptsas_log(mpt
, CE_NOTE
,
7888 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7889 "for enclosure with handle 0x%x is now %s\n",
7890 physdisknum
, devhandle
, slot
, enchandle
,
7891 state
== MPI2_RAID_PD_STATE_OPTIMAL
7893 state
== MPI2_RAID_PD_STATE_REBUILDING
7895 state
== MPI2_RAID_PD_STATE_DEGRADED
7897 state
== MPI2_RAID_PD_STATE_HOT_SPARE
7899 state
== MPI2_RAID_PD_STATE_ONLINE
7901 state
== MPI2_RAID_PD_STATE_OFFLINE
7903 state
== MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7904 ? "not compatible" :
7905 state
== MPI2_RAID_PD_STATE_NOT_CONFIGURED
7906 ? "not configured" :
7913 NDBG20(("mptsas%d: unknown event %x received",
7914 mpt
->m_instance
, event
));
7919 * Return the reply frame to the free queue.
7921 ddi_put32(mpt
->m_acc_free_queue_hdl
,
7922 &((uint32_t *)(void *)mpt
->m_free_queue
)[mpt
->m_free_index
], rfm
);
7923 (void) ddi_dma_sync(mpt
->m_dma_free_queue_hdl
, 0, 0,
7924 DDI_DMA_SYNC_FORDEV
);
7925 if (++mpt
->m_free_index
== mpt
->m_free_queue_depth
) {
7926 mpt
->m_free_index
= 0;
7928 ddi_put32(mpt
->m_datap
, &mpt
->m_reg
->ReplyFreeHostIndex
,
7930 mutex_exit(&mpt
->m_mutex
);
7934 * invoked from timeout() to restart qfull cmds with throttle == 0
7937 mptsas_restart_cmd(void *arg
)
7939 mptsas_t
*mpt
= arg
;
7940 mptsas_target_t
*ptgt
= NULL
;
7942 mutex_enter(&mpt
->m_mutex
);
7944 mpt
->m_restart_cmd_timeid
= 0;
7946 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
7948 while (ptgt
!= NULL
) {
7949 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
7950 if (ptgt
->m_reset_delay
== 0) {
7951 if (ptgt
->m_t_throttle
== QFULL_THROTTLE
) {
7952 mptsas_set_throttle(mpt
, ptgt
,
7956 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
7958 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
7959 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
7961 mptsas_restart_hba(mpt
);
7962 mutex_exit(&mpt
->m_mutex
);
7966 * mptsas_remove_cmd0 is similar to mptsas_remove_cmd except that it is called
7967 * where m_intr_mutex has already been held.
7970 mptsas_remove_cmd(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
7972 ASSERT(mutex_owned(&mpt
->m_mutex
));
7975 * With new fine-grained lock mechanism, the outstanding cmd is only
7976 * linked to m_active before the dma is triggerred(MPTSAS_START_CMD)
7977 * to send it. that is, mptsas_save_cmd() doesn't link the outstanding
7978 * cmd now. So when mptsas_remove_cmd is called, a mptsas_save_cmd must
7979 * have been called, but the cmd may have not been linked.
7980 * For mptsas_remove_cmd0, the cmd must have been linked.
7981 * In order to keep the same semantic, we link the cmd to the
7982 * outstanding cmd list.
7984 mpt
->m_active
->m_slot
[cmd
->cmd_slot
] = cmd
;
7986 mutex_enter(&mpt
->m_intr_mutex
);
7987 mptsas_remove_cmd0(mpt
, cmd
);
7988 mutex_exit(&mpt
->m_intr_mutex
);
7992 mptsas_remove_cmd0(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
7995 mptsas_slots_t
*slots
= mpt
->m_active
;
7997 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
7998 mptsas_slot_free_e_t
*pe
;
8000 ASSERT(cmd
!= NULL
);
8001 ASSERT(cmd
->cmd_queued
== FALSE
);
8004 * Task Management cmds are removed in their own routines. Also,
8005 * we don't want to modify timeout based on TM cmds.
8007 if (cmd
->cmd_flags
& CFLAG_TM_CMD
) {
8012 slot
= cmd
->cmd_slot
;
8013 pe
= mpt
->m_slot_free_ae
+ slot
- 1;
8014 ASSERT(cmd
== slots
->m_slot
[slot
]);
8015 ASSERT((slot
> 0) && slot
< (mpt
->m_max_requests
- 1));
8020 mutex_enter(&mpt
->m_slot_freeq_pairp
[pe
->cpuid
].
8021 m_slot_releq
.s
.m_fq_mutex
);
8022 NDBG31(("mptsas_remove_cmd0: removing cmd=0x%p", (void *)cmd
));
8023 slots
->m_slot
[slot
] = NULL
;
8024 ASSERT(pe
->slot
== slot
);
8025 list_insert_tail(&mpt
->m_slot_freeq_pairp
[pe
->cpuid
].
8026 m_slot_releq
.s
.m_fq_list
, pe
);
8027 mpt
->m_slot_freeq_pairp
[pe
->cpuid
].m_slot_releq
.s
.m_fq_n
++;
8028 ASSERT(mpt
->m_slot_freeq_pairp
[pe
->cpuid
].
8029 m_slot_releq
.s
.m_fq_n
<= mpt
->m_max_requests
- 2);
8030 mutex_exit(&mpt
->m_slot_freeq_pairp
[pe
->cpuid
].
8031 m_slot_releq
.s
.m_fq_mutex
);
8034 * only decrement per target ncmds if command
8035 * has a target associated with it.
8037 if ((cmd
->cmd_flags
& CFLAG_CMDIOC
) == 0) {
8038 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
8041 * reset throttle if we just ran an untagged command
8042 * to a tagged target
8044 if ((ptgt
->m_t_ncmds
== 0) &&
8045 ((cmd
->cmd_pkt_flags
& FLAG_TAGMASK
) == 0)) {
8046 mptsas_set_throttle(mpt
, ptgt
, MAX_THROTTLE
);
8048 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8052 * This is all we need to do for ioc commands.
8053 * The ioc cmds would never be handled in fastpath in ISR, so we make
8054 * sure the mptsas_return_to_pool() would always be called with
8055 * m_mutex protected.
8057 if (cmd
->cmd_flags
& CFLAG_CMDIOC
) {
8058 ASSERT(mutex_owned(&mpt
->m_mutex
));
8059 mptsas_return_to_pool(mpt
, cmd
);
8064 * Figure out what to set tag Q timeout for...
8066 * Optimize: If we have duplicate's of same timeout
8067 * we're using, then we'll use it again until we run
8068 * out of duplicates. This should be the normal case
8069 * for block and raw I/O.
8070 * If no duplicates, we have to scan through tag que and
8071 * find the longest timeout value and use it. This is
8072 * going to take a while...
8073 * Add 1 to m_n_slots to account for TM request.
8075 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
8076 if (cmd
->cmd_pkt
->pkt_time
== ptgt
->m_timebase
) {
8077 if (--(ptgt
->m_dups
) == 0) {
8078 if (ptgt
->m_t_ncmds
) {
8081 ushort_t nslots
= (slots
->m_n_slots
+ 1);
8084 * This crude check assumes we don't do
8085 * this too often which seems reasonable
8086 * for block and raw I/O.
8088 for (i
= 0; i
< nslots
; i
++) {
8089 ssp
= slots
->m_slot
[i
];
8090 if (ssp
&& (Tgt(ssp
) == t
) &&
8091 (ssp
->cmd_pkt
->pkt_time
> n
)) {
8092 n
= ssp
->cmd_pkt
->pkt_time
;
8094 } else if (ssp
&& (Tgt(ssp
) == t
) &&
8095 (ssp
->cmd_pkt
->pkt_time
== n
)) {
8099 ptgt
->m_timebase
= n
;
8102 ptgt
->m_timebase
= 0;
8106 ptgt
->m_timeout
= ptgt
->m_timebase
;
8108 ASSERT(cmd
!= slots
->m_slot
[cmd
->cmd_slot
]);
8109 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8113 * start a fresh request from the top of the device queue.
8116 mptsas_restart_hba(mptsas_t
*mpt
)
8118 mptsas_cmd_t
*cmd
, *next_cmd
;
8119 mptsas_target_t
*ptgt
= NULL
;
8121 NDBG1(("mptsas_restart_hba: mpt=0x%p", (void *)mpt
));
8123 ASSERT(mutex_owned(&mpt
->m_mutex
));
8126 * If there is a reset delay, don't start any cmds. Otherwise, start
8127 * as many cmds as possible.
8128 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8129 * commands is m_max_requests - 2.
8133 while (cmd
!= NULL
) {
8134 next_cmd
= cmd
->cmd_linkp
;
8135 if (cmd
->cmd_flags
& CFLAG_PASSTHRU
) {
8136 if (mptsas_save_cmd(mpt
, cmd
) == TRUE
) {
8138 * passthru command get slot need
8139 * set CFLAG_PREPARED.
8141 cmd
->cmd_flags
|= CFLAG_PREPARED
;
8142 mptsas_waitq_delete(mpt
, cmd
);
8143 mptsas_start_passthru(mpt
, cmd
);
8148 if (cmd
->cmd_flags
& CFLAG_CONFIG
) {
8149 if (mptsas_save_cmd(mpt
, cmd
) == TRUE
) {
8151 * Send the config page request and delete it
8154 cmd
->cmd_flags
|= CFLAG_PREPARED
;
8155 mptsas_waitq_delete(mpt
, cmd
);
8156 mptsas_start_config_page_access(mpt
, cmd
);
8161 if (cmd
->cmd_flags
& CFLAG_FW_DIAG
) {
8162 if (mptsas_save_cmd(mpt
, cmd
) == TRUE
) {
8164 * Send the FW Diag request and delete if from
8167 cmd
->cmd_flags
|= CFLAG_PREPARED
;
8168 mptsas_waitq_delete(mpt
, cmd
);
8169 mptsas_start_diag(mpt
, cmd
);
8175 ptgt
= cmd
->cmd_tgt_addr
;
8177 mutex_enter(&mpt
->m_intr_mutex
);
8178 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
8179 if ((ptgt
->m_t_throttle
== DRAIN_THROTTLE
) &&
8180 (ptgt
->m_t_ncmds
== 0)) {
8181 mptsas_set_throttle(mpt
, ptgt
, MAX_THROTTLE
);
8183 if ((ptgt
->m_reset_delay
== 0) &&
8184 (ptgt
->m_t_ncmds
< ptgt
->m_t_throttle
)) {
8185 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8186 mutex_exit(&mpt
->m_intr_mutex
);
8187 if (mptsas_save_cmd(mpt
, cmd
) == TRUE
) {
8188 mptsas_waitq_delete(mpt
, cmd
);
8189 (void) mptsas_start_cmd(mpt
, cmd
);
8193 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8194 mutex_exit(&mpt
->m_intr_mutex
);
8202 * mpt tag type lookup
8204 static char mptsas_tag_lookup
[] =
8205 {0, MSG_HEAD_QTAG
, MSG_ORDERED_QTAG
, 0, MSG_SIMPLE_QTAG
};
8208 * mptsas_start_cmd0 is similar to mptsas_start_cmd, except that, it is called
8209 * without ANY mutex protected, while, mptsas_start_cmd is called with m_mutex
8212 * the relevant field in ptgt should be protected by m_tgt_intr_mutex in both
8215 * before the cmds are linked on the slot for monitor as outstanding cmds, they
8216 * are accessed as slab objects, so slab framework ensures the exclusive access,
8217 * and no other mutex is requireed. Linking for monitor and the trigger of dma
8218 * must be done exclusively.
8221 mptsas_start_cmd0(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
8223 struct scsi_pkt
*pkt
= CMD2PKT(cmd
);
8224 uint32_t control
= 0;
8227 pMpi2SCSIIORequest_t io_request
;
8228 ddi_dma_handle_t dma_hdl
= mpt
->m_dma_req_frame_hdl
;
8229 ddi_acc_handle_t acc_hdl
= mpt
->m_acc_req_frame_hdl
;
8230 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
8231 uint16_t SMID
, io_flags
= 0;
8232 uint32_t request_desc_low
, request_desc_high
;
8234 NDBG1(("mptsas_start_cmd0: cmd=0x%p", (void *)cmd
));
8237 * Set SMID and increment index. Rollover to 1 instead of 0 if index
8238 * is at the max. 0 is an invalid SMID, so we call the first index 1.
8240 SMID
= cmd
->cmd_slot
;
8243 * It is possible for back to back device reset to
8244 * happen before the reset delay has expired. That's
8245 * ok, just let the device reset go out on the bus.
8247 if ((cmd
->cmd_pkt_flags
& FLAG_NOINTR
) == 0) {
8248 ASSERT(ptgt
->m_reset_delay
== 0);
8252 * if a non-tagged cmd is submitted to an active tagged target
8253 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8256 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
8257 if (((cmd
->cmd_pkt_flags
& FLAG_TAGMASK
) == 0) &&
8258 (ptgt
->m_t_ncmds
> 1) &&
8259 ((cmd
->cmd_flags
& CFLAG_TM_CMD
) == 0) &&
8260 (*(cmd
->cmd_pkt
->pkt_cdbp
) != SCMD_REQUEST_SENSE
)) {
8261 if ((cmd
->cmd_pkt_flags
& FLAG_NOINTR
) == 0) {
8262 NDBG23(("target=%d, untagged cmd, start draining\n",
8265 if (ptgt
->m_reset_delay
== 0) {
8266 mptsas_set_throttle(mpt
, ptgt
, DRAIN_THROTTLE
);
8268 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8270 mutex_enter(&mpt
->m_mutex
);
8271 mptsas_remove_cmd(mpt
, cmd
);
8272 cmd
->cmd_pkt_flags
|= FLAG_HEAD
;
8273 mptsas_waitq_add(mpt
, cmd
);
8274 mutex_exit(&mpt
->m_mutex
);
8275 return (DDI_FAILURE
);
8277 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8278 return (DDI_FAILURE
);
8280 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8283 * Set correct tag bits.
8285 if (cmd
->cmd_pkt_flags
& FLAG_TAGMASK
) {
8286 switch (mptsas_tag_lookup
[((cmd
->cmd_pkt_flags
&
8287 FLAG_TAGMASK
) >> 12)]) {
8288 case MSG_SIMPLE_QTAG
:
8289 control
|= MPI2_SCSIIO_CONTROL_SIMPLEQ
;
8292 control
|= MPI2_SCSIIO_CONTROL_HEADOFQ
;
8294 case MSG_ORDERED_QTAG
:
8295 control
|= MPI2_SCSIIO_CONTROL_ORDEREDQ
;
8298 mptsas_log(mpt
, CE_WARN
, "mpt: Invalid tag type\n");
8302 if (*(cmd
->cmd_pkt
->pkt_cdbp
) != SCMD_REQUEST_SENSE
) {
8303 ptgt
->m_t_throttle
= 1;
8305 control
|= MPI2_SCSIIO_CONTROL_SIMPLEQ
;
8308 if (cmd
->cmd_pkt_flags
& FLAG_TLR
) {
8309 control
|= MPI2_SCSIIO_CONTROL_TLR_ON
;
8312 mem
= mpt
->m_req_frame
+ (mpt
->m_req_frame_size
* SMID
);
8313 io_request
= (pMpi2SCSIIORequest_t
)mem
;
8315 bzero(io_request
, sizeof (Mpi2SCSIIORequest_t
));
8316 ddi_put8(acc_hdl
, &io_request
->SGLOffset0
, offsetof
8317 (MPI2_SCSI_IO_REQUEST
, SGL
) / 4);
8318 mptsas_init_std_hdr(acc_hdl
, io_request
, ptgt
->m_devhdl
, Lun(cmd
), 0,
8319 MPI2_FUNCTION_SCSI_IO_REQUEST
);
8321 (void) ddi_rep_put8(acc_hdl
, (uint8_t *)pkt
->pkt_cdbp
,
8322 io_request
->CDB
.CDB32
, cmd
->cmd_cdblen
, DDI_DEV_AUTOINCR
);
8324 io_flags
= cmd
->cmd_cdblen
;
8325 ddi_put16(acc_hdl
, &io_request
->IoFlags
, io_flags
);
8327 * setup the Scatter/Gather DMA list for this request
8329 if (cmd
->cmd_cookiec
> 0) {
8330 mptsas_sge_setup(mpt
, cmd
, &control
, io_request
, acc_hdl
);
8332 ddi_put32(acc_hdl
, &io_request
->SGL
.MpiSimple
.FlagsLength
,
8333 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT
|
8334 MPI2_SGE_FLAGS_END_OF_BUFFER
|
8335 MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
8336 MPI2_SGE_FLAGS_END_OF_LIST
) << MPI2_SGE_FLAGS_SHIFT
);
8340 * save ARQ information
8342 ddi_put8(acc_hdl
, &io_request
->SenseBufferLength
, cmd
->cmd_rqslen
);
8343 if ((cmd
->cmd_flags
& (CFLAG_SCBEXTERN
| CFLAG_EXTARQBUFVALID
)) ==
8344 (CFLAG_SCBEXTERN
| CFLAG_EXTARQBUFVALID
)) {
8345 ddi_put32(acc_hdl
, &io_request
->SenseBufferLowAddress
,
8346 cmd
->cmd_ext_arqcookie
.dmac_address
);
8348 ddi_put32(acc_hdl
, &io_request
->SenseBufferLowAddress
,
8349 cmd
->cmd_arqcookie
.dmac_address
);
8352 ddi_put32(acc_hdl
, &io_request
->Control
, control
);
8354 NDBG31(("starting message=0x%p, with cmd=0x%p",
8355 (void *)(uintptr_t)mpt
->m_req_frame_dma_addr
, (void *)cmd
));
8357 (void) ddi_dma_sync(dma_hdl
, 0, 0, DDI_DMA_SYNC_FORDEV
);
8360 * Build request descriptor and write it to the request desc post reg.
8362 request_desc_low
= (SMID
<< 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
;
8363 request_desc_high
= ptgt
->m_devhdl
<< 16;
8365 mutex_enter(&mpt
->m_mutex
);
8366 mpt
->m_active
->m_slot
[cmd
->cmd_slot
] = cmd
;
8367 MPTSAS_START_CMD(mpt
, request_desc_low
, request_desc_high
);
8368 mutex_exit(&mpt
->m_mutex
);
8373 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
8376 * Temporarily set timebase = 0; needed for
8377 * timeout torture test.
8379 if (mptsas_test_timeouts
) {
8380 ptgt
->m_timebase
= 0;
8383 n
= pkt
->pkt_time
- ptgt
->m_timebase
;
8387 ptgt
->m_timeout
= ptgt
->m_timebase
;
8390 ptgt
->m_timebase
= pkt
->pkt_time
;
8393 ptgt
->m_timeout
= ptgt
->m_timebase
;
8397 * Set back to a number higher than
8398 * mptsas_scsi_watchdog_tick
8399 * so timeouts will happen in mptsas_watchsubr
8401 if (mptsas_test_timeouts
) {
8402 ptgt
->m_timebase
= 60;
8405 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8407 if ((mptsas_check_dma_handle(dma_hdl
) != DDI_SUCCESS
) ||
8408 (mptsas_check_acc_handle(acc_hdl
) != DDI_SUCCESS
)) {
8409 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
8410 return (DDI_FAILURE
);
8412 return (DDI_SUCCESS
);
8416 mptsas_start_cmd(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
8418 struct scsi_pkt
*pkt
= CMD2PKT(cmd
);
8419 uint32_t control
= 0;
8422 pMpi2SCSIIORequest_t io_request
;
8423 ddi_dma_handle_t dma_hdl
= mpt
->m_dma_req_frame_hdl
;
8424 ddi_acc_handle_t acc_hdl
= mpt
->m_acc_req_frame_hdl
;
8425 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
8426 uint16_t SMID
, io_flags
= 0;
8427 uint32_t request_desc_low
, request_desc_high
;
8429 NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd
));
8432 * Set SMID and increment index. Rollover to 1 instead of 0 if index
8433 * is at the max. 0 is an invalid SMID, so we call the first index 1.
8435 SMID
= cmd
->cmd_slot
;
8438 * It is possible for back to back device reset to
8439 * happen before the reset delay has expired. That's
8440 * ok, just let the device reset go out on the bus.
8442 if ((cmd
->cmd_pkt_flags
& FLAG_NOINTR
) == 0) {
8443 ASSERT(ptgt
->m_reset_delay
== 0);
8447 * if a non-tagged cmd is submitted to an active tagged target
8448 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8451 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
8452 if (((cmd
->cmd_pkt_flags
& FLAG_TAGMASK
) == 0) &&
8453 (ptgt
->m_t_ncmds
> 1) &&
8454 ((cmd
->cmd_flags
& CFLAG_TM_CMD
) == 0) &&
8455 (*(cmd
->cmd_pkt
->pkt_cdbp
) != SCMD_REQUEST_SENSE
)) {
8456 if ((cmd
->cmd_pkt_flags
& FLAG_NOINTR
) == 0) {
8457 NDBG23(("target=%d, untagged cmd, start draining\n",
8460 if (ptgt
->m_reset_delay
== 0) {
8461 mptsas_set_throttle(mpt
, ptgt
, DRAIN_THROTTLE
);
8463 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8465 mptsas_remove_cmd(mpt
, cmd
);
8466 cmd
->cmd_pkt_flags
|= FLAG_HEAD
;
8467 mptsas_waitq_add(mpt
, cmd
);
8468 return (DDI_FAILURE
);
8470 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8471 return (DDI_FAILURE
);
8473 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8476 * Set correct tag bits.
8478 if (cmd
->cmd_pkt_flags
& FLAG_TAGMASK
) {
8479 switch (mptsas_tag_lookup
[((cmd
->cmd_pkt_flags
&
8480 FLAG_TAGMASK
) >> 12)]) {
8481 case MSG_SIMPLE_QTAG
:
8482 control
|= MPI2_SCSIIO_CONTROL_SIMPLEQ
;
8485 control
|= MPI2_SCSIIO_CONTROL_HEADOFQ
;
8487 case MSG_ORDERED_QTAG
:
8488 control
|= MPI2_SCSIIO_CONTROL_ORDEREDQ
;
8491 mptsas_log(mpt
, CE_WARN
, "mpt: Invalid tag type\n");
8495 if (*(cmd
->cmd_pkt
->pkt_cdbp
) != SCMD_REQUEST_SENSE
) {
8496 ptgt
->m_t_throttle
= 1;
8498 control
|= MPI2_SCSIIO_CONTROL_SIMPLEQ
;
8501 if (cmd
->cmd_pkt_flags
& FLAG_TLR
) {
8502 control
|= MPI2_SCSIIO_CONTROL_TLR_ON
;
8505 mem
= mpt
->m_req_frame
+ (mpt
->m_req_frame_size
* SMID
);
8506 io_request
= (pMpi2SCSIIORequest_t
)mem
;
8508 bzero(io_request
, sizeof (Mpi2SCSIIORequest_t
));
8509 ddi_put8(acc_hdl
, &io_request
->SGLOffset0
, offsetof
8510 (MPI2_SCSI_IO_REQUEST
, SGL
) / 4);
8511 mptsas_init_std_hdr(acc_hdl
, io_request
, ptgt
->m_devhdl
, Lun(cmd
), 0,
8512 MPI2_FUNCTION_SCSI_IO_REQUEST
);
8514 (void) ddi_rep_put8(acc_hdl
, (uint8_t *)pkt
->pkt_cdbp
,
8515 io_request
->CDB
.CDB32
, cmd
->cmd_cdblen
, DDI_DEV_AUTOINCR
);
8517 io_flags
= cmd
->cmd_cdblen
;
8518 ddi_put16(acc_hdl
, &io_request
->IoFlags
, io_flags
);
8520 * setup the Scatter/Gather DMA list for this request
8522 if (cmd
->cmd_cookiec
> 0) {
8523 mptsas_sge_setup(mpt
, cmd
, &control
, io_request
, acc_hdl
);
8525 ddi_put32(acc_hdl
, &io_request
->SGL
.MpiSimple
.FlagsLength
,
8526 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT
|
8527 MPI2_SGE_FLAGS_END_OF_BUFFER
|
8528 MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
8529 MPI2_SGE_FLAGS_END_OF_LIST
) << MPI2_SGE_FLAGS_SHIFT
);
8533 * save ARQ information
8535 ddi_put8(acc_hdl
, &io_request
->SenseBufferLength
, cmd
->cmd_rqslen
);
8536 if ((cmd
->cmd_flags
& (CFLAG_SCBEXTERN
| CFLAG_EXTARQBUFVALID
)) ==
8537 (CFLAG_SCBEXTERN
| CFLAG_EXTARQBUFVALID
)) {
8538 ddi_put32(acc_hdl
, &io_request
->SenseBufferLowAddress
,
8539 cmd
->cmd_ext_arqcookie
.dmac_address
);
8541 ddi_put32(acc_hdl
, &io_request
->SenseBufferLowAddress
,
8542 cmd
->cmd_arqcookie
.dmac_address
);
8545 ddi_put32(acc_hdl
, &io_request
->Control
, control
);
8547 NDBG31(("starting message=0x%p, with cmd=0x%p",
8548 (void *)(uintptr_t)mpt
->m_req_frame_dma_addr
, (void *)cmd
));
8550 (void) ddi_dma_sync(dma_hdl
, 0, 0, DDI_DMA_SYNC_FORDEV
);
8553 * Build request descriptor and write it to the request desc post reg.
8555 request_desc_low
= (SMID
<< 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
;
8556 request_desc_high
= ptgt
->m_devhdl
<< 16;
8558 mpt
->m_active
->m_slot
[cmd
->cmd_slot
] = cmd
;
8559 MPTSAS_START_CMD(mpt
, request_desc_low
, request_desc_high
);
8564 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
8567 * Temporarily set timebase = 0; needed for
8568 * timeout torture test.
8570 if (mptsas_test_timeouts
) {
8571 ptgt
->m_timebase
= 0;
8574 n
= pkt
->pkt_time
- ptgt
->m_timebase
;
8578 ptgt
->m_timeout
= ptgt
->m_timebase
;
8581 ptgt
->m_timebase
= pkt
->pkt_time
;
8584 ptgt
->m_timeout
= ptgt
->m_timebase
;
8588 * Set back to a number higher than
8589 * mptsas_scsi_watchdog_tick
8590 * so timeouts will happen in mptsas_watchsubr
8592 if (mptsas_test_timeouts
) {
8593 ptgt
->m_timebase
= 60;
8596 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
8598 if ((mptsas_check_dma_handle(dma_hdl
) != DDI_SUCCESS
) ||
8599 (mptsas_check_acc_handle(acc_hdl
) != DDI_SUCCESS
)) {
8600 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
8601 return (DDI_FAILURE
);
8603 return (DDI_SUCCESS
);
8607 * Select a helper thread to handle current doneq
8610 mptsas_deliver_doneq_thread(mptsas_t
*mpt
)
8613 uint32_t min
= 0xffffffff;
8614 mptsas_doneq_thread_list_t
*item
;
8616 for (i
= 0; i
< mpt
->m_doneq_thread_n
; i
++) {
8617 item
= &mpt
->m_doneq_thread_id
[i
];
8619 * If the completed command on help thread[i] less than
8620 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8621 * pick a thread which has least completed command.
8624 mutex_enter(&item
->mutex
);
8625 if (item
->len
< mpt
->m_doneq_thread_threshold
) {
8627 mutex_exit(&item
->mutex
);
8630 if (item
->len
< min
) {
8634 mutex_exit(&item
->mutex
);
8636 mutex_enter(&mpt
->m_doneq_thread_id
[t
].mutex
);
8637 mptsas_doneq_mv(mpt
, t
);
8638 cv_signal(&mpt
->m_doneq_thread_id
[t
].cv
);
8639 mutex_exit(&mpt
->m_doneq_thread_id
[t
].mutex
);
8643 * move the current global doneq to the doneq of thread[t]
8646 mptsas_doneq_mv(mptsas_t
*mpt
, uint64_t t
)
8649 mptsas_doneq_thread_list_t
*item
= &mpt
->m_doneq_thread_id
[t
];
8651 ASSERT(mutex_owned(&item
->mutex
));
8652 mutex_enter(&mpt
->m_intr_mutex
);
8653 while ((cmd
= mpt
->m_doneq
) != NULL
) {
8654 if ((mpt
->m_doneq
= cmd
->cmd_linkp
) == NULL
) {
8655 mpt
->m_donetail
= &mpt
->m_doneq
;
8657 cmd
->cmd_linkp
= NULL
;
8658 *item
->donetail
= cmd
;
8659 item
->donetail
= &cmd
->cmd_linkp
;
8663 mutex_exit(&mpt
->m_intr_mutex
);
8667 mptsas_fma_check(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
8669 struct scsi_pkt
*pkt
= CMD2PKT(cmd
);
8671 /* Check all acc and dma handles */
8672 if ((mptsas_check_acc_handle(mpt
->m_datap
) !=
8674 (mptsas_check_acc_handle(mpt
->m_acc_req_frame_hdl
) !=
8676 (mptsas_check_acc_handle(mpt
->m_acc_reply_frame_hdl
) !=
8678 (mptsas_check_acc_handle(mpt
->m_acc_free_queue_hdl
) !=
8680 (mptsas_check_acc_handle(mpt
->m_acc_post_queue_hdl
) !=
8682 (mptsas_check_acc_handle(mpt
->m_hshk_acc_hdl
) !=
8684 (mptsas_check_acc_handle(mpt
->m_config_handle
) !=
8686 ddi_fm_service_impact(mpt
->m_dip
,
8687 DDI_SERVICE_UNAFFECTED
);
8688 ddi_fm_acc_err_clear(mpt
->m_config_handle
,
8690 pkt
->pkt_reason
= CMD_TRAN_ERR
;
8691 pkt
->pkt_statistics
= 0;
8693 if ((mptsas_check_dma_handle(mpt
->m_dma_req_frame_hdl
) !=
8695 (mptsas_check_dma_handle(mpt
->m_dma_reply_frame_hdl
) !=
8697 (mptsas_check_dma_handle(mpt
->m_dma_free_queue_hdl
) !=
8699 (mptsas_check_dma_handle(mpt
->m_dma_post_queue_hdl
) !=
8701 (mptsas_check_dma_handle(mpt
->m_hshk_dma_hdl
) !=
8703 ddi_fm_service_impact(mpt
->m_dip
,
8704 DDI_SERVICE_UNAFFECTED
);
8705 pkt
->pkt_reason
= CMD_TRAN_ERR
;
8706 pkt
->pkt_statistics
= 0;
8708 if (cmd
->cmd_dmahandle
&&
8709 (mptsas_check_dma_handle(cmd
->cmd_dmahandle
) != DDI_SUCCESS
)) {
8710 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
8711 pkt
->pkt_reason
= CMD_TRAN_ERR
;
8712 pkt
->pkt_statistics
= 0;
8714 if ((cmd
->cmd_extra_frames
&&
8715 ((mptsas_check_dma_handle(cmd
->cmd_extra_frames
->m_dma_hdl
) !=
8717 (mptsas_check_acc_handle(cmd
->cmd_extra_frames
->m_acc_hdl
) !=
8719 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
8720 pkt
->pkt_reason
= CMD_TRAN_ERR
;
8721 pkt
->pkt_statistics
= 0;
8723 if (cmd
->cmd_arqhandle
&&
8724 (mptsas_check_dma_handle(cmd
->cmd_arqhandle
) != DDI_SUCCESS
)) {
8725 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
8726 pkt
->pkt_reason
= CMD_TRAN_ERR
;
8727 pkt
->pkt_statistics
= 0;
8729 if (cmd
->cmd_ext_arqhandle
&&
8730 (mptsas_check_dma_handle(cmd
->cmd_ext_arqhandle
) != DDI_SUCCESS
)) {
8731 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
8732 pkt
->pkt_reason
= CMD_TRAN_ERR
;
8733 pkt
->pkt_statistics
= 0;
8738 * mptsas_doneq_add0 is similar to mptsas_doneq_add except that it is called
8739 * where m_intr_mutex has already been held.
8742 mptsas_doneq_add0(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
8744 struct scsi_pkt
*pkt
= CMD2PKT(cmd
);
8746 NDBG31(("mptsas_doneq_add0: cmd=0x%p", (void *)cmd
));
8748 ASSERT((cmd
->cmd_flags
& CFLAG_COMPLETED
) == 0);
8749 cmd
->cmd_linkp
= NULL
;
8750 cmd
->cmd_flags
|= CFLAG_FINISHED
;
8751 cmd
->cmd_flags
&= ~CFLAG_IN_TRANSPORT
;
8754 * only add scsi pkts that have completion routines to
8755 * the doneq. no intr cmds do not have callbacks.
8757 if (pkt
&& (pkt
->pkt_comp
)) {
8758 *mpt
->m_donetail
= cmd
;
8759 mpt
->m_donetail
= &cmd
->cmd_linkp
;
8765 * These routines manipulate the queue of commands that
8766 * are waiting for their completion routines to be called.
8767 * The queue is usually in FIFO order but on an MP system
8768 * it's possible for the completion routines to get out
8769 * of order. If that's a problem you need to add a global
8770 * mutex around the code that calls the completion routine
8771 * in the interrupt handler.
8774 mptsas_doneq_add(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
8776 ASSERT(mutex_owned(&mpt
->m_mutex
));
8778 mptsas_fma_check(mpt
, cmd
);
8780 mutex_enter(&mpt
->m_intr_mutex
);
8781 mptsas_doneq_add0(mpt
, cmd
);
8782 mutex_exit(&mpt
->m_intr_mutex
);
8785 static mptsas_cmd_t
*
8786 mptsas_doneq_thread_rm(mptsas_t
*mpt
, uint64_t t
)
8789 mptsas_doneq_thread_list_t
*item
= &mpt
->m_doneq_thread_id
[t
];
8791 /* pop one off the done queue */
8792 if ((cmd
= item
->doneq
) != NULL
) {
8793 /* if the queue is now empty fix the tail pointer */
8794 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd
));
8795 if ((item
->doneq
= cmd
->cmd_linkp
) == NULL
) {
8796 item
->donetail
= &item
->doneq
;
8798 cmd
->cmd_linkp
= NULL
;
8805 mptsas_doneq_empty(mptsas_t
*mpt
)
8807 mutex_enter(&mpt
->m_intr_mutex
);
8808 if (mpt
->m_doneq
&& !mpt
->m_in_callback
) {
8809 mptsas_cmd_t
*cmd
, *next
;
8810 struct scsi_pkt
*pkt
;
8812 mpt
->m_in_callback
= 1;
8814 mpt
->m_doneq
= NULL
;
8815 mpt
->m_donetail
= &mpt
->m_doneq
;
8816 mpt
->m_doneq_len
= 0;
8818 mutex_exit(&mpt
->m_intr_mutex
);
8821 * ONLY in ISR, is it called without m_mutex held, otherwise,
8822 * it is always called with m_mutex held.
8824 if ((curthread
->t_flag
& T_INTR_THREAD
) == 0)
8825 mutex_exit(&mpt
->m_mutex
);
8827 * run the completion routines of all the
8828 * completed commands
8830 while (cmd
!= NULL
) {
8831 next
= cmd
->cmd_linkp
;
8832 cmd
->cmd_linkp
= NULL
;
8833 /* run this command's completion routine */
8834 cmd
->cmd_flags
|= CFLAG_COMPLETED
;
8836 mptsas_pkt_comp(pkt
, cmd
);
8839 if ((curthread
->t_flag
& T_INTR_THREAD
) == 0)
8840 mutex_enter(&mpt
->m_mutex
);
8841 mpt
->m_in_callback
= 0;
8844 mutex_exit(&mpt
->m_intr_mutex
);
8848 * These routines manipulate the target's queue of pending requests
8851 mptsas_waitq_add(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
8853 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd
));
8854 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
8855 cmd
->cmd_queued
= TRUE
;
8858 if (cmd
->cmd_pkt_flags
& FLAG_HEAD
) {
8859 mutex_enter(&mpt
->m_intr_mutex
);
8860 if ((cmd
->cmd_linkp
= mpt
->m_waitq
) == NULL
) {
8861 mpt
->m_waitqtail
= &cmd
->cmd_linkp
;
8864 mutex_exit(&mpt
->m_intr_mutex
);
8866 cmd
->cmd_linkp
= NULL
;
8867 *(mpt
->m_waitqtail
) = cmd
;
8868 mpt
->m_waitqtail
= &cmd
->cmd_linkp
;
8872 static mptsas_cmd_t
*
8873 mptsas_waitq_rm(mptsas_t
*mpt
)
8876 mptsas_target_t
*ptgt
;
8877 NDBG7(("mptsas_waitq_rm"));
8879 mutex_enter(&mpt
->m_intr_mutex
);
8880 MPTSAS_WAITQ_RM(mpt
, cmd
);
8881 mutex_exit(&mpt
->m_intr_mutex
);
8883 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd
));
8885 ptgt
= cmd
->cmd_tgt_addr
;
8888 ASSERT(ptgt
->m_t_nwait
>= 0);
8895 * remove specified cmd from the middle of the wait queue.
8898 mptsas_waitq_delete(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
8900 mptsas_cmd_t
*prevp
= mpt
->m_waitq
;
8901 mptsas_target_t
*ptgt
= cmd
->cmd_tgt_addr
;
8903 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8904 (void *)mpt
, (void *)cmd
));
8907 ASSERT(ptgt
->m_t_nwait
>= 0);
8911 mutex_enter(&mpt
->m_intr_mutex
);
8912 if ((mpt
->m_waitq
= cmd
->cmd_linkp
) == NULL
)
8913 mpt
->m_waitqtail
= &mpt
->m_waitq
;
8914 mutex_exit(&mpt
->m_intr_mutex
);
8916 cmd
->cmd_linkp
= NULL
;
8917 cmd
->cmd_queued
= FALSE
;
8918 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8919 (void *)mpt
, (void *)cmd
));
8923 while (prevp
!= NULL
) {
8924 if (prevp
->cmd_linkp
== cmd
) {
8925 if ((prevp
->cmd_linkp
= cmd
->cmd_linkp
) == NULL
)
8926 mpt
->m_waitqtail
= &prevp
->cmd_linkp
;
8928 cmd
->cmd_linkp
= NULL
;
8929 cmd
->cmd_queued
= FALSE
;
8930 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8931 (void *)mpt
, (void *)cmd
));
8934 prevp
= prevp
->cmd_linkp
;
8936 cmn_err(CE_PANIC
, "mpt: mptsas_waitq_delete: queue botch");
8940 * device and bus reset handling
8943 * - RESET_ALL: reset the controller
8944 * - RESET_TARGET: reset the target specified in scsi_address
8947 mptsas_scsi_reset(struct scsi_address
*ap
, int level
)
8949 mptsas_t
*mpt
= ADDR2MPT(ap
);
8951 mptsas_tgt_private_t
*tgt_private
;
8952 mptsas_target_t
*ptgt
= NULL
;
8954 tgt_private
= (mptsas_tgt_private_t
*)ap
->a_hba_tran
->tran_tgt_private
;
8955 ptgt
= tgt_private
->t_private
;
8959 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt
->m_devhdl
,
8962 mutex_enter(&mpt
->m_mutex
);
8964 * if we are not in panic set up a reset delay for this target
8966 if (!ddi_in_panic()) {
8967 mptsas_setup_bus_reset_delay(mpt
);
8969 drv_usecwait(mpt
->m_scsi_reset_delay
* 1000);
8971 rval
= mptsas_do_scsi_reset(mpt
, ptgt
->m_devhdl
);
8972 mutex_exit(&mpt
->m_mutex
);
8975 * The transport layer expect to only see TRUE and
8976 * FALSE. Therefore, we will adjust the return value
8977 * if mptsas_do_scsi_reset returns FAILED.
8985 mptsas_do_scsi_reset(mptsas_t
*mpt
, uint16_t devhdl
)
8988 uint8_t config
, disk
;
8989 mptsas_slots_t
*slots
= mpt
->m_active
;
8991 ASSERT(mutex_owned(&mpt
->m_mutex
));
8993 if (mptsas_debug_resets
) {
8994 mptsas_log(mpt
, CE_WARN
, "mptsas_do_scsi_reset: target=%d",
8999 * Issue a Target Reset message to the target specified but not to a
9000 * disk making up a raid volume. Just look through the RAID config
9001 * Phys Disk list of DevHandles. If the target's DevHandle is in this
9002 * list, then don't reset this target.
9004 for (config
= 0; config
< slots
->m_num_raid_configs
; config
++) {
9005 for (disk
= 0; disk
< MPTSAS_MAX_DISKS_IN_CONFIG
; disk
++) {
9006 if (devhdl
== slots
->m_raidconfig
[config
].
9007 m_physdisk_devhdl
[disk
]) {
9013 rval
= mptsas_ioc_task_management(mpt
,
9014 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
, devhdl
, 0, NULL
, 0, 0);
9016 mptsas_doneq_empty(mpt
);
9021 mptsas_scsi_reset_notify(struct scsi_address
*ap
, int flag
,
9022 void (*callback
)(caddr_t
), caddr_t arg
)
9024 mptsas_t
*mpt
= ADDR2MPT(ap
);
9026 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap
->a_target
));
9028 return (scsi_hba_reset_notify_setup(ap
, flag
, callback
, arg
,
9029 &mpt
->m_mutex
, &mpt
->m_reset_notify_listf
));
9033 mptsas_get_name(struct scsi_device
*sd
, char *name
, int len
)
9035 dev_info_t
*lun_dip
= NULL
;
9038 ASSERT(name
!= NULL
);
9039 lun_dip
= sd
->sd_dev
;
9040 ASSERT(lun_dip
!= NULL
);
9042 if (mptsas_name_child(lun_dip
, name
, len
) == DDI_SUCCESS
) {
/*
 * tran_get_bus_addr(9E): the bus address is the same string as the
 * unit-address name.
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
9056 mptsas_set_throttle(mptsas_t
*mpt
, mptsas_target_t
*ptgt
, int what
)
9059 NDBG25(("mptsas_set_throttle: throttle=%x", what
));
9062 * if the bus is draining/quiesced, no changes to the throttles
9063 * are allowed. Not allowing change of throttles during draining
9064 * limits error recovery but will reduce draining time
9066 * all throttles should have been set to HOLD_THROTTLE
9068 if (mpt
->m_softstate
& (MPTSAS_SS_QUIESCED
| MPTSAS_SS_DRAINING
)) {
9072 if (what
== HOLD_THROTTLE
) {
9073 ptgt
->m_t_throttle
= HOLD_THROTTLE
;
9074 } else if (ptgt
->m_reset_delay
== 0) {
9075 ptgt
->m_t_throttle
= what
;
9080 * Clean up from a device reset.
9081 * For the case of target reset, this function clears the waitq of all
9082 * commands for a particular target. For the case of abort task set, this
9083 * function clears the waitq of all commonds for a particular target/lun.
9086 mptsas_flush_target(mptsas_t
*mpt
, ushort_t target
, int lun
, uint8_t tasktype
)
9088 mptsas_slots_t
*slots
= mpt
->m_active
;
9089 mptsas_cmd_t
*cmd
, *next_cmd
;
9094 NDBG25(("mptsas_flush_target: target=%d lun=%d", target
, lun
));
9097 * Make sure the I/O Controller has flushed all cmds
9098 * that are associated with this target for a target reset
9099 * and target/lun for abort task set.
9100 * Account for TM requests, which use the last SMID.
9102 mutex_enter(&mpt
->m_intr_mutex
);
9103 for (slot
= 0; slot
<= mpt
->m_active
->m_n_slots
; slot
++) {
9104 if ((cmd
= slots
->m_slot
[slot
]) == NULL
) {
9108 stat
= STAT_DEV_RESET
;
9110 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
:
9111 if (Tgt(cmd
) == target
) {
9112 NDBG25(("mptsas_flush_target discovered non-"
9113 "NULL cmd in slot %d, tasktype 0x%x", slot
,
9115 mptsas_dump_cmd(mpt
, cmd
);
9116 mptsas_remove_cmd0(mpt
, cmd
);
9117 mptsas_set_pkt_reason(mpt
, cmd
, reason
, stat
);
9118 mptsas_doneq_add0(mpt
, cmd
);
9121 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET
:
9122 reason
= CMD_ABORTED
;
9123 stat
= STAT_ABORTED
;
9125 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET
:
9126 if ((Tgt(cmd
) == target
) && (Lun(cmd
) == lun
)) {
9128 NDBG25(("mptsas_flush_target discovered non-"
9129 "NULL cmd in slot %d, tasktype 0x%x", slot
,
9131 mptsas_dump_cmd(mpt
, cmd
);
9132 mptsas_remove_cmd0(mpt
, cmd
);
9133 mptsas_set_pkt_reason(mpt
, cmd
, reason
,
9135 mptsas_doneq_add0(mpt
, cmd
);
9142 mutex_exit(&mpt
->m_intr_mutex
);
9145 * Flush the waitq of this target's cmds
9150 stat
= STAT_DEV_RESET
;
9153 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
:
9154 while (cmd
!= NULL
) {
9155 next_cmd
= cmd
->cmd_linkp
;
9156 if (Tgt(cmd
) == target
) {
9157 mptsas_waitq_delete(mpt
, cmd
);
9158 mptsas_set_pkt_reason(mpt
, cmd
,
9160 mptsas_doneq_add(mpt
, cmd
);
9165 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET
:
9166 reason
= CMD_ABORTED
;
9167 stat
= STAT_ABORTED
;
9169 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET
:
9170 while (cmd
!= NULL
) {
9171 next_cmd
= cmd
->cmd_linkp
;
9172 if ((Tgt(cmd
) == target
) && (Lun(cmd
) == lun
)) {
9173 mptsas_waitq_delete(mpt
, cmd
);
9174 mptsas_set_pkt_reason(mpt
, cmd
,
9176 mptsas_doneq_add(mpt
, cmd
);
9182 mptsas_log(mpt
, CE_WARN
, "Unknown task management type %d.",
9189 * Clean up hba state, abort all outstanding command and commands in waitq
9190 * reset timeout of all targets.
9193 mptsas_flush_hba(mptsas_t
*mpt
)
9195 mptsas_slots_t
*slots
= mpt
->m_active
;
9199 NDBG25(("mptsas_flush_hba"));
9202 * The I/O Controller should have already sent back
9203 * all commands via the scsi I/O reply frame. Make
9204 * sure all commands have been flushed.
9205 * Account for TM request, which use the last SMID.
9207 mutex_enter(&mpt
->m_intr_mutex
);
9208 for (slot
= 0; slot
<= mpt
->m_active
->m_n_slots
; slot
++) {
9209 if ((cmd
= slots
->m_slot
[slot
]) == NULL
) {
9213 if (cmd
->cmd_flags
& CFLAG_CMDIOC
) {
9215 * Need to make sure to tell everyone that might be
9216 * waiting on this command that it's going to fail. If
9217 * we get here, this command will never timeout because
9218 * the active command table is going to be re-allocated,
9219 * so there will be nothing to check against a time out.
9220 * Instead, mark the command as failed due to reset.
9222 mptsas_set_pkt_reason(mpt
, cmd
, CMD_RESET
,
9224 if ((cmd
->cmd_flags
& CFLAG_PASSTHRU
) ||
9225 (cmd
->cmd_flags
& CFLAG_CONFIG
) ||
9226 (cmd
->cmd_flags
& CFLAG_FW_DIAG
)) {
9227 cmd
->cmd_flags
|= CFLAG_FINISHED
;
9228 cv_broadcast(&mpt
->m_passthru_cv
);
9229 cv_broadcast(&mpt
->m_config_cv
);
9230 cv_broadcast(&mpt
->m_fw_diag_cv
);
9235 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
9237 mptsas_dump_cmd(mpt
, cmd
);
9239 mptsas_remove_cmd0(mpt
, cmd
);
9240 mptsas_set_pkt_reason(mpt
, cmd
, CMD_RESET
, STAT_BUS_RESET
);
9241 mptsas_doneq_add0(mpt
, cmd
);
9243 mutex_exit(&mpt
->m_intr_mutex
);
9248 while ((cmd
= mptsas_waitq_rm(mpt
)) != NULL
) {
9249 mptsas_set_pkt_reason(mpt
, cmd
, CMD_RESET
, STAT_BUS_RESET
);
9250 if ((cmd
->cmd_flags
& CFLAG_PASSTHRU
) ||
9251 (cmd
->cmd_flags
& CFLAG_CONFIG
) ||
9252 (cmd
->cmd_flags
& CFLAG_FW_DIAG
)) {
9253 cmd
->cmd_flags
|= CFLAG_FINISHED
;
9254 cv_broadcast(&mpt
->m_passthru_cv
);
9255 cv_broadcast(&mpt
->m_config_cv
);
9256 cv_broadcast(&mpt
->m_fw_diag_cv
);
9258 mptsas_doneq_add(mpt
, cmd
);
9264 * set pkt_reason and OR in pkt_statistics flag
9267 mptsas_set_pkt_reason(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
, uchar_t reason
,
9271 _NOTE(ARGUNUSED(mpt
))
9274 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9275 (void *)cmd
, reason
, stat
));
9278 if (cmd
->cmd_pkt
->pkt_reason
== CMD_CMPLT
) {
9279 cmd
->cmd_pkt
->pkt_reason
= reason
;
9281 cmd
->cmd_pkt
->pkt_statistics
|= stat
;
9286 mptsas_start_watch_reset_delay()
9288 NDBG22(("mptsas_start_watch_reset_delay"));
9290 mutex_enter(&mptsas_global_mutex
);
9291 if (mptsas_reset_watch
== NULL
&& mptsas_timeouts_enabled
) {
9292 mptsas_reset_watch
= timeout(mptsas_watch_reset_delay
, NULL
,
9293 drv_usectohz((clock_t)
9294 MPTSAS_WATCH_RESET_DELAY_TICK
* 1000));
9295 ASSERT(mptsas_reset_watch
!= NULL
);
9297 mutex_exit(&mptsas_global_mutex
);
9301 mptsas_setup_bus_reset_delay(mptsas_t
*mpt
)
9303 mptsas_target_t
*ptgt
= NULL
;
9305 NDBG22(("mptsas_setup_bus_reset_delay"));
9306 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
9308 while (ptgt
!= NULL
) {
9309 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
9310 mptsas_set_throttle(mpt
, ptgt
, HOLD_THROTTLE
);
9311 ptgt
->m_reset_delay
= mpt
->m_scsi_reset_delay
;
9312 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
9314 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
9315 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
9318 mptsas_start_watch_reset_delay();
9322 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9323 * mpt instance for active reset delays
9326 mptsas_watch_reset_delay(void *arg
)
9329 _NOTE(ARGUNUSED(arg
))
9335 NDBG22(("mptsas_watch_reset_delay"));
9337 mutex_enter(&mptsas_global_mutex
);
9338 mptsas_reset_watch
= 0;
9339 mutex_exit(&mptsas_global_mutex
);
9340 rw_enter(&mptsas_global_rwlock
, RW_READER
);
9341 for (mpt
= mptsas_head
; mpt
!= NULL
; mpt
= mpt
->m_next
) {
9342 if (mpt
->m_tran
== 0) {
9345 mutex_enter(&mpt
->m_mutex
);
9346 not_done
+= mptsas_watch_reset_delay_subr(mpt
);
9347 mutex_exit(&mpt
->m_mutex
);
9349 rw_exit(&mptsas_global_rwlock
);
9352 mptsas_start_watch_reset_delay();
9357 mptsas_watch_reset_delay_subr(mptsas_t
*mpt
)
9361 mptsas_target_t
*ptgt
= NULL
;
9363 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt
));
9365 ASSERT(mutex_owned(&mpt
->m_mutex
));
9367 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
9369 while (ptgt
!= NULL
) {
9370 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
9371 if (ptgt
->m_reset_delay
!= 0) {
9372 ptgt
->m_reset_delay
-=
9373 MPTSAS_WATCH_RESET_DELAY_TICK
;
9374 if (ptgt
->m_reset_delay
<= 0) {
9375 ptgt
->m_reset_delay
= 0;
9376 mptsas_set_throttle(mpt
, ptgt
,
9383 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
9385 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
9386 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
9390 mptsas_restart_hba(mpt
);
9397 mptsas_test_reset(mptsas_t
*mpt
, int target
)
9399 mptsas_target_t
*ptgt
= NULL
;
9401 if (mptsas_rtest
== target
) {
9402 if (mptsas_do_scsi_reset(mpt
, target
) == TRUE
) {
9405 if (mptsas_rtest
== -1) {
9406 NDBG22(("mptsas_test_reset success"));
9416 * - if pkt is not NULL, abort just that command
9417 * - if pkt is NULL, abort all outstanding commands for target
9420 mptsas_scsi_abort(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
9422 mptsas_t
*mpt
= ADDR2MPT(ap
);
9424 mptsas_tgt_private_t
*tgt_private
;
9427 tgt_private
= (mptsas_tgt_private_t
*)ap
->a_hba_tran
->
9429 ASSERT(tgt_private
!= NULL
);
9430 target
= tgt_private
->t_private
->m_devhdl
;
9431 lun
= tgt_private
->t_lun
;
9433 NDBG23(("mptsas_scsi_abort: target=%d.%d", target
, lun
));
9435 mutex_enter(&mpt
->m_mutex
);
9436 rval
= mptsas_do_scsi_abort(mpt
, target
, lun
, pkt
);
9437 mutex_exit(&mpt
->m_mutex
);
9442 mptsas_do_scsi_abort(mptsas_t
*mpt
, int target
, int lun
, struct scsi_pkt
*pkt
)
9444 mptsas_cmd_t
*sp
= NULL
;
9445 mptsas_slots_t
*slots
= mpt
->m_active
;
9448 ASSERT(mutex_owned(&mpt
->m_mutex
));
9451 * Abort the command pkt on the target/lun in ap. If pkt is
9452 * NULL, abort all outstanding commands on that target/lun.
9453 * If you can abort them, return 1, else return 0.
9454 * Each packet that's aborted should be sent back to the target
9455 * driver through the callback routine, with pkt_reason set to
9458 * abort cmd pkt on HBA hardware; clean out of outstanding
9459 * command lists, etc.
9462 /* abort the specified packet */
9465 if (sp
->cmd_queued
) {
9466 NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
9468 mptsas_waitq_delete(mpt
, sp
);
9469 mptsas_set_pkt_reason(mpt
, sp
, CMD_ABORTED
,
9471 mptsas_doneq_add(mpt
, sp
);
9477 * Have mpt firmware abort this command
9479 mutex_enter(&mpt
->m_intr_mutex
);
9480 if (slots
->m_slot
[sp
->cmd_slot
] != NULL
) {
9481 mutex_exit(&mpt
->m_intr_mutex
);
9482 rval
= mptsas_ioc_task_management(mpt
,
9483 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK
, target
,
9487 * The transport layer expects only TRUE and FALSE.
9488 * Therefore, if mptsas_ioc_task_management returns
9489 * FAILED we will return FALSE.
9495 mutex_exit(&mpt
->m_intr_mutex
);
9499 * If pkt is NULL then abort task set
9501 rval
= mptsas_ioc_task_management(mpt
,
9502 MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET
, target
, lun
, NULL
, 0, 0);
9505 * The transport layer expects only TRUE and FALSE.
9506 * Therefore, if mptsas_ioc_task_management returns
9507 * FAILED we will return FALSE.
9513 if (rval
&& mptsas_test_stop
) {
9514 debug_enter("mptsas_do_scsi_abort");
9519 mptsas_doneq_empty(mpt
);
9524 * capability handling:
9525 * (*tran_getcap). Get the capability named, and return its value.
9528 mptsas_scsi_getcap(struct scsi_address
*ap
, char *cap
, int tgtonly
)
9530 mptsas_t
*mpt
= ADDR2MPT(ap
);
9534 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9535 ap
->a_target
, cap
, tgtonly
));
9537 mutex_enter(&mpt
->m_mutex
);
9539 if ((mptsas_scsi_capchk(cap
, tgtonly
, &ckey
)) != TRUE
) {
9540 mutex_exit(&mpt
->m_mutex
);
9545 case SCSI_CAP_DMA_MAX
:
9546 rval
= (int)mpt
->m_msg_dma_attr
.dma_attr_maxxfer
;
9551 case SCSI_CAP_MSG_OUT
:
9552 case SCSI_CAP_PARITY
:
9553 case SCSI_CAP_UNTAGGED_QING
:
9556 case SCSI_CAP_TAGGED_QING
:
9559 case SCSI_CAP_RESET_NOTIFICATION
:
9562 case SCSI_CAP_LINKED_CMDS
:
9565 case SCSI_CAP_QFULL_RETRIES
:
9566 rval
= ((mptsas_tgt_private_t
*)(ap
->a_hba_tran
->
9567 tran_tgt_private
))->t_private
->m_qfull_retries
;
9569 case SCSI_CAP_QFULL_RETRY_INTERVAL
:
9570 rval
= drv_hztousec(((mptsas_tgt_private_t
*)
9571 (ap
->a_hba_tran
->tran_tgt_private
))->
9572 t_private
->m_qfull_retry_interval
) / 1000;
9574 case SCSI_CAP_CDB_LEN
:
9577 case SCSI_CAP_INTERCONNECT_TYPE
:
9578 rval
= INTERCONNECT_SAS
;
9580 case SCSI_CAP_TRAN_LAYER_RETRIES
:
9581 if (mpt
->m_ioc_capabilities
&
9582 MPI2_IOCFACTS_CAPABILITY_TLR
)
9592 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap
, rval
));
9594 mutex_exit(&mpt
->m_mutex
);
9599 * (*tran_setcap). Set the capability named to the value given.
9602 mptsas_scsi_setcap(struct scsi_address
*ap
, char *cap
, int value
, int tgtonly
)
9604 mptsas_t
*mpt
= ADDR2MPT(ap
);
9607 mptsas_target_t
*ptgt
;
9609 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9610 ap
->a_target
, cap
, value
, tgtonly
));
9616 mutex_enter(&mpt
->m_mutex
);
9618 if ((mptsas_scsi_capchk(cap
, tgtonly
, &ckey
)) != TRUE
) {
9619 mutex_exit(&mpt
->m_mutex
);
9624 case SCSI_CAP_DMA_MAX
:
9625 case SCSI_CAP_MSG_OUT
:
9626 case SCSI_CAP_PARITY
:
9627 case SCSI_CAP_INITIATOR_ID
:
9628 case SCSI_CAP_LINKED_CMDS
:
9629 case SCSI_CAP_UNTAGGED_QING
:
9630 case SCSI_CAP_RESET_NOTIFICATION
:
9632 * None of these are settable via
9633 * the capability interface.
9638 * We cannot turn off arq so return false if asked to
9646 case SCSI_CAP_TAGGED_QING
:
9647 ptgt
= ((mptsas_tgt_private_t
*)
9648 (ap
->a_hba_tran
->tran_tgt_private
))->t_private
;
9649 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
9650 mptsas_set_throttle(mpt
, ptgt
, MAX_THROTTLE
);
9651 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
9654 case SCSI_CAP_QFULL_RETRIES
:
9655 ((mptsas_tgt_private_t
*)(ap
->a_hba_tran
->tran_tgt_private
))->
9656 t_private
->m_qfull_retries
= (uchar_t
)value
;
9659 case SCSI_CAP_QFULL_RETRY_INTERVAL
:
9660 ((mptsas_tgt_private_t
*)(ap
->a_hba_tran
->tran_tgt_private
))->
9661 t_private
->m_qfull_retry_interval
=
9662 drv_usectohz(value
* 1000);
9669 mutex_exit(&mpt
->m_mutex
);
9674 * Utility routine for mptsas_ifsetcap/ifgetcap
9678 mptsas_scsi_capchk(char *cap
, int tgtonly
, int *cidxp
)
9680 NDBG24(("mptsas_scsi_capchk: cap=%s", cap
));
9685 *cidxp
= scsi_hba_lookup_capstr(cap
);
9690 mptsas_alloc_active_slots(mptsas_t
*mpt
, int flag
)
9692 mptsas_slots_t
*old_active
= mpt
->m_active
;
9693 mptsas_slots_t
*new_active
;
9695 int rval
= -1, nslot
, i
;
9696 mptsas_slot_free_e_t
*pe
;
9698 if (mptsas_outstanding_cmds_n(mpt
)) {
9699 NDBG9(("cannot change size of active slots array"));
9703 size
= MPTSAS_SLOTS_SIZE(mpt
);
9704 new_active
= kmem_zalloc(size
, flag
);
9705 if (new_active
== NULL
) {
9706 NDBG1(("new active alloc failed"));
9710 * Since SMID 0 is reserved and the TM slot is reserved, the
9711 * number of slots that can be used at any one time is
9712 * m_max_requests - 2.
9714 new_active
->m_n_slots
= nslot
= (mpt
->m_max_requests
- 2);
9715 new_active
->m_size
= size
;
9716 new_active
->m_tags
= 1;
9719 new_active
->m_tgttbl
= old_active
->m_tgttbl
;
9720 new_active
->m_smptbl
= old_active
->m_smptbl
;
9721 new_active
->m_num_raid_configs
=
9722 old_active
->m_num_raid_configs
;
9723 for (i
= 0; i
< new_active
->m_num_raid_configs
; i
++) {
9724 new_active
->m_raidconfig
[i
] =
9725 old_active
->m_raidconfig
[i
];
9727 mptsas_free_active_slots(mpt
);
9730 if (max_ncpus
& (max_ncpus
- 1)) {
9731 mpt
->m_slot_freeq_pair_n
= (1 << highbit(max_ncpus
));
9733 mpt
->m_slot_freeq_pair_n
= max_ncpus
;
9735 mpt
->m_slot_freeq_pairp
= kmem_zalloc(
9736 mpt
->m_slot_freeq_pair_n
*
9737 sizeof (mptsas_slot_freeq_pair_t
), KM_SLEEP
);
9738 for (i
= 0; i
< mpt
->m_slot_freeq_pair_n
; i
++) {
9739 list_create(&mpt
->m_slot_freeq_pairp
[i
].
9740 m_slot_allocq
.s
.m_fq_list
,
9741 sizeof (mptsas_slot_free_e_t
),
9742 offsetof(mptsas_slot_free_e_t
, node
));
9743 list_create(&mpt
->m_slot_freeq_pairp
[i
].
9744 m_slot_releq
.s
.m_fq_list
,
9745 sizeof (mptsas_slot_free_e_t
),
9746 offsetof(mptsas_slot_free_e_t
, node
));
9747 mpt
->m_slot_freeq_pairp
[i
].m_slot_allocq
.s
.m_fq_n
= 0;
9748 mpt
->m_slot_freeq_pairp
[i
].m_slot_releq
.s
.m_fq_n
= 0;
9749 mutex_init(&mpt
->m_slot_freeq_pairp
[i
].
9750 m_slot_allocq
.s
.m_fq_mutex
, NULL
, MUTEX_DRIVER
,
9751 DDI_INTR_PRI(mpt
->m_intr_pri
));
9752 mutex_init(&mpt
->m_slot_freeq_pairp
[i
].
9753 m_slot_releq
.s
.m_fq_mutex
, NULL
, MUTEX_DRIVER
,
9754 DDI_INTR_PRI(mpt
->m_intr_pri
));
9756 pe
= mpt
->m_slot_free_ae
= kmem_zalloc(nslot
*
9757 sizeof (mptsas_slot_free_e_t
), KM_SLEEP
);
9759 * An array of Mpi2ReplyDescriptorsUnion_t is defined here.
9760 * We are trying to eliminate the m_mutex in the context
9761 * reply code path in the ISR. Since the read of the
9762 * ReplyDescriptor and update/write of the ReplyIndex must
9763 * be atomic (since the poll thread may also update them at
9764 * the same time) so we first read out of the ReplyDescriptor
9765 * into this array and update the ReplyIndex register with a
9766 * separate mutex m_intr_mutex protected, and then release the
9767 * mutex and process all of them. the length of the array is
9768 * defined as max as 128(128*64=8k), which is
9769 * assumed as the maxmium depth of the interrupt coalese.
9771 mpt
->m_reply
= kmem_zalloc(MPI_ADDRESS_COALSCE_MAX
*
9772 sizeof (Mpi2ReplyDescriptorsUnion_t
), KM_SLEEP
);
9773 for (i
= 0; i
< nslot
; i
++, pe
++) {
9774 pe
->slot
= i
+ 1; /* SMID 0 is reserved */
9775 pe
->cpuid
= i
% mpt
->m_slot_freeq_pair_n
;
9776 list_insert_tail(&mpt
->m_slot_freeq_pairp
9777 [i
% mpt
->m_slot_freeq_pair_n
]
9778 .m_slot_allocq
.s
.m_fq_list
, pe
);
9779 mpt
->m_slot_freeq_pairp
[i
% mpt
->m_slot_freeq_pair_n
]
9780 .m_slot_allocq
.s
.m_fq_n
++;
9781 mpt
->m_slot_freeq_pairp
[i
% mpt
->m_slot_freeq_pair_n
]
9782 .m_slot_allocq
.s
.m_fq_n_init
++;
9785 mpt
->m_active
= new_active
;
9792 mptsas_free_active_slots(mptsas_t
*mpt
)
9794 mptsas_slots_t
*active
= mpt
->m_active
;
9796 mptsas_slot_free_e_t
*pe
;
9802 if (mpt
->m_slot_freeq_pairp
) {
9803 for (i
= 0; i
< mpt
->m_slot_freeq_pair_n
; i
++) {
9804 while ((pe
= list_head(&mpt
->m_slot_freeq_pairp
9805 [i
].m_slot_allocq
.s
.m_fq_list
)) != NULL
) {
9806 list_remove(&mpt
->m_slot_freeq_pairp
[i
]
9807 .m_slot_allocq
.s
.m_fq_list
, pe
);
9809 list_destroy(&mpt
->m_slot_freeq_pairp
9810 [i
].m_slot_allocq
.s
.m_fq_list
);
9811 while ((pe
= list_head(&mpt
->m_slot_freeq_pairp
9812 [i
].m_slot_releq
.s
.m_fq_list
)) != NULL
) {
9813 list_remove(&mpt
->m_slot_freeq_pairp
[i
]
9814 .m_slot_releq
.s
.m_fq_list
, pe
);
9816 list_destroy(&mpt
->m_slot_freeq_pairp
9817 [i
].m_slot_releq
.s
.m_fq_list
);
9818 mutex_destroy(&mpt
->m_slot_freeq_pairp
9819 [i
].m_slot_allocq
.s
.m_fq_mutex
);
9820 mutex_destroy(&mpt
->m_slot_freeq_pairp
9821 [i
].m_slot_releq
.s
.m_fq_mutex
);
9823 kmem_free(mpt
->m_slot_freeq_pairp
, mpt
->m_slot_freeq_pair_n
*
9824 sizeof (mptsas_slot_freeq_pair_t
));
9826 if (mpt
->m_slot_free_ae
)
9827 kmem_free(mpt
->m_slot_free_ae
, mpt
->m_active
->m_n_slots
*
9828 sizeof (mptsas_slot_free_e_t
));
9831 kmem_free(mpt
->m_reply
, MPI_ADDRESS_COALSCE_MAX
*
9832 sizeof (Mpi2ReplyDescriptorsUnion_t
));
9834 size
= active
->m_size
;
9835 kmem_free(active
, size
);
9836 mpt
->m_active
= NULL
;
9840 * Error logging, printing, and debug print routines.
9842 static char *mptsas_label
= "mpt_sas";
9846 mptsas_log(mptsas_t
*mpt
, int level
, char *fmt
, ...)
9857 mutex_enter(&mptsas_log_mutex
);
9860 (void) vsprintf(mptsas_log_buf
, fmt
, ap
);
9863 if (level
== CE_CONT
) {
9864 scsi_log(dev
, mptsas_label
, level
, "%s\n", mptsas_log_buf
);
9866 scsi_log(dev
, mptsas_label
, level
, "%s", mptsas_log_buf
);
9869 mutex_exit(&mptsas_log_mutex
);
9875 mptsas_printf(char *fmt
, ...)
9877 dev_info_t
*dev
= 0;
9880 mutex_enter(&mptsas_log_mutex
);
9883 (void) vsprintf(mptsas_log_buf
, fmt
, ap
);
9887 prom_printf("%s:\t%s\n", mptsas_label
, mptsas_log_buf
);
9889 scsi_log(dev
, mptsas_label
, SCSI_DEBUG
, "%s\n", mptsas_log_buf
);
9891 mutex_exit(&mptsas_log_mutex
);
9899 mptsas_watch(void *arg
)
9902 _NOTE(ARGUNUSED(arg
))
9908 NDBG30(("mptsas_watch"));
9910 rw_enter(&mptsas_global_rwlock
, RW_READER
);
9911 for (mpt
= mptsas_head
; mpt
!= (mptsas_t
*)NULL
; mpt
= mpt
->m_next
) {
9913 mutex_enter(&mpt
->m_mutex
);
9915 /* Skip device if not powered on */
9916 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
9917 if (mpt
->m_power_level
== PM_LEVEL_D0
) {
9918 (void) pm_busy_component(mpt
->m_dip
, 0);
9921 mutex_exit(&mpt
->m_mutex
);
9927 * Check if controller is in a FAULT state. If so, reset it.
9929 doorbell
= ddi_get32(mpt
->m_datap
, &mpt
->m_reg
->Doorbell
);
9930 if ((doorbell
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_FAULT
) {
9931 doorbell
&= MPI2_DOORBELL_DATA_MASK
;
9932 mptsas_log(mpt
, CE_WARN
, "MPT Firmware Fault, "
9933 "code: %04x", doorbell
);
9934 mpt
->m_softstate
&= ~MPTSAS_SS_MSG_UNIT_RESET
;
9935 if ((mptsas_restart_ioc(mpt
)) == DDI_FAILURE
) {
9936 mptsas_log(mpt
, CE_WARN
, "Reset failed"
9937 "after fault was detected");
9942 * For now, always call mptsas_watchsubr.
9944 mptsas_watchsubr(mpt
);
9946 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
9948 (void) pm_idle_component(mpt
->m_dip
, 0);
9951 mutex_exit(&mpt
->m_mutex
);
9953 rw_exit(&mptsas_global_rwlock
);
9955 mutex_enter(&mptsas_global_mutex
);
9956 if (mptsas_timeouts_enabled
)
9957 mptsas_timeout_id
= timeout(mptsas_watch
, NULL
, mptsas_tick
);
9958 mutex_exit(&mptsas_global_mutex
);
9962 mptsas_watchsubr(mptsas_t
*mpt
)
9966 mptsas_target_t
*ptgt
= NULL
;
9968 NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt
));
9971 if (mptsas_enable_untagged
) {
9972 mptsas_test_untagged
++;
9977 * Check for commands stuck in active slot
9978 * Account for TM requests, which use the last SMID.
9980 mutex_enter(&mpt
->m_intr_mutex
);
9981 for (i
= 0; i
<= mpt
->m_active
->m_n_slots
; i
++) {
9982 if ((cmd
= mpt
->m_active
->m_slot
[i
]) != NULL
) {
9983 if ((cmd
->cmd_flags
& CFLAG_CMDIOC
) == 0) {
9984 cmd
->cmd_active_timeout
-=
9985 mptsas_scsi_watchdog_tick
;
9986 if (cmd
->cmd_active_timeout
<= 0) {
9988 * There seems to be a command stuck
9989 * in the active slot. Drain throttle.
9991 ptgt
= cmd
->cmd_tgt_addr
;
9992 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
9993 mptsas_set_throttle(mpt
, ptgt
,
9995 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
9998 if ((cmd
->cmd_flags
& CFLAG_PASSTHRU
) ||
9999 (cmd
->cmd_flags
& CFLAG_CONFIG
) ||
10000 (cmd
->cmd_flags
& CFLAG_FW_DIAG
)) {
10001 cmd
->cmd_active_timeout
-=
10002 mptsas_scsi_watchdog_tick
;
10003 if (cmd
->cmd_active_timeout
<= 0) {
10005 * passthrough command timeout
10007 cmd
->cmd_flags
|= (CFLAG_FINISHED
|
10009 cv_broadcast(&mpt
->m_passthru_cv
);
10010 cv_broadcast(&mpt
->m_config_cv
);
10011 cv_broadcast(&mpt
->m_fw_diag_cv
);
10016 mutex_exit(&mpt
->m_intr_mutex
);
10018 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
10019 MPTSAS_HASH_FIRST
);
10020 while (ptgt
!= NULL
) {
10022 * In order to avoid using m_mutex in the key code path in ISR,
10023 * separate mutexs are introduced to protect those elements
10026 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
10029 * If we were draining due to a qfull condition,
10030 * go back to full throttle.
10032 if ((ptgt
->m_t_throttle
< MAX_THROTTLE
) &&
10033 (ptgt
->m_t_throttle
> HOLD_THROTTLE
) &&
10034 (ptgt
->m_t_ncmds
< ptgt
->m_t_throttle
)) {
10035 mptsas_set_throttle(mpt
, ptgt
, MAX_THROTTLE
);
10036 mptsas_restart_hba(mpt
);
10039 if ((ptgt
->m_t_ncmds
> 0) &&
10040 (ptgt
->m_timebase
)) {
10042 if (ptgt
->m_timebase
<=
10043 mptsas_scsi_watchdog_tick
) {
10044 ptgt
->m_timebase
+=
10045 mptsas_scsi_watchdog_tick
;
10046 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
10047 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10048 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
10052 ptgt
->m_timeout
-= mptsas_scsi_watchdog_tick
;
10054 if (ptgt
->m_timeout
< 0) {
10055 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
10056 mptsas_cmd_timeout(mpt
, ptgt
->m_devhdl
);
10057 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10058 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
10062 if ((ptgt
->m_timeout
) <=
10063 mptsas_scsi_watchdog_tick
) {
10064 NDBG23(("pending timeout"));
10065 mptsas_set_throttle(mpt
, ptgt
,
10069 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
10070 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10071 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
10079 mptsas_cmd_timeout(mptsas_t
*mpt
, uint16_t devhdl
)
10082 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl
));
10083 mptsas_log(mpt
, CE_WARN
, "Disconnected command timeout for "
10084 "Target %d", devhdl
);
10087 * If the current target is not the target passed in,
10088 * try to reset that target.
10090 NDBG29(("mptsas_cmd_timeout: device reset"));
10091 if (mptsas_do_scsi_reset(mpt
, devhdl
) != TRUE
) {
10092 mptsas_log(mpt
, CE_WARN
, "Target %d reset for command timeout "
10093 "recovery failed!", devhdl
);
10098 * Device / Hotplug control
10101 mptsas_scsi_quiesce(dev_info_t
*dip
)
10104 scsi_hba_tran_t
*tran
;
10106 tran
= ddi_get_driver_private(dip
);
10107 if (tran
== NULL
|| (mpt
= TRAN2MPT(tran
)) == NULL
)
10110 return (mptsas_quiesce_bus(mpt
));
10114 mptsas_scsi_unquiesce(dev_info_t
*dip
)
10117 scsi_hba_tran_t
*tran
;
10119 tran
= ddi_get_driver_private(dip
);
10120 if (tran
== NULL
|| (mpt
= TRAN2MPT(tran
)) == NULL
)
10123 return (mptsas_unquiesce_bus(mpt
));
10127 mptsas_quiesce_bus(mptsas_t
*mpt
)
10129 mptsas_target_t
*ptgt
= NULL
;
10131 NDBG28(("mptsas_quiesce_bus"));
10132 mutex_enter(&mpt
->m_mutex
);
10134 /* Set all the throttles to zero */
10135 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
10136 MPTSAS_HASH_FIRST
);
10137 while (ptgt
!= NULL
) {
10138 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
10139 mptsas_set_throttle(mpt
, ptgt
, HOLD_THROTTLE
);
10140 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
10142 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10143 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
10146 /* If there are any outstanding commands in the queue */
10147 mutex_enter(&mpt
->m_intr_mutex
);
10148 if (mptsas_outstanding_cmds_n(mpt
)) {
10149 mutex_exit(&mpt
->m_intr_mutex
);
10150 mpt
->m_softstate
|= MPTSAS_SS_DRAINING
;
10151 mpt
->m_quiesce_timeid
= timeout(mptsas_ncmds_checkdrain
,
10152 mpt
, (MPTSAS_QUIESCE_TIMEOUT
* drv_usectohz(1000000)));
10153 if (cv_wait_sig(&mpt
->m_cv
, &mpt
->m_mutex
) == 0) {
10155 * Quiesce has been interrupted
10157 mpt
->m_softstate
&= ~MPTSAS_SS_DRAINING
;
10158 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10159 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_FIRST
);
10160 while (ptgt
!= NULL
) {
10161 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
10162 mptsas_set_throttle(mpt
, ptgt
, MAX_THROTTLE
);
10163 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
10165 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10166 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
10168 mptsas_restart_hba(mpt
);
10169 if (mpt
->m_quiesce_timeid
!= 0) {
10170 timeout_id_t tid
= mpt
->m_quiesce_timeid
;
10171 mpt
->m_quiesce_timeid
= 0;
10172 mutex_exit(&mpt
->m_mutex
);
10173 (void) untimeout(tid
);
10176 mutex_exit(&mpt
->m_mutex
);
10179 /* Bus has been quiesced */
10180 ASSERT(mpt
->m_quiesce_timeid
== 0);
10181 mpt
->m_softstate
&= ~MPTSAS_SS_DRAINING
;
10182 mpt
->m_softstate
|= MPTSAS_SS_QUIESCED
;
10183 mutex_exit(&mpt
->m_mutex
);
10187 mutex_exit(&mpt
->m_intr_mutex
);
10188 /* Bus was not busy - QUIESCED */
10189 mutex_exit(&mpt
->m_mutex
);
10195 mptsas_unquiesce_bus(mptsas_t
*mpt
)
10197 mptsas_target_t
*ptgt
= NULL
;
10199 NDBG28(("mptsas_unquiesce_bus"));
10200 mutex_enter(&mpt
->m_mutex
);
10201 mpt
->m_softstate
&= ~MPTSAS_SS_QUIESCED
;
10202 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
10203 MPTSAS_HASH_FIRST
);
10204 while (ptgt
!= NULL
) {
10205 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
10206 mptsas_set_throttle(mpt
, ptgt
, MAX_THROTTLE
);
10207 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
10209 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10210 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
10212 mptsas_restart_hba(mpt
);
10213 mutex_exit(&mpt
->m_mutex
);
10218 mptsas_ncmds_checkdrain(void *arg
)
10220 mptsas_t
*mpt
= arg
;
10221 mptsas_target_t
*ptgt
= NULL
;
10223 mutex_enter(&mpt
->m_mutex
);
10224 if (mpt
->m_softstate
& MPTSAS_SS_DRAINING
) {
10225 mpt
->m_quiesce_timeid
= 0;
10226 mutex_enter(&mpt
->m_intr_mutex
);
10227 if (mptsas_outstanding_cmds_n(mpt
)) {
10228 mutex_exit(&mpt
->m_intr_mutex
);
10230 * The throttle may have been reset because
10231 * of a SCSI bus reset
10233 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10234 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_FIRST
);
10235 while (ptgt
!= NULL
) {
10236 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
10237 mptsas_set_throttle(mpt
, ptgt
, HOLD_THROTTLE
);
10238 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
10240 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
10241 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
10244 mpt
->m_quiesce_timeid
= timeout(mptsas_ncmds_checkdrain
,
10245 mpt
, (MPTSAS_QUIESCE_TIMEOUT
*
10246 drv_usectohz(1000000)));
10248 mutex_exit(&mpt
->m_intr_mutex
);
10249 /* Command queue has been drained */
10250 cv_signal(&mpt
->m_cv
);
10253 mutex_exit(&mpt
->m_mutex
);
10258 mptsas_dump_cmd(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
10261 uint8_t *cp
= (uchar_t
*)cmd
->cmd_pkt
->pkt_cdbp
;
10265 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd
,
10266 Tgt(cmd
), Lun(cmd
)));
10267 (void) sprintf(&buf
[0], "\tcdb=[");
10268 for (i
= 0; i
< (int)cmd
->cmd_cdblen
; i
++) {
10269 (void) sprintf(&buf
[strlen(buf
)], " 0x%x", *cp
++);
10271 (void) sprintf(&buf
[strlen(buf
)], " ]");
10272 NDBG25(("?%s\n", buf
));
10273 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10274 cmd
->cmd_pkt
->pkt_flags
, cmd
->cmd_pkt
->pkt_statistics
,
10275 cmd
->cmd_pkt
->pkt_state
));
10276 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd
->cmd_pkt
->pkt_scbp
?
10277 *(cmd
->cmd_pkt
->pkt_scbp
) : 0, cmd
->cmd_flags
));
10281 mptsas_start_passthru(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
10284 pMPI2RequestHeader_t request_hdrp
;
10285 struct scsi_pkt
*pkt
= cmd
->cmd_pkt
;
10286 mptsas_pt_request_t
*pt
= pkt
->pkt_ha_private
;
10287 uint32_t request_size
, data_size
, dataout_size
;
10288 uint32_t direction
;
10289 ddi_dma_cookie_t data_cookie
;
10290 ddi_dma_cookie_t dataout_cookie
;
10291 uint32_t request_desc_low
, request_desc_high
= 0;
10292 uint32_t i
, sense_bufp
;
10294 uint8_t *request
, function
;
10295 ddi_dma_handle_t dma_hdl
= mpt
->m_dma_req_frame_hdl
;
10296 ddi_acc_handle_t acc_hdl
= mpt
->m_acc_req_frame_hdl
;
10298 desc_type
= MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE
;
10300 request
= pt
->request
;
10301 direction
= pt
->direction
;
10302 request_size
= pt
->request_size
;
10303 data_size
= pt
->data_size
;
10304 dataout_size
= pt
->dataout_size
;
10305 data_cookie
= pt
->data_cookie
;
10306 dataout_cookie
= pt
->dataout_cookie
;
10309 * Store the passthrough message in memory location
10310 * corresponding to our slot number
10312 memp
= mpt
->m_req_frame
+ (mpt
->m_req_frame_size
* cmd
->cmd_slot
);
10313 request_hdrp
= (pMPI2RequestHeader_t
)memp
;
10314 bzero(memp
, mpt
->m_req_frame_size
);
10316 for (i
= 0; i
< request_size
; i
++) {
10317 bcopy(request
+ i
, memp
+ i
, 1);
10320 if (data_size
|| dataout_size
) {
10321 pMpi2SGESimple64_t sgep
;
10322 uint32_t sge_flags
;
10324 sgep
= (pMpi2SGESimple64_t
)((uint8_t *)request_hdrp
+
10326 if (dataout_size
) {
10328 sge_flags
= dataout_size
|
10329 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
10330 MPI2_SGE_FLAGS_END_OF_BUFFER
|
10331 MPI2_SGE_FLAGS_HOST_TO_IOC
|
10332 MPI2_SGE_FLAGS_64_BIT_ADDRESSING
) <<
10333 MPI2_SGE_FLAGS_SHIFT
);
10334 ddi_put32(acc_hdl
, &sgep
->FlagsLength
, sge_flags
);
10335 ddi_put32(acc_hdl
, &sgep
->Address
.Low
,
10336 (uint32_t)(dataout_cookie
.dmac_laddress
&
10338 ddi_put32(acc_hdl
, &sgep
->Address
.High
,
10339 (uint32_t)(dataout_cookie
.dmac_laddress
10343 sge_flags
= data_size
;
10344 sge_flags
|= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
10345 MPI2_SGE_FLAGS_LAST_ELEMENT
|
10346 MPI2_SGE_FLAGS_END_OF_BUFFER
|
10347 MPI2_SGE_FLAGS_END_OF_LIST
|
10348 MPI2_SGE_FLAGS_64_BIT_ADDRESSING
) <<
10349 MPI2_SGE_FLAGS_SHIFT
);
10350 if (direction
== MPTSAS_PASS_THRU_DIRECTION_WRITE
) {
10351 sge_flags
|= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC
) <<
10352 MPI2_SGE_FLAGS_SHIFT
);
10354 sge_flags
|= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST
) <<
10355 MPI2_SGE_FLAGS_SHIFT
);
10357 ddi_put32(acc_hdl
, &sgep
->FlagsLength
,
10359 ddi_put32(acc_hdl
, &sgep
->Address
.Low
,
10360 (uint32_t)(data_cookie
.dmac_laddress
&
10362 ddi_put32(acc_hdl
, &sgep
->Address
.High
,
10363 (uint32_t)(data_cookie
.dmac_laddress
>> 32));
10366 function
= request_hdrp
->Function
;
10367 if ((function
== MPI2_FUNCTION_SCSI_IO_REQUEST
) ||
10368 (function
== MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
)) {
10369 pMpi2SCSIIORequest_t scsi_io_req
;
10371 scsi_io_req
= (pMpi2SCSIIORequest_t
)request_hdrp
;
10373 * Put SGE for data and data_out buffer at the end of
10374 * scsi_io_request message header.(64 bytes in total)
10375 * Following above SGEs, the residual space will be
10376 * used by sense data.
10379 &scsi_io_req
->SenseBufferLength
,
10380 (uint8_t)(request_size
- 64));
10382 sense_bufp
= mpt
->m_req_frame_dma_addr
+
10383 (mpt
->m_req_frame_size
* cmd
->cmd_slot
);
10386 &scsi_io_req
->SenseBufferLowAddress
, sense_bufp
);
10389 * Set SGLOffset0 value
10391 ddi_put8(acc_hdl
, &scsi_io_req
->SGLOffset0
,
10392 offsetof(MPI2_SCSI_IO_REQUEST
, SGL
) / 4);
10395 * Setup descriptor info. RAID passthrough must use the
10396 * default request descriptor which is already set, so if this
10397 * is a SCSI IO request, change the descriptor to SCSI IO.
10399 if (function
== MPI2_FUNCTION_SCSI_IO_REQUEST
) {
10400 desc_type
= MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
;
10401 request_desc_high
= (ddi_get16(acc_hdl
,
10402 &scsi_io_req
->DevHandle
) << 16);
10407 * We must wait till the message has been completed before
10408 * beginning the next message so we wait for this one to
10411 (void) ddi_dma_sync(dma_hdl
, 0, 0, DDI_DMA_SYNC_FORDEV
);
10412 request_desc_low
= (cmd
->cmd_slot
<< 16) + desc_type
;
10413 cmd
->cmd_rfm
= NULL
;
10414 mpt
->m_active
->m_slot
[cmd
->cmd_slot
] = cmd
;
10415 MPTSAS_START_CMD(mpt
, request_desc_low
, request_desc_high
);
10416 if ((mptsas_check_dma_handle(dma_hdl
) != DDI_SUCCESS
) ||
10417 (mptsas_check_acc_handle(acc_hdl
) != DDI_SUCCESS
)) {
10418 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
10425 mptsas_do_passthru(mptsas_t
*mpt
, uint8_t *request
, uint8_t *reply
,
10426 uint8_t *data
, uint32_t request_size
, uint32_t reply_size
,
10427 uint32_t data_size
, uint32_t direction
, uint8_t *dataout
,
10428 uint32_t dataout_size
, short timeout
, int mode
)
10430 mptsas_pt_request_t pt
;
10431 mptsas_dma_alloc_state_t data_dma_state
;
10432 mptsas_dma_alloc_state_t dataout_dma_state
;
10434 mptsas_cmd_t
*cmd
= NULL
;
10435 struct scsi_pkt
*pkt
;
10436 uint32_t reply_len
= 0, sense_len
= 0;
10437 pMPI2RequestHeader_t request_hdrp
;
10438 pMPI2RequestHeader_t request_msg
;
10439 pMPI2DefaultReply_t reply_msg
;
10440 Mpi2SCSIIOReply_t rep_msg
;
10441 int i
, status
= 0, pt_flags
= 0, rv
= 0;
10445 ASSERT(mutex_owned(&mpt
->m_mutex
));
10447 reply_msg
= (pMPI2DefaultReply_t
)(&rep_msg
);
10448 bzero(reply_msg
, sizeof (MPI2_DEFAULT_REPLY
));
10449 request_msg
= kmem_zalloc(request_size
, KM_SLEEP
);
10451 mutex_exit(&mpt
->m_mutex
);
10453 * copy in the request buffer since it could be used by
10454 * another thread when the pt request into waitq
10456 if (ddi_copyin(request
, request_msg
, request_size
, mode
)) {
10457 mutex_enter(&mpt
->m_mutex
);
10459 mptsas_log(mpt
, CE_WARN
, "failed to copy request data");
10462 mutex_enter(&mpt
->m_mutex
);
10464 function
= request_msg
->Function
;
10465 if (function
== MPI2_FUNCTION_SCSI_TASK_MGMT
) {
10466 pMpi2SCSITaskManagementRequest_t task
;
10467 task
= (pMpi2SCSITaskManagementRequest_t
)request_msg
;
10468 mptsas_setup_bus_reset_delay(mpt
);
10469 rv
= mptsas_ioc_task_management(mpt
, task
->TaskType
,
10470 task
->DevHandle
, (int)task
->LUN
[1], reply
, reply_size
,
10475 mptsas_log(mpt
, CE_WARN
, "task management failed");
10480 if (data_size
!= 0) {
10481 data_dma_state
.size
= data_size
;
10482 if (mptsas_dma_alloc(mpt
, &data_dma_state
) != DDI_SUCCESS
) {
10484 mptsas_log(mpt
, CE_WARN
, "failed to alloc DMA "
10488 pt_flags
|= MPTSAS_DATA_ALLOCATED
;
10489 if (direction
== MPTSAS_PASS_THRU_DIRECTION_WRITE
) {
10490 mutex_exit(&mpt
->m_mutex
);
10491 for (i
= 0; i
< data_size
; i
++) {
10492 if (ddi_copyin(data
+ i
, (uint8_t *)
10493 data_dma_state
.memp
+ i
, 1, mode
)) {
10494 mutex_enter(&mpt
->m_mutex
);
10496 mptsas_log(mpt
, CE_WARN
, "failed to "
10501 mutex_enter(&mpt
->m_mutex
);
10505 if (dataout_size
!= 0) {
10506 dataout_dma_state
.size
= dataout_size
;
10507 if (mptsas_dma_alloc(mpt
, &dataout_dma_state
) != DDI_SUCCESS
) {
10509 mptsas_log(mpt
, CE_WARN
, "failed to alloc DMA "
10513 pt_flags
|= MPTSAS_DATAOUT_ALLOCATED
;
10514 mutex_exit(&mpt
->m_mutex
);
10515 for (i
= 0; i
< dataout_size
; i
++) {
10516 if (ddi_copyin(dataout
+ i
, (uint8_t *)
10517 dataout_dma_state
.memp
+ i
, 1, mode
)) {
10518 mutex_enter(&mpt
->m_mutex
);
10519 mptsas_log(mpt
, CE_WARN
, "failed to copy out"
10525 mutex_enter(&mpt
->m_mutex
);
10528 if ((rvalue
= (mptsas_request_from_pool(mpt
, &cmd
, &pkt
))) == -1) {
10530 mptsas_log(mpt
, CE_NOTE
, "event ack command pool is full");
10533 pt_flags
|= MPTSAS_REQUEST_POOL_CMD
;
10535 bzero((caddr_t
)cmd
, sizeof (*cmd
));
10536 bzero((caddr_t
)pkt
, scsi_pkt_size());
10537 bzero((caddr_t
)&pt
, sizeof (pt
));
10539 cmd
->ioc_cmd_slot
= (uint32_t)(rvalue
);
10541 pt
.request
= (uint8_t *)request_msg
;
10542 pt
.direction
= direction
;
10543 pt
.request_size
= request_size
;
10544 pt
.data_size
= data_size
;
10545 pt
.dataout_size
= dataout_size
;
10546 pt
.data_cookie
= data_dma_state
.cookie
;
10547 pt
.dataout_cookie
= dataout_dma_state
.cookie
;
10550 * Form a blank cmd/pkt to store the acknowledgement message
10552 pkt
->pkt_cdbp
= (opaque_t
)&cmd
->cmd_cdb
[0];
10553 pkt
->pkt_scbp
= (opaque_t
)&cmd
->cmd_scb
;
10554 pkt
->pkt_ha_private
= (opaque_t
)&pt
;
10555 pkt
->pkt_flags
= FLAG_HEAD
;
10556 pkt
->pkt_time
= timeout
;
10557 cmd
->cmd_pkt
= pkt
;
10558 cmd
->cmd_flags
= CFLAG_CMDIOC
| CFLAG_PASSTHRU
;
10561 * Save the command in a slot
10563 if (mptsas_save_cmd(mpt
, cmd
) == TRUE
) {
10565 * Once passthru command get slot, set cmd_flags
10568 cmd
->cmd_flags
|= CFLAG_PREPARED
;
10569 mptsas_start_passthru(mpt
, cmd
);
10571 mptsas_waitq_add(mpt
, cmd
);
10574 while ((cmd
->cmd_flags
& CFLAG_FINISHED
) == 0) {
10575 cv_wait(&mpt
->m_passthru_cv
, &mpt
->m_mutex
);
10578 if (cmd
->cmd_flags
& CFLAG_PREPARED
) {
10579 memp
= mpt
->m_req_frame
+ (mpt
->m_req_frame_size
*
10581 request_hdrp
= (pMPI2RequestHeader_t
)memp
;
10584 if (cmd
->cmd_flags
& CFLAG_TIMEOUT
) {
10585 status
= ETIMEDOUT
;
10586 mptsas_log(mpt
, CE_WARN
, "passthrough command timeout");
10587 pt_flags
|= MPTSAS_CMD_TIMEOUT
;
10591 if (cmd
->cmd_rfm
) {
10593 * cmd_rfm is zero means the command reply is a CONTEXT
10594 * reply and no PCI Write to post the free reply SMFA
10595 * because no reply message frame is used.
10596 * cmd_rfm is non-zero means the reply is a ADDRESS
10597 * reply and reply message frame is used.
10599 pt_flags
|= MPTSAS_ADDRESS_REPLY
;
10600 (void) ddi_dma_sync(mpt
->m_dma_reply_frame_hdl
, 0, 0,
10601 DDI_DMA_SYNC_FORCPU
);
10602 reply_msg
= (pMPI2DefaultReply_t
)
10603 (mpt
->m_reply_frame
+ (cmd
->cmd_rfm
-
10604 mpt
->m_reply_frame_dma_addr
));
10607 mptsas_fma_check(mpt
, cmd
);
10608 if (pkt
->pkt_reason
== CMD_TRAN_ERR
) {
10610 mptsas_log(mpt
, CE_WARN
, "passthru fma error");
10613 if (pkt
->pkt_reason
== CMD_RESET
) {
10615 mptsas_log(mpt
, CE_WARN
, "ioc reset abort passthru");
10619 if (pkt
->pkt_reason
== CMD_INCOMPLETE
) {
10621 mptsas_log(mpt
, CE_WARN
, "passthrough command incomplete");
10625 mutex_exit(&mpt
->m_mutex
);
10626 if (cmd
->cmd_flags
& CFLAG_PREPARED
) {
10627 function
= request_hdrp
->Function
;
10628 if ((function
== MPI2_FUNCTION_SCSI_IO_REQUEST
) ||
10629 (function
== MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
)) {
10630 reply_len
= sizeof (MPI2_SCSI_IO_REPLY
);
10631 sense_len
= reply_size
- reply_len
;
10633 reply_len
= reply_size
;
10637 for (i
= 0; i
< reply_len
; i
++) {
10638 if (ddi_copyout((uint8_t *)reply_msg
+ i
, reply
+ i
, 1,
10640 mutex_enter(&mpt
->m_mutex
);
10642 mptsas_log(mpt
, CE_WARN
, "failed to copy out "
10647 for (i
= 0; i
< sense_len
; i
++) {
10648 if (ddi_copyout((uint8_t *)request_hdrp
+ 64 + i
,
10649 reply
+ reply_len
+ i
, 1, mode
)) {
10650 mutex_enter(&mpt
->m_mutex
);
10652 mptsas_log(mpt
, CE_WARN
, "failed to copy out "
10660 if (direction
!= MPTSAS_PASS_THRU_DIRECTION_WRITE
) {
10661 (void) ddi_dma_sync(data_dma_state
.handle
, 0, 0,
10662 DDI_DMA_SYNC_FORCPU
);
10663 for (i
= 0; i
< data_size
; i
++) {
10664 if (ddi_copyout((uint8_t *)(
10665 data_dma_state
.memp
+ i
), data
+ i
, 1,
10667 mutex_enter(&mpt
->m_mutex
);
10669 mptsas_log(mpt
, CE_WARN
, "failed to "
10670 "copy out the reply data");
10676 mutex_enter(&mpt
->m_mutex
);
10679 * Put the reply frame back on the free queue, increment the free
10680 * index, and write the new index to the free index register. But only
10681 * if this reply is an ADDRESS reply.
10683 if (pt_flags
& MPTSAS_ADDRESS_REPLY
) {
10684 ddi_put32(mpt
->m_acc_free_queue_hdl
,
10685 &((uint32_t *)(void *)mpt
->m_free_queue
)[mpt
->m_free_index
],
10687 (void) ddi_dma_sync(mpt
->m_dma_free_queue_hdl
, 0, 0,
10688 DDI_DMA_SYNC_FORDEV
);
10689 if (++mpt
->m_free_index
== mpt
->m_free_queue_depth
) {
10690 mpt
->m_free_index
= 0;
10692 ddi_put32(mpt
->m_datap
, &mpt
->m_reg
->ReplyFreeHostIndex
,
10693 mpt
->m_free_index
);
10695 if (cmd
&& (cmd
->cmd_flags
& CFLAG_PREPARED
)) {
10696 mptsas_remove_cmd(mpt
, cmd
);
10697 pt_flags
&= (~MPTSAS_REQUEST_POOL_CMD
);
10699 if (pt_flags
& MPTSAS_REQUEST_POOL_CMD
)
10700 mptsas_return_to_pool(mpt
, cmd
);
10701 if (pt_flags
& MPTSAS_DATA_ALLOCATED
) {
10702 if (mptsas_check_dma_handle(data_dma_state
.handle
) !=
10704 ddi_fm_service_impact(mpt
->m_dip
,
10705 DDI_SERVICE_UNAFFECTED
);
10708 mptsas_dma_free(&data_dma_state
);
10710 if (pt_flags
& MPTSAS_DATAOUT_ALLOCATED
) {
10711 if (mptsas_check_dma_handle(dataout_dma_state
.handle
) !=
10713 ddi_fm_service_impact(mpt
->m_dip
,
10714 DDI_SERVICE_UNAFFECTED
);
10717 mptsas_dma_free(&dataout_dma_state
);
10719 if (pt_flags
& MPTSAS_CMD_TIMEOUT
) {
10720 if ((mptsas_restart_ioc(mpt
)) == DDI_FAILURE
) {
10721 mptsas_log(mpt
, CE_WARN
, "mptsas_restart_ioc failed");
10725 kmem_free(request_msg
, request_size
);
10731 mptsas_pass_thru(mptsas_t
*mpt
, mptsas_pass_thru_t
*data
, int mode
)
10734 * If timeout is 0, set timeout to default of 60 seconds.
10736 if (data
->Timeout
== 0) {
10737 data
->Timeout
= MPTSAS_PASS_THRU_TIME_DEFAULT
;
10740 if (((data
->DataSize
== 0) &&
10741 (data
->DataDirection
== MPTSAS_PASS_THRU_DIRECTION_NONE
)) ||
10742 ((data
->DataSize
!= 0) &&
10743 ((data
->DataDirection
== MPTSAS_PASS_THRU_DIRECTION_READ
) ||
10744 (data
->DataDirection
== MPTSAS_PASS_THRU_DIRECTION_WRITE
) ||
10745 ((data
->DataDirection
== MPTSAS_PASS_THRU_DIRECTION_BOTH
) &&
10746 (data
->DataOutSize
!= 0))))) {
10747 if (data
->DataDirection
== MPTSAS_PASS_THRU_DIRECTION_BOTH
) {
10748 data
->DataDirection
= MPTSAS_PASS_THRU_DIRECTION_READ
;
10750 data
->DataOutSize
= 0;
10753 * Send passthru request messages
10755 return (mptsas_do_passthru(mpt
,
10756 (uint8_t *)((uintptr_t)data
->PtrRequest
),
10757 (uint8_t *)((uintptr_t)data
->PtrReply
),
10758 (uint8_t *)((uintptr_t)data
->PtrData
),
10759 data
->RequestSize
, data
->ReplySize
,
10760 data
->DataSize
, data
->DataDirection
,
10761 (uint8_t *)((uintptr_t)data
->PtrDataOut
),
10762 data
->DataOutSize
, data
->Timeout
, mode
));
10769 mptsas_get_fw_diag_buffer_number(mptsas_t
*mpt
, uint32_t unique_id
)
10773 for (index
= 0; index
< MPI2_DIAG_BUF_TYPE_COUNT
; index
++) {
10774 if (mpt
->m_fw_diag_buffer_list
[index
].unique_id
== unique_id
) {
10779 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND
);
10783 mptsas_start_diag(mptsas_t
*mpt
, mptsas_cmd_t
*cmd
)
10785 pMpi2DiagBufferPostRequest_t pDiag_post_msg
;
10786 pMpi2DiagReleaseRequest_t pDiag_release_msg
;
10787 struct scsi_pkt
*pkt
= cmd
->cmd_pkt
;
10788 mptsas_diag_request_t
*diag
= pkt
->pkt_ha_private
;
10789 uint32_t request_desc_low
, i
;
10791 ASSERT(mutex_owned(&mpt
->m_mutex
));
10794 * Form the diag message depending on the post or release function.
10796 if (diag
->function
== MPI2_FUNCTION_DIAG_BUFFER_POST
) {
10797 pDiag_post_msg
= (pMpi2DiagBufferPostRequest_t
)
10798 (mpt
->m_req_frame
+ (mpt
->m_req_frame_size
*
10800 bzero(pDiag_post_msg
, mpt
->m_req_frame_size
);
10801 ddi_put8(mpt
->m_acc_req_frame_hdl
, &pDiag_post_msg
->Function
,
10803 ddi_put8(mpt
->m_acc_req_frame_hdl
, &pDiag_post_msg
->BufferType
,
10804 diag
->pBuffer
->buffer_type
);
10805 ddi_put8(mpt
->m_acc_req_frame_hdl
,
10806 &pDiag_post_msg
->ExtendedType
,
10807 diag
->pBuffer
->extended_type
);
10808 ddi_put32(mpt
->m_acc_req_frame_hdl
,
10809 &pDiag_post_msg
->BufferLength
,
10810 diag
->pBuffer
->buffer_data
.size
);
10811 for (i
= 0; i
< (sizeof (pDiag_post_msg
->ProductSpecific
) / 4);
10813 ddi_put32(mpt
->m_acc_req_frame_hdl
,
10814 &pDiag_post_msg
->ProductSpecific
[i
],
10815 diag
->pBuffer
->product_specific
[i
]);
10817 ddi_put32(mpt
->m_acc_req_frame_hdl
,
10818 &pDiag_post_msg
->BufferAddress
.Low
,
10819 (uint32_t)(diag
->pBuffer
->buffer_data
.cookie
.dmac_laddress
10821 ddi_put32(mpt
->m_acc_req_frame_hdl
,
10822 &pDiag_post_msg
->BufferAddress
.High
,
10823 (uint32_t)(diag
->pBuffer
->buffer_data
.cookie
.dmac_laddress
10826 pDiag_release_msg
= (pMpi2DiagReleaseRequest_t
)
10827 (mpt
->m_req_frame
+ (mpt
->m_req_frame_size
*
10829 bzero(pDiag_release_msg
, mpt
->m_req_frame_size
);
10830 ddi_put8(mpt
->m_acc_req_frame_hdl
,
10831 &pDiag_release_msg
->Function
, diag
->function
);
10832 ddi_put8(mpt
->m_acc_req_frame_hdl
,
10833 &pDiag_release_msg
->BufferType
,
10834 diag
->pBuffer
->buffer_type
);
10840 (void) ddi_dma_sync(mpt
->m_dma_req_frame_hdl
, 0, 0,
10841 DDI_DMA_SYNC_FORDEV
);
10842 request_desc_low
= (cmd
->cmd_slot
<< 16) +
10843 MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE
;
10844 cmd
->cmd_rfm
= NULL
;
10845 mpt
->m_active
->m_slot
[cmd
->cmd_slot
] = cmd
;
10846 MPTSAS_START_CMD(mpt
, request_desc_low
, 0);
10847 if ((mptsas_check_dma_handle(mpt
->m_dma_req_frame_hdl
) !=
10849 (mptsas_check_acc_handle(mpt
->m_acc_req_frame_hdl
) !=
10851 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
10856 mptsas_post_fw_diag_buffer(mptsas_t
*mpt
,
10857 mptsas_fw_diagnostic_buffer_t
*pBuffer
, uint32_t *return_code
)
10859 mptsas_diag_request_t diag
;
10860 int status
, slot_num
, post_flags
= 0;
10861 mptsas_cmd_t
*cmd
= NULL
;
10862 struct scsi_pkt
*pkt
;
10863 pMpi2DiagBufferPostReply_t reply
;
10864 uint16_t iocstatus
;
10865 uint32_t iocloginfo
, transfer_length
;
10868 * If buffer is not enabled, just leave.
10870 *return_code
= MPTSAS_FW_DIAG_ERROR_POST_FAILED
;
10871 if (!pBuffer
->enabled
) {
10872 status
= DDI_FAILURE
;
10877 * Clear some flags initially.
10879 pBuffer
->force_release
= FALSE
;
10880 pBuffer
->valid_data
= FALSE
;
10881 pBuffer
->owned_by_firmware
= FALSE
;
10884 * Get a cmd buffer from the cmd buffer pool
10886 if ((slot_num
= (mptsas_request_from_pool(mpt
, &cmd
, &pkt
))) == -1) {
10887 status
= DDI_FAILURE
;
10888 mptsas_log(mpt
, CE_NOTE
, "command pool is full: Post FW Diag");
10891 post_flags
|= MPTSAS_REQUEST_POOL_CMD
;
10893 bzero((caddr_t
)cmd
, sizeof (*cmd
));
10894 bzero((caddr_t
)pkt
, scsi_pkt_size());
10896 cmd
->ioc_cmd_slot
= (uint32_t)(slot_num
);
10898 diag
.pBuffer
= pBuffer
;
10899 diag
.function
= MPI2_FUNCTION_DIAG_BUFFER_POST
;
10902 * Form a blank cmd/pkt to store the acknowledgement message
10904 pkt
->pkt_ha_private
= (opaque_t
)&diag
;
10905 pkt
->pkt_flags
= FLAG_HEAD
;
10906 pkt
->pkt_time
= 60;
10907 cmd
->cmd_pkt
= pkt
;
10908 cmd
->cmd_flags
= CFLAG_CMDIOC
| CFLAG_FW_DIAG
;
10911 * Save the command in a slot
10913 if (mptsas_save_cmd(mpt
, cmd
) == TRUE
) {
10915 * Once passthru command get slot, set cmd_flags
10918 cmd
->cmd_flags
|= CFLAG_PREPARED
;
10919 mptsas_start_diag(mpt
, cmd
);
10921 mptsas_waitq_add(mpt
, cmd
);
10924 while ((cmd
->cmd_flags
& CFLAG_FINISHED
) == 0) {
10925 cv_wait(&mpt
->m_fw_diag_cv
, &mpt
->m_mutex
);
10928 if (cmd
->cmd_flags
& CFLAG_TIMEOUT
) {
10929 status
= DDI_FAILURE
;
10930 mptsas_log(mpt
, CE_WARN
, "Post FW Diag command timeout");
10935 * cmd_rfm points to the reply message if a reply was given. Check the
10936 * IOCStatus to make sure everything went OK with the FW diag request
10937 * and set buffer flags.
10939 if (cmd
->cmd_rfm
) {
10940 post_flags
|= MPTSAS_ADDRESS_REPLY
;
10941 (void) ddi_dma_sync(mpt
->m_dma_reply_frame_hdl
, 0, 0,
10942 DDI_DMA_SYNC_FORCPU
);
10943 reply
= (pMpi2DiagBufferPostReply_t
)(mpt
->m_reply_frame
+
10944 (cmd
->cmd_rfm
- mpt
->m_reply_frame_dma_addr
));
10947 * Get the reply message data
10949 iocstatus
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
10950 &reply
->IOCStatus
);
10951 iocloginfo
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
10952 &reply
->IOCLogInfo
);
10953 transfer_length
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
10954 &reply
->TransferLength
);
10957 * If post failed quit.
10959 if (iocstatus
!= MPI2_IOCSTATUS_SUCCESS
) {
10960 status
= DDI_FAILURE
;
10961 NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
10962 "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus
,
10963 iocloginfo
, transfer_length
));
10968 * Post was successful.
10970 pBuffer
->valid_data
= TRUE
;
10971 pBuffer
->owned_by_firmware
= TRUE
;
10972 *return_code
= MPTSAS_FW_DIAG_ERROR_SUCCESS
;
10973 status
= DDI_SUCCESS
;
10978 * Put the reply frame back on the free queue, increment the free
10979 * index, and write the new index to the free index register. But only
10980 * if this reply is an ADDRESS reply.
10982 if (post_flags
& MPTSAS_ADDRESS_REPLY
) {
10983 ddi_put32(mpt
->m_acc_free_queue_hdl
,
10984 &((uint32_t *)(void *)mpt
->m_free_queue
)[mpt
->m_free_index
],
10986 (void) ddi_dma_sync(mpt
->m_dma_free_queue_hdl
, 0, 0,
10987 DDI_DMA_SYNC_FORDEV
);
10988 if (++mpt
->m_free_index
== mpt
->m_free_queue_depth
) {
10989 mpt
->m_free_index
= 0;
10991 ddi_put32(mpt
->m_datap
, &mpt
->m_reg
->ReplyFreeHostIndex
,
10992 mpt
->m_free_index
);
10994 if (cmd
&& (cmd
->cmd_flags
& CFLAG_PREPARED
)) {
10995 mptsas_remove_cmd(mpt
, cmd
);
10996 post_flags
&= (~MPTSAS_REQUEST_POOL_CMD
);
10998 if (post_flags
& MPTSAS_REQUEST_POOL_CMD
) {
10999 mptsas_return_to_pool(mpt
, cmd
);
11006 mptsas_release_fw_diag_buffer(mptsas_t
*mpt
,
11007 mptsas_fw_diagnostic_buffer_t
*pBuffer
, uint32_t *return_code
,
11008 uint32_t diag_type
)
11010 mptsas_diag_request_t diag
;
11011 int status
, slot_num
, rel_flags
= 0;
11012 mptsas_cmd_t
*cmd
= NULL
;
11013 struct scsi_pkt
*pkt
;
11014 pMpi2DiagReleaseReply_t reply
;
11015 uint16_t iocstatus
;
11016 uint32_t iocloginfo
;
11019 * If buffer is not enabled, just leave.
11021 *return_code
= MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED
;
11022 if (!pBuffer
->enabled
) {
11023 mptsas_log(mpt
, CE_NOTE
, "This buffer type is not supported "
11025 status
= DDI_FAILURE
;
11030 * Clear some flags initially.
11032 pBuffer
->force_release
= FALSE
;
11033 pBuffer
->valid_data
= FALSE
;
11034 pBuffer
->owned_by_firmware
= FALSE
;
11037 * Get a cmd buffer from the cmd buffer pool
11039 if ((slot_num
= (mptsas_request_from_pool(mpt
, &cmd
, &pkt
))) == -1) {
11040 status
= DDI_FAILURE
;
11041 mptsas_log(mpt
, CE_NOTE
, "command pool is full: Release FW "
11045 rel_flags
|= MPTSAS_REQUEST_POOL_CMD
;
11047 bzero((caddr_t
)cmd
, sizeof (*cmd
));
11048 bzero((caddr_t
)pkt
, scsi_pkt_size());
11050 cmd
->ioc_cmd_slot
= (uint32_t)(slot_num
);
11052 diag
.pBuffer
= pBuffer
;
11053 diag
.function
= MPI2_FUNCTION_DIAG_RELEASE
;
11056 * Form a blank cmd/pkt to store the acknowledgement message
11058 pkt
->pkt_ha_private
= (opaque_t
)&diag
;
11059 pkt
->pkt_flags
= FLAG_HEAD
;
11060 pkt
->pkt_time
= 60;
11061 cmd
->cmd_pkt
= pkt
;
11062 cmd
->cmd_flags
= CFLAG_CMDIOC
| CFLAG_FW_DIAG
;
11065 * Save the command in a slot
11067 if (mptsas_save_cmd(mpt
, cmd
) == TRUE
) {
11069 * Once passthru command get slot, set cmd_flags
11072 cmd
->cmd_flags
|= CFLAG_PREPARED
;
11073 mptsas_start_diag(mpt
, cmd
);
11075 mptsas_waitq_add(mpt
, cmd
);
11078 while ((cmd
->cmd_flags
& CFLAG_FINISHED
) == 0) {
11079 cv_wait(&mpt
->m_fw_diag_cv
, &mpt
->m_mutex
);
11082 if (cmd
->cmd_flags
& CFLAG_TIMEOUT
) {
11083 status
= DDI_FAILURE
;
11084 mptsas_log(mpt
, CE_WARN
, "Release FW Diag command timeout");
11089 * cmd_rfm points to the reply message if a reply was given. Check the
11090 * IOCStatus to make sure everything went OK with the FW diag request
11091 * and set buffer flags.
11093 if (cmd
->cmd_rfm
) {
11094 rel_flags
|= MPTSAS_ADDRESS_REPLY
;
11095 (void) ddi_dma_sync(mpt
->m_dma_reply_frame_hdl
, 0, 0,
11096 DDI_DMA_SYNC_FORCPU
);
11097 reply
= (pMpi2DiagReleaseReply_t
)(mpt
->m_reply_frame
+
11098 (cmd
->cmd_rfm
- mpt
->m_reply_frame_dma_addr
));
11101 * Get the reply message data
11103 iocstatus
= ddi_get16(mpt
->m_acc_reply_frame_hdl
,
11104 &reply
->IOCStatus
);
11105 iocloginfo
= ddi_get32(mpt
->m_acc_reply_frame_hdl
,
11106 &reply
->IOCLogInfo
);
11109 * If release failed quit.
11111 if ((iocstatus
!= MPI2_IOCSTATUS_SUCCESS
) ||
11112 pBuffer
->owned_by_firmware
) {
11113 status
= DDI_FAILURE
;
11114 NDBG13(("release FW Diag Buffer failed: "
11115 "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus
,
11121 * Release was successful.
11123 *return_code
= MPTSAS_FW_DIAG_ERROR_SUCCESS
;
11124 status
= DDI_SUCCESS
;
11127 * If this was for an UNREGISTER diag type command, clear the
11130 if (diag_type
== MPTSAS_FW_DIAG_TYPE_UNREGISTER
) {
11131 pBuffer
->unique_id
= MPTSAS_FW_DIAG_INVALID_UID
;
11137 * Put the reply frame back on the free queue, increment the free
11138 * index, and write the new index to the free index register. But only
11139 * if this reply is an ADDRESS reply.
11141 if (rel_flags
& MPTSAS_ADDRESS_REPLY
) {
11142 ddi_put32(mpt
->m_acc_free_queue_hdl
,
11143 &((uint32_t *)(void *)mpt
->m_free_queue
)[mpt
->m_free_index
],
11145 (void) ddi_dma_sync(mpt
->m_dma_free_queue_hdl
, 0, 0,
11146 DDI_DMA_SYNC_FORDEV
);
11147 if (++mpt
->m_free_index
== mpt
->m_free_queue_depth
) {
11148 mpt
->m_free_index
= 0;
11150 ddi_put32(mpt
->m_datap
, &mpt
->m_reg
->ReplyFreeHostIndex
,
11151 mpt
->m_free_index
);
11153 if (cmd
&& (cmd
->cmd_flags
& CFLAG_PREPARED
)) {
11154 mptsas_remove_cmd(mpt
, cmd
);
11155 rel_flags
&= (~MPTSAS_REQUEST_POOL_CMD
);
11157 if (rel_flags
& MPTSAS_REQUEST_POOL_CMD
) {
11158 mptsas_return_to_pool(mpt
, cmd
);
11165 mptsas_diag_register(mptsas_t
*mpt
, mptsas_fw_diag_register_t
*diag_register
,
11166 uint32_t *return_code
)
11168 mptsas_fw_diagnostic_buffer_t
*pBuffer
;
11169 uint8_t extended_type
, buffer_type
, i
;
11170 uint32_t buffer_size
;
11171 uint32_t unique_id
;
11174 ASSERT(mutex_owned(&mpt
->m_mutex
));
11176 extended_type
= diag_register
->ExtendedType
;
11177 buffer_type
= diag_register
->BufferType
;
11178 buffer_size
= diag_register
->RequestedBufferSize
;
11179 unique_id
= diag_register
->UniqueId
;
11182 * Check for valid buffer type
11184 if (buffer_type
>= MPI2_DIAG_BUF_TYPE_COUNT
) {
11185 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER
;
11186 return (DDI_FAILURE
);
11190 * Get the current buffer and look up the unique ID. The unique ID
11191 * should not be found. If it is, the ID is already in use.
11193 i
= mptsas_get_fw_diag_buffer_number(mpt
, unique_id
);
11194 pBuffer
= &mpt
->m_fw_diag_buffer_list
[buffer_type
];
11195 if (i
!= MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND
) {
11196 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_UID
;
11197 return (DDI_FAILURE
);
11201 * The buffer's unique ID should not be registered yet, and the given
11202 * unique ID cannot be 0.
11204 if ((pBuffer
->unique_id
!= MPTSAS_FW_DIAG_INVALID_UID
) ||
11205 (unique_id
== MPTSAS_FW_DIAG_INVALID_UID
)) {
11206 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_UID
;
11207 return (DDI_FAILURE
);
11211 * If this buffer is already posted as immediate, just change owner.
11213 if (pBuffer
->immediate
&& pBuffer
->owned_by_firmware
&&
11214 (pBuffer
->unique_id
== MPTSAS_FW_DIAG_INVALID_UID
)) {
11215 pBuffer
->immediate
= FALSE
;
11216 pBuffer
->unique_id
= unique_id
;
11217 return (DDI_SUCCESS
);
11221 * Post a new buffer after checking if it's enabled. The DMA buffer
11222 * that is allocated will be contiguous (sgl_len = 1).
11224 if (!pBuffer
->enabled
) {
11225 *return_code
= MPTSAS_FW_DIAG_ERROR_NO_BUFFER
;
11226 return (DDI_FAILURE
);
11228 bzero(&pBuffer
->buffer_data
, sizeof (mptsas_dma_alloc_state_t
));
11229 pBuffer
->buffer_data
.size
= buffer_size
;
11230 if (mptsas_dma_alloc(mpt
, &pBuffer
->buffer_data
) != DDI_SUCCESS
) {
11231 mptsas_log(mpt
, CE_WARN
, "failed to alloc DMA resource for "
11232 "diag buffer: size = %d bytes", buffer_size
);
11233 *return_code
= MPTSAS_FW_DIAG_ERROR_NO_BUFFER
;
11234 return (DDI_FAILURE
);
11238 * Copy the given info to the diag buffer and post the buffer.
11240 pBuffer
->buffer_type
= buffer_type
;
11241 pBuffer
->immediate
= FALSE
;
11242 if (buffer_type
== MPI2_DIAG_BUF_TYPE_TRACE
) {
11243 for (i
= 0; i
< (sizeof (pBuffer
->product_specific
) / 4);
11245 pBuffer
->product_specific
[i
] =
11246 diag_register
->ProductSpecific
[i
];
11249 pBuffer
->extended_type
= extended_type
;
11250 pBuffer
->unique_id
= unique_id
;
11251 status
= mptsas_post_fw_diag_buffer(mpt
, pBuffer
, return_code
);
11253 if (mptsas_check_dma_handle(pBuffer
->buffer_data
.handle
) !=
11255 mptsas_log(mpt
, CE_WARN
, "Check of DMA handle failed in "
11256 "mptsas_diag_register.");
11257 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
11258 status
= DDI_FAILURE
;
11262 * In case there was a failure, free the DMA buffer.
11264 if (status
== DDI_FAILURE
) {
11265 mptsas_dma_free(&pBuffer
->buffer_data
);
11272 mptsas_diag_unregister(mptsas_t
*mpt
,
11273 mptsas_fw_diag_unregister_t
*diag_unregister
, uint32_t *return_code
)
11275 mptsas_fw_diagnostic_buffer_t
*pBuffer
;
11277 uint32_t unique_id
;
11280 ASSERT(mutex_owned(&mpt
->m_mutex
));
11282 unique_id
= diag_unregister
->UniqueId
;
11285 * Get the current buffer and look up the unique ID. The unique ID
11288 i
= mptsas_get_fw_diag_buffer_number(mpt
, unique_id
);
11289 if (i
== MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND
) {
11290 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_UID
;
11291 return (DDI_FAILURE
);
11294 pBuffer
= &mpt
->m_fw_diag_buffer_list
[i
];
11297 * Try to release the buffer from FW before freeing it. If release
11298 * fails, don't free the DMA buffer in case FW tries to access it
11299 * later. If buffer is not owned by firmware, can't release it.
11301 if (!pBuffer
->owned_by_firmware
) {
11302 status
= DDI_SUCCESS
;
11304 status
= mptsas_release_fw_diag_buffer(mpt
, pBuffer
,
11305 return_code
, MPTSAS_FW_DIAG_TYPE_UNREGISTER
);
11309 * At this point, return the current status no matter what happens with
11312 pBuffer
->unique_id
= MPTSAS_FW_DIAG_INVALID_UID
;
11313 if (status
== DDI_SUCCESS
) {
11314 if (mptsas_check_dma_handle(pBuffer
->buffer_data
.handle
) !=
11316 mptsas_log(mpt
, CE_WARN
, "Check of DMA handle failed "
11317 "in mptsas_diag_unregister.");
11318 ddi_fm_service_impact(mpt
->m_dip
,
11319 DDI_SERVICE_UNAFFECTED
);
11321 mptsas_dma_free(&pBuffer
->buffer_data
);
11328 mptsas_diag_query(mptsas_t
*mpt
, mptsas_fw_diag_query_t
*diag_query
,
11329 uint32_t *return_code
)
11331 mptsas_fw_diagnostic_buffer_t
*pBuffer
;
11333 uint32_t unique_id
;
11335 ASSERT(mutex_owned(&mpt
->m_mutex
));
11337 unique_id
= diag_query
->UniqueId
;
11340 * If ID is valid, query on ID.
11341 * If ID is invalid, query on buffer type.
11343 if (unique_id
== MPTSAS_FW_DIAG_INVALID_UID
) {
11344 i
= diag_query
->BufferType
;
11345 if (i
>= MPI2_DIAG_BUF_TYPE_COUNT
) {
11346 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_UID
;
11347 return (DDI_FAILURE
);
11350 i
= mptsas_get_fw_diag_buffer_number(mpt
, unique_id
);
11351 if (i
== MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND
) {
11352 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_UID
;
11353 return (DDI_FAILURE
);
11358 * Fill query structure with the diag buffer info.
11360 pBuffer
= &mpt
->m_fw_diag_buffer_list
[i
];
11361 diag_query
->BufferType
= pBuffer
->buffer_type
;
11362 diag_query
->ExtendedType
= pBuffer
->extended_type
;
11363 if (diag_query
->BufferType
== MPI2_DIAG_BUF_TYPE_TRACE
) {
11364 for (i
= 0; i
< (sizeof (diag_query
->ProductSpecific
) / 4);
11366 diag_query
->ProductSpecific
[i
] =
11367 pBuffer
->product_specific
[i
];
11370 diag_query
->TotalBufferSize
= pBuffer
->buffer_data
.size
;
11371 diag_query
->DriverAddedBufferSize
= 0;
11372 diag_query
->UniqueId
= pBuffer
->unique_id
;
11373 diag_query
->ApplicationFlags
= 0;
11374 diag_query
->DiagnosticFlags
= 0;
11377 * Set/Clear application flags
11379 if (pBuffer
->immediate
) {
11380 diag_query
->ApplicationFlags
&= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED
;
11382 diag_query
->ApplicationFlags
|= MPTSAS_FW_DIAG_FLAG_APP_OWNED
;
11384 if (pBuffer
->valid_data
|| pBuffer
->owned_by_firmware
) {
11385 diag_query
->ApplicationFlags
|=
11386 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID
;
11388 diag_query
->ApplicationFlags
&=
11389 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID
;
11391 if (pBuffer
->owned_by_firmware
) {
11392 diag_query
->ApplicationFlags
|=
11393 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS
;
11395 diag_query
->ApplicationFlags
&=
11396 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS
;
11399 return (DDI_SUCCESS
);
11403 mptsas_diag_read_buffer(mptsas_t
*mpt
,
11404 mptsas_diag_read_buffer_t
*diag_read_buffer
, uint8_t *ioctl_buf
,
11405 uint32_t *return_code
, int ioctl_mode
)
11407 mptsas_fw_diagnostic_buffer_t
*pBuffer
;
11409 uint32_t unique_id
, byte
;
11412 ASSERT(mutex_owned(&mpt
->m_mutex
));
11414 unique_id
= diag_read_buffer
->UniqueId
;
11417 * Get the current buffer and look up the unique ID. The unique ID
11420 i
= mptsas_get_fw_diag_buffer_number(mpt
, unique_id
);
11421 if (i
== MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND
) {
11422 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_UID
;
11423 return (DDI_FAILURE
);
11426 pBuffer
= &mpt
->m_fw_diag_buffer_list
[i
];
11429 * Make sure requested read is within limits
11431 if (diag_read_buffer
->StartingOffset
+ diag_read_buffer
->BytesToRead
>
11432 pBuffer
->buffer_data
.size
) {
11433 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER
;
11434 return (DDI_FAILURE
);
11438 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11439 * buffer that was allocated is one contiguous buffer.
11441 pData
= (uint8_t *)(pBuffer
->buffer_data
.memp
+
11442 diag_read_buffer
->StartingOffset
);
11443 (void) ddi_dma_sync(pBuffer
->buffer_data
.handle
, 0, 0,
11444 DDI_DMA_SYNC_FORCPU
);
11445 for (byte
= 0; byte
< diag_read_buffer
->BytesToRead
; byte
++) {
11446 if (ddi_copyout(pData
+ byte
, ioctl_buf
+ byte
, 1, ioctl_mode
)
11448 return (DDI_FAILURE
);
11451 diag_read_buffer
->Status
= 0;
11454 * Set or clear the Force Release flag.
11456 if (pBuffer
->force_release
) {
11457 diag_read_buffer
->Flags
|= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE
;
11459 diag_read_buffer
->Flags
&= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE
;
11463 * If buffer is to be reregistered, make sure it's not already owned by
11466 status
= DDI_SUCCESS
;
11467 if (!pBuffer
->owned_by_firmware
) {
11468 if (diag_read_buffer
->Flags
& MPTSAS_FW_DIAG_FLAG_REREGISTER
) {
11469 status
= mptsas_post_fw_diag_buffer(mpt
, pBuffer
,
11478 mptsas_diag_release(mptsas_t
*mpt
, mptsas_fw_diag_release_t
*diag_release
,
11479 uint32_t *return_code
)
11481 mptsas_fw_diagnostic_buffer_t
*pBuffer
;
11483 uint32_t unique_id
;
11486 ASSERT(mutex_owned(&mpt
->m_mutex
));
11488 unique_id
= diag_release
->UniqueId
;
11491 * Get the current buffer and look up the unique ID. The unique ID
11494 i
= mptsas_get_fw_diag_buffer_number(mpt
, unique_id
);
11495 if (i
== MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND
) {
11496 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_UID
;
11497 return (DDI_FAILURE
);
11500 pBuffer
= &mpt
->m_fw_diag_buffer_list
[i
];
11503 * If buffer is not owned by firmware, it's already been released.
11505 if (!pBuffer
->owned_by_firmware
) {
11506 *return_code
= MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED
;
11507 return (DDI_FAILURE
);
11511 * Release the buffer.
11513 status
= mptsas_release_fw_diag_buffer(mpt
, pBuffer
, return_code
,
11514 MPTSAS_FW_DIAG_TYPE_RELEASE
);
11519 mptsas_do_diag_action(mptsas_t
*mpt
, uint32_t action
, uint8_t *diag_action
,
11520 uint32_t length
, uint32_t *return_code
, int ioctl_mode
)
11522 mptsas_fw_diag_register_t diag_register
;
11523 mptsas_fw_diag_unregister_t diag_unregister
;
11524 mptsas_fw_diag_query_t diag_query
;
11525 mptsas_diag_read_buffer_t diag_read_buffer
;
11526 mptsas_fw_diag_release_t diag_release
;
11527 int status
= DDI_SUCCESS
;
11528 uint32_t original_return_code
, read_buf_len
;
11530 ASSERT(mutex_owned(&mpt
->m_mutex
));
11532 original_return_code
= *return_code
;
11533 *return_code
= MPTSAS_FW_DIAG_ERROR_SUCCESS
;
11536 case MPTSAS_FW_DIAG_TYPE_REGISTER
:
11539 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER
;
11540 status
= DDI_FAILURE
;
11543 if (ddi_copyin(diag_action
, &diag_register
,
11544 sizeof (diag_register
), ioctl_mode
) != 0) {
11545 return (DDI_FAILURE
);
11547 status
= mptsas_diag_register(mpt
, &diag_register
,
11551 case MPTSAS_FW_DIAG_TYPE_UNREGISTER
:
11552 if (length
< sizeof (diag_unregister
)) {
11554 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER
;
11555 status
= DDI_FAILURE
;
11558 if (ddi_copyin(diag_action
, &diag_unregister
,
11559 sizeof (diag_unregister
), ioctl_mode
) != 0) {
11560 return (DDI_FAILURE
);
11562 status
= mptsas_diag_unregister(mpt
, &diag_unregister
,
11566 case MPTSAS_FW_DIAG_TYPE_QUERY
:
11567 if (length
< sizeof (diag_query
)) {
11569 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER
;
11570 status
= DDI_FAILURE
;
11573 if (ddi_copyin(diag_action
, &diag_query
,
11574 sizeof (diag_query
), ioctl_mode
) != 0) {
11575 return (DDI_FAILURE
);
11577 status
= mptsas_diag_query(mpt
, &diag_query
,
11579 if (status
== DDI_SUCCESS
) {
11580 if (ddi_copyout(&diag_query
, diag_action
,
11581 sizeof (diag_query
), ioctl_mode
) != 0) {
11582 return (DDI_FAILURE
);
11587 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER
:
11588 if (ddi_copyin(diag_action
, &diag_read_buffer
,
11589 sizeof (diag_read_buffer
) - 4, ioctl_mode
) != 0) {
11590 return (DDI_FAILURE
);
11592 read_buf_len
= sizeof (diag_read_buffer
) -
11593 sizeof (diag_read_buffer
.DataBuffer
) +
11594 diag_read_buffer
.BytesToRead
;
11595 if (length
< read_buf_len
) {
11597 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER
;
11598 status
= DDI_FAILURE
;
11601 status
= mptsas_diag_read_buffer(mpt
,
11602 &diag_read_buffer
, diag_action
+
11603 sizeof (diag_read_buffer
) - 4, return_code
,
11605 if (status
== DDI_SUCCESS
) {
11606 if (ddi_copyout(&diag_read_buffer
, diag_action
,
11607 sizeof (diag_read_buffer
) - 4, ioctl_mode
)
11609 return (DDI_FAILURE
);
11614 case MPTSAS_FW_DIAG_TYPE_RELEASE
:
11615 if (length
< sizeof (diag_release
)) {
11617 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER
;
11618 status
= DDI_FAILURE
;
11621 if (ddi_copyin(diag_action
, &diag_release
,
11622 sizeof (diag_release
), ioctl_mode
) != 0) {
11623 return (DDI_FAILURE
);
11625 status
= mptsas_diag_release(mpt
, &diag_release
,
11630 *return_code
= MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER
;
11631 status
= DDI_FAILURE
;
11635 if ((status
== DDI_FAILURE
) &&
11636 (original_return_code
== MPTSAS_FW_DIAG_NEW
) &&
11637 (*return_code
!= MPTSAS_FW_DIAG_ERROR_SUCCESS
)) {
11638 status
= DDI_SUCCESS
;
11645 mptsas_diag_action(mptsas_t
*mpt
, mptsas_diag_action_t
*user_data
, int mode
)
11648 mptsas_diag_action_t driver_data
;
11650 ASSERT(mutex_owned(&mpt
->m_mutex
));
11653 * Copy the user data to a driver data buffer.
11655 if (ddi_copyin(user_data
, &driver_data
, sizeof (mptsas_diag_action_t
),
11658 * Send diag action request if Action is valid
11660 if (driver_data
.Action
== MPTSAS_FW_DIAG_TYPE_REGISTER
||
11661 driver_data
.Action
== MPTSAS_FW_DIAG_TYPE_UNREGISTER
||
11662 driver_data
.Action
== MPTSAS_FW_DIAG_TYPE_QUERY
||
11663 driver_data
.Action
== MPTSAS_FW_DIAG_TYPE_READ_BUFFER
||
11664 driver_data
.Action
== MPTSAS_FW_DIAG_TYPE_RELEASE
) {
11665 status
= mptsas_do_diag_action(mpt
, driver_data
.Action
,
11666 (void *)(uintptr_t)driver_data
.PtrDiagAction
,
11667 driver_data
.Length
, &driver_data
.ReturnCode
,
11669 if (status
== DDI_SUCCESS
) {
11670 if (ddi_copyout(&driver_data
.ReturnCode
,
11671 &user_data
->ReturnCode
,
11672 sizeof (user_data
->ReturnCode
), mode
)
11692 * This routine handles the "event query" ioctl.
11695 mptsas_event_query(mptsas_t
*mpt
, mptsas_event_query_t
*data
, int mode
,
11699 mptsas_event_query_t driverdata
;
11702 driverdata
.Entries
= MPTSAS_EVENT_QUEUE_SIZE
;
11704 mutex_enter(&mpt
->m_mutex
);
11705 for (i
= 0; i
< 4; i
++) {
11706 driverdata
.Types
[i
] = mpt
->m_event_mask
[i
];
11708 mutex_exit(&mpt
->m_mutex
);
11710 if (ddi_copyout(&driverdata
, data
, sizeof (driverdata
), mode
) != 0) {
11713 *rval
= MPTIOCTL_STATUS_GOOD
;
11721 * This routine handles the "event enable" ioctl.
11724 mptsas_event_enable(mptsas_t
*mpt
, mptsas_event_enable_t
*data
, int mode
,
11728 mptsas_event_enable_t driverdata
;
11731 if (ddi_copyin(data
, &driverdata
, sizeof (driverdata
), mode
) == 0) {
11732 mutex_enter(&mpt
->m_mutex
);
11733 for (i
= 0; i
< 4; i
++) {
11734 mpt
->m_event_mask
[i
] = driverdata
.Types
[i
];
11736 mutex_exit(&mpt
->m_mutex
);
11738 *rval
= MPTIOCTL_STATUS_GOOD
;
11747 * This routine handles the "event report" ioctl.
11750 mptsas_event_report(mptsas_t
*mpt
, mptsas_event_report_t
*data
, int mode
,
11754 mptsas_event_report_t driverdata
;
11756 mutex_enter(&mpt
->m_mutex
);
11758 if (ddi_copyin(&data
->Size
, &driverdata
.Size
, sizeof (driverdata
.Size
),
11760 if (driverdata
.Size
>= sizeof (mpt
->m_events
)) {
11761 if (ddi_copyout(mpt
->m_events
, data
->Events
,
11762 sizeof (mpt
->m_events
), mode
) != 0) {
11765 if (driverdata
.Size
> sizeof (mpt
->m_events
)) {
11767 sizeof (mpt
->m_events
);
11768 if (ddi_copyout(&driverdata
.Size
,
11770 sizeof (driverdata
.Size
),
11774 *rval
= MPTIOCTL_STATUS_GOOD
;
11778 *rval
= MPTIOCTL_STATUS_GOOD
;
11783 *rval
= MPTIOCTL_STATUS_LEN_TOO_SHORT
;
11790 mutex_exit(&mpt
->m_mutex
);
11795 mptsas_lookup_pci_data(mptsas_t
*mpt
, mptsas_adapter_data_t
*adapter_data
)
11801 * Lookup the 'reg' property and extract the other data
11803 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, mpt
->m_dip
,
11804 DDI_PROP_DONTPASS
, "reg", ®_data
, ®len
) ==
11805 DDI_PROP_SUCCESS
) {
11807 * Extract the PCI data from the 'reg' property first DWORD.
11808 * The entry looks like the following:
11810 * Bits 0 - 7 8-bit Register number
11811 * Bits 8 - 10 3-bit Function number
11812 * Bits 11 - 15 5-bit Device number
11813 * Bits 16 - 23 8-bit Bus number
11814 * Bits 24 - 25 2-bit Address Space type identifier
11817 adapter_data
->PciInformation
.u
.bits
.BusNumber
=
11818 (reg_data
[0] & 0x00FF0000) >> 16;
11819 adapter_data
->PciInformation
.u
.bits
.DeviceNumber
=
11820 (reg_data
[0] & 0x0000F800) >> 11;
11821 adapter_data
->PciInformation
.u
.bits
.FunctionNumber
=
11822 (reg_data
[0] & 0x00000700) >> 8;
11823 ddi_prop_free((void *)reg_data
);
11826 * If we can't determine the PCI data then we fill in FF's for
11827 * the data to indicate this.
11829 adapter_data
->PCIDeviceHwId
= 0xFFFFFFFF;
11830 adapter_data
->MpiPortNumber
= 0xFFFFFFFF;
11831 adapter_data
->PciInformation
.u
.AsDWORD
= 0xFFFFFFFF;
11835 * Saved in the mpt->m_fwversion
11837 adapter_data
->MpiFirmwareVersion
= mpt
->m_fwversion
;
11841 mptsas_read_adapter_data(mptsas_t
*mpt
, mptsas_adapter_data_t
*adapter_data
)
11843 char *driver_verstr
= MPTSAS_MOD_STRING
;
11845 mptsas_lookup_pci_data(mpt
, adapter_data
);
11846 adapter_data
->AdapterType
= MPTIOCTL_ADAPTER_TYPE_SAS2
;
11847 adapter_data
->PCIDeviceHwId
= (uint32_t)mpt
->m_devid
;
11848 adapter_data
->PCIDeviceHwRev
= (uint32_t)mpt
->m_revid
;
11849 adapter_data
->SubSystemId
= (uint32_t)mpt
->m_ssid
;
11850 adapter_data
->SubsystemVendorId
= (uint32_t)mpt
->m_svid
;
11851 (void) strcpy((char *)&adapter_data
->DriverVersion
[0], driver_verstr
);
11852 adapter_data
->BiosVersion
= 0;
11853 (void) mptsas_get_bios_page3(mpt
, &adapter_data
->BiosVersion
);
11857 mptsas_read_pci_info(mptsas_t
*mpt
, mptsas_pci_info_t
*pci_info
)
11863 * Lookup the 'reg' property and extract the other data
11865 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, mpt
->m_dip
,
11866 DDI_PROP_DONTPASS
, "reg", ®_data
, ®len
) ==
11867 DDI_PROP_SUCCESS
) {
11869 * Extract the PCI data from the 'reg' property first DWORD.
11870 * The entry looks like the following:
11872 * Bits 8 - 10 3-bit Function number
11873 * Bits 11 - 15 5-bit Device number
11874 * Bits 16 - 23 8-bit Bus number
11876 pci_info
->BusNumber
= (reg_data
[0] & 0x00FF0000) >> 16;
11877 pci_info
->DeviceNumber
= (reg_data
[0] & 0x0000F800) >> 11;
11878 pci_info
->FunctionNumber
= (reg_data
[0] & 0x00000700) >> 8;
11879 ddi_prop_free((void *)reg_data
);
11882 * If we can't determine the PCI info then we fill in FF's for
11883 * the data to indicate this.
11885 pci_info
->BusNumber
= 0xFFFFFFFF;
11886 pci_info
->DeviceNumber
= 0xFF;
11887 pci_info
->FunctionNumber
= 0xFF;
11891 * Now get the interrupt vector and the pci header. The vector can
11892 * only be 0 right now. The header is the first 256 bytes of config
11895 pci_info
->InterruptVector
= 0;
11896 for (i
= 0; i
< sizeof (pci_info
->PciHeader
); i
++) {
11897 pci_info
->PciHeader
[i
] = pci_config_get8(mpt
->m_config_handle
,
11903 mptsas_reg_access(mptsas_t
*mpt
, mptsas_reg_access_t
*data
, int mode
)
11906 mptsas_reg_access_t driverdata
;
11908 mutex_enter(&mpt
->m_mutex
);
11909 if (ddi_copyin(data
, &driverdata
, sizeof (driverdata
), mode
) == 0) {
11910 switch (driverdata
.Command
) {
11912 * IO access is not supported.
11916 mptsas_log(mpt
, CE_WARN
, "IO access is not "
11917 "supported. Use memory access.");
11922 driverdata
.RegData
= ddi_get32(mpt
->m_datap
,
11923 (uint32_t *)(void *)mpt
->m_reg
+
11924 driverdata
.RegOffset
);
11925 if (ddi_copyout(&driverdata
.RegData
,
11927 sizeof (driverdata
.RegData
), mode
) != 0) {
11928 mptsas_log(mpt
, CE_WARN
, "Register "
11934 case REG_MEM_WRITE
:
11935 ddi_put32(mpt
->m_datap
,
11936 (uint32_t *)(void *)mpt
->m_reg
+
11937 driverdata
.RegOffset
,
11938 driverdata
.RegData
);
11949 mutex_exit(&mpt
->m_mutex
);
11954 mptsas_ioctl(dev_t dev
, int cmd
, intptr_t data
, int mode
, cred_t
*credp
,
11959 mptsas_update_flash_t flashdata
;
11960 mptsas_pass_thru_t passthru_data
;
11961 mptsas_adapter_data_t adapter_data
;
11962 mptsas_pci_info_t pci_info
;
11965 int iport_flag
= 0;
11966 dev_info_t
*dip
= NULL
;
11967 mptsas_phymask_t phymask
= 0;
11968 struct devctl_iocdata
*dcp
= NULL
;
11969 uint32_t slotstatus
= 0;
11971 mptsas_target_t
*ptgt
= NULL
;
11973 *rval
= MPTIOCTL_STATUS_GOOD
;
11974 if (secpolicy_sys_config(credp
, B_FALSE
) != 0) {
11978 mpt
= ddi_get_soft_state(mptsas_state
, MINOR2INST(getminor(dev
)));
11981 * Called from iport node, get the states
11984 dip
= mptsas_get_dip_from_dev(dev
, &phymask
);
11988 mpt
= DIP2MPT(dip
);
11990 /* Make sure power level is D0 before accessing registers */
11991 mutex_enter(&mpt
->m_mutex
);
11992 if (mpt
->m_options
& MPTSAS_OPT_PM
) {
11993 (void) pm_busy_component(mpt
->m_dip
, 0);
11994 if (mpt
->m_power_level
!= PM_LEVEL_D0
) {
11995 mutex_exit(&mpt
->m_mutex
);
11996 if (pm_raise_power(mpt
->m_dip
, 0, PM_LEVEL_D0
) !=
11998 mptsas_log(mpt
, CE_WARN
,
11999 "mptsas%d: mptsas_ioctl: Raise power "
12000 "request failed.", mpt
->m_instance
);
12001 (void) pm_idle_component(mpt
->m_dip
, 0);
12005 mutex_exit(&mpt
->m_mutex
);
12008 mutex_exit(&mpt
->m_mutex
);
12012 status
= scsi_hba_ioctl(dev
, cmd
, data
, mode
, credp
, rval
);
12017 * The following code control the OK2RM LED, it doesn't affect
12018 * the ioctl return status.
12020 if ((cmd
== DEVCTL_DEVICE_ONLINE
) ||
12021 (cmd
== DEVCTL_DEVICE_OFFLINE
)) {
12022 if (ndi_dc_allochdl((void *)data
, &dcp
) !=
12026 addr
= ndi_dc_getaddr(dcp
);
12027 ptgt
= mptsas_addr_to_ptgt(mpt
, addr
, phymask
);
12028 if (ptgt
== NULL
) {
12029 NDBG14(("mptsas_ioctl led control: tgt %s not "
12031 ndi_dc_freehdl(dcp
);
12034 mutex_enter(&mpt
->m_mutex
);
12035 if (cmd
== DEVCTL_DEVICE_ONLINE
) {
12036 ptgt
->m_tgt_unconfigured
= 0;
12037 } else if (cmd
== DEVCTL_DEVICE_OFFLINE
) {
12038 ptgt
->m_tgt_unconfigured
= 1;
12041 #ifdef MPTSAS_GET_LED
12043 * The get led status can't get a valid/reasonable
12044 * state, so ignore the get led status, and write the
12045 * required value directly
12047 if (mptsas_get_led_status(mpt
, ptgt
, &slotstatus
) !=
12049 NDBG14(("mptsas_ioctl: get LED for tgt %s "
12050 "failed %x", addr
, slotstatus
));
12053 NDBG14(("mptsas_ioctl: LED status %x for %s",
12054 slotstatus
, addr
));
12056 if (cmd
== DEVCTL_DEVICE_OFFLINE
) {
12058 MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE
;
12061 ~MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE
;
12063 if (mptsas_set_led_status(mpt
, ptgt
, slotstatus
) !=
12065 NDBG14(("mptsas_ioctl: set LED for tgt %s "
12066 "failed %x", addr
, slotstatus
));
12068 mutex_exit(&mpt
->m_mutex
);
12069 ndi_dc_freehdl(dcp
);
12074 case MPTIOCTL_UPDATE_FLASH
:
12075 if (ddi_copyin((void *)data
, &flashdata
,
12076 sizeof (struct mptsas_update_flash
), mode
)) {
12081 mutex_enter(&mpt
->m_mutex
);
12082 if (mptsas_update_flash(mpt
,
12083 (caddr_t
)(long)flashdata
.PtrBuffer
,
12084 flashdata
.ImageSize
, flashdata
.ImageType
, mode
)) {
12089 * Reset the chip to start using the new
12090 * firmware. Reset if failed also.
12092 mpt
->m_softstate
&= ~MPTSAS_SS_MSG_UNIT_RESET
;
12093 if (mptsas_restart_ioc(mpt
) == DDI_FAILURE
) {
12096 mutex_exit(&mpt
->m_mutex
);
12098 case MPTIOCTL_PASS_THRU
:
12100 * The user has requested to pass through a command to
12101 * be executed by the MPT firmware. Call our routine
12102 * which does this. Only allow one passthru IOCTL at
12103 * one time. Other threads will block on
12104 * m_passthru_mutex, which is of adaptive variant.
12106 if (ddi_copyin((void *)data
, &passthru_data
,
12107 sizeof (mptsas_pass_thru_t
), mode
)) {
12111 mutex_enter(&mpt
->m_passthru_mutex
);
12112 mutex_enter(&mpt
->m_mutex
);
12113 status
= mptsas_pass_thru(mpt
, &passthru_data
, mode
);
12114 mutex_exit(&mpt
->m_mutex
);
12115 mutex_exit(&mpt
->m_passthru_mutex
);
12118 case MPTIOCTL_GET_ADAPTER_DATA
:
12120 * The user has requested to read adapter data. Call
12121 * our routine which does this.
12123 bzero(&adapter_data
, sizeof (mptsas_adapter_data_t
));
12124 if (ddi_copyin((void *)data
, (void *)&adapter_data
,
12125 sizeof (mptsas_adapter_data_t
), mode
)) {
12129 if (adapter_data
.StructureLength
>=
12130 sizeof (mptsas_adapter_data_t
)) {
12131 adapter_data
.StructureLength
= (uint32_t)
12132 sizeof (mptsas_adapter_data_t
);
12133 copylen
= sizeof (mptsas_adapter_data_t
);
12134 mutex_enter(&mpt
->m_mutex
);
12135 mptsas_read_adapter_data(mpt
, &adapter_data
);
12136 mutex_exit(&mpt
->m_mutex
);
12138 adapter_data
.StructureLength
= (uint32_t)
12139 sizeof (mptsas_adapter_data_t
);
12140 copylen
= sizeof (adapter_data
.StructureLength
);
12141 *rval
= MPTIOCTL_STATUS_LEN_TOO_SHORT
;
12143 if (ddi_copyout((void *)(&adapter_data
), (void *)data
,
12144 copylen
, mode
) != 0) {
12148 case MPTIOCTL_GET_PCI_INFO
:
12150 * The user has requested to read pci info. Call
12151 * our routine which does this.
12153 bzero(&pci_info
, sizeof (mptsas_pci_info_t
));
12154 mutex_enter(&mpt
->m_mutex
);
12155 mptsas_read_pci_info(mpt
, &pci_info
);
12156 mutex_exit(&mpt
->m_mutex
);
12157 if (ddi_copyout((void *)(&pci_info
), (void *)data
,
12158 sizeof (mptsas_pci_info_t
), mode
) != 0) {
12162 case MPTIOCTL_RESET_ADAPTER
:
12163 mutex_enter(&mpt
->m_mutex
);
12164 mpt
->m_softstate
&= ~MPTSAS_SS_MSG_UNIT_RESET
;
12165 if ((mptsas_restart_ioc(mpt
)) == DDI_FAILURE
) {
12166 mptsas_log(mpt
, CE_WARN
, "reset adapter IOCTL "
12170 mutex_exit(&mpt
->m_mutex
);
12172 case MPTIOCTL_DIAG_ACTION
:
12174 * The user has done a diag buffer action. Call our
12175 * routine which does this. Only allow one diag action
12178 mutex_enter(&mpt
->m_mutex
);
12179 if (mpt
->m_diag_action_in_progress
) {
12180 mutex_exit(&mpt
->m_mutex
);
12183 mpt
->m_diag_action_in_progress
= 1;
12184 status
= mptsas_diag_action(mpt
,
12185 (mptsas_diag_action_t
*)data
, mode
);
12186 mpt
->m_diag_action_in_progress
= 0;
12187 mutex_exit(&mpt
->m_mutex
);
12189 case MPTIOCTL_EVENT_QUERY
:
12191 * The user has done an event query. Call our routine
12194 status
= mptsas_event_query(mpt
,
12195 (mptsas_event_query_t
*)data
, mode
, rval
);
12197 case MPTIOCTL_EVENT_ENABLE
:
12199 * The user has done an event enable. Call our routine
12202 status
= mptsas_event_enable(mpt
,
12203 (mptsas_event_enable_t
*)data
, mode
, rval
);
12205 case MPTIOCTL_EVENT_REPORT
:
12207 * The user has done an event report. Call our routine
12210 status
= mptsas_event_report(mpt
,
12211 (mptsas_event_report_t
*)data
, mode
, rval
);
12213 case MPTIOCTL_REG_ACCESS
:
12215 * The user has requested register access. Call our
12216 * routine which does this.
12218 status
= mptsas_reg_access(mpt
,
12219 (mptsas_reg_access_t
*)data
, mode
);
12222 status
= scsi_hba_ioctl(dev
, cmd
, data
, mode
, credp
,
12228 if (mpt
->m_options
& MPTSAS_OPT_PM
)
12229 (void) pm_idle_component(mpt
->m_dip
, 0);
12234 mptsas_restart_ioc(mptsas_t
*mpt
)
12236 int rval
= DDI_SUCCESS
;
12237 mptsas_target_t
*ptgt
= NULL
;
12239 ASSERT(mutex_owned(&mpt
->m_mutex
));
12242 * Set a flag telling I/O path that we're processing a reset. This is
12243 * needed because after the reset is complete, the hash table still
12244 * needs to be rebuilt. If I/Os are started before the hash table is
12245 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
12246 * so that they can be retried.
12248 mpt
->m_in_reset
= TRUE
;
12251 * Set all throttles to HOLD
12253 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
12254 MPTSAS_HASH_FIRST
);
12255 while (ptgt
!= NULL
) {
12256 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
12257 mptsas_set_throttle(mpt
, ptgt
, HOLD_THROTTLE
);
12258 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
12260 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
12261 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
12265 * Disable interrupts
12267 MPTSAS_DISABLE_INTR(mpt
);
12270 * Abort all commands: outstanding commands, commands in waitq
12272 mptsas_flush_hba(mpt
);
12275 * Reinitialize the chip.
12277 if (mptsas_init_chip(mpt
, FALSE
) == DDI_FAILURE
) {
12278 rval
= DDI_FAILURE
;
12282 * Enable interrupts again
12284 MPTSAS_ENABLE_INTR(mpt
);
12287 * If mptsas_init_chip was successful, update the driver data.
12289 if (rval
== DDI_SUCCESS
) {
12290 mptsas_update_driver_data(mpt
);
12294 * Reset the throttles
12296 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
12297 MPTSAS_HASH_FIRST
);
12298 while (ptgt
!= NULL
) {
12299 mutex_enter(&ptgt
->m_tgt_intr_mutex
);
12300 mptsas_set_throttle(mpt
, ptgt
, MAX_THROTTLE
);
12301 mutex_exit(&ptgt
->m_tgt_intr_mutex
);
12303 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
12304 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
12307 mptsas_doneq_empty(mpt
);
12308 mptsas_restart_hba(mpt
);
12310 if (rval
!= DDI_SUCCESS
) {
12311 mptsas_fm_ereport(mpt
, DDI_FM_DEVICE_NO_RESPONSE
);
12312 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_LOST
);
12316 * Clear the reset flag so that I/Os can continue.
12318 mpt
->m_in_reset
= FALSE
;
12324 mptsas_init_chip(mptsas_t
*mpt
, int first_time
)
12326 ddi_dma_cookie_t cookie
;
12331 * Check to see if the firmware image is valid
12333 if (ddi_get32(mpt
->m_datap
, &mpt
->m_reg
->HostDiagnostic
) &
12334 MPI2_DIAG_FLASH_BAD_SIG
) {
12335 mptsas_log(mpt
, CE_WARN
, "mptsas bad flash signature!");
12342 rval
= mptsas_ioc_reset(mpt
, first_time
);
12343 if (rval
== MPTSAS_RESET_FAIL
) {
12344 mptsas_log(mpt
, CE_WARN
, "hard reset failed!");
12348 if ((rval
== MPTSAS_SUCCESS_MUR
) && (!first_time
)) {
12352 * Setup configuration space
12354 if (mptsas_config_space_init(mpt
) == FALSE
) {
12355 mptsas_log(mpt
, CE_WARN
, "mptsas_config_space_init "
12361 * IOC facts can change after a diag reset so all buffers that are
12362 * based on these numbers must be de-allocated and re-allocated. Get
12363 * new IOC facts each time chip is initialized.
12365 if (mptsas_ioc_get_facts(mpt
) == DDI_FAILURE
) {
12366 mptsas_log(mpt
, CE_WARN
, "mptsas_ioc_get_facts failed");
12370 if (mptsas_alloc_active_slots(mpt
, KM_SLEEP
)) {
12374 * Allocate request message frames, reply free queue, reply descriptor
12375 * post queue, and reply message frames using latest IOC facts.
12377 if (mptsas_alloc_request_frames(mpt
) == DDI_FAILURE
) {
12378 mptsas_log(mpt
, CE_WARN
, "mptsas_alloc_request_frames failed");
12381 if (mptsas_alloc_free_queue(mpt
) == DDI_FAILURE
) {
12382 mptsas_log(mpt
, CE_WARN
, "mptsas_alloc_free_queue failed!");
12385 if (mptsas_alloc_post_queue(mpt
) == DDI_FAILURE
) {
12386 mptsas_log(mpt
, CE_WARN
, "mptsas_alloc_post_queue failed!");
12389 if (mptsas_alloc_reply_frames(mpt
) == DDI_FAILURE
) {
12390 mptsas_log(mpt
, CE_WARN
, "mptsas_alloc_reply_frames failed!");
12396 * Re-Initialize ioc to operational state
12398 if (mptsas_ioc_init(mpt
) == DDI_FAILURE
) {
12399 mptsas_log(mpt
, CE_WARN
, "mptsas_ioc_init failed");
12403 mptsas_alloc_reply_args(mpt
);
12406 * Initialize reply post index. Reply free index is initialized after
12409 mpt
->m_post_index
= 0;
12412 * Initialize the Reply Free Queue with the physical addresses of our
12415 cookie
.dmac_address
= mpt
->m_reply_frame_dma_addr
;
12416 for (i
= 0; i
< mpt
->m_max_replies
; i
++) {
12417 ddi_put32(mpt
->m_acc_free_queue_hdl
,
12418 &((uint32_t *)(void *)mpt
->m_free_queue
)[i
],
12419 cookie
.dmac_address
);
12420 cookie
.dmac_address
+= mpt
->m_reply_frame_size
;
12422 (void) ddi_dma_sync(mpt
->m_dma_free_queue_hdl
, 0, 0,
12423 DDI_DMA_SYNC_FORDEV
);
12426 * Initialize the reply free index to one past the last frame on the
12427 * queue. This will signify that the queue is empty to start with.
12429 mpt
->m_free_index
= i
;
12430 ddi_put32(mpt
->m_datap
, &mpt
->m_reg
->ReplyFreeHostIndex
, i
);
12433 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
12435 for (i
= 0; i
< mpt
->m_post_queue_depth
; i
++) {
12436 ddi_put64(mpt
->m_acc_post_queue_hdl
,
12437 &((uint64_t *)(void *)mpt
->m_post_queue
)[i
],
12438 0xFFFFFFFFFFFFFFFF);
12440 (void) ddi_dma_sync(mpt
->m_dma_post_queue_hdl
, 0, 0,
12441 DDI_DMA_SYNC_FORDEV
);
12446 if (mptsas_ioc_enable_port(mpt
) == DDI_FAILURE
) {
12447 mptsas_log(mpt
, CE_WARN
, "mptsas_ioc_enable_port failed");
12454 if (mptsas_ioc_enable_event_notification(mpt
)) {
12459 * We need checks in attach and these.
12460 * chip_init is called in mult. places
12463 if ((mptsas_check_dma_handle(mpt
->m_dma_req_frame_hdl
) !=
12465 (mptsas_check_dma_handle(mpt
->m_dma_reply_frame_hdl
) !=
12467 (mptsas_check_dma_handle(mpt
->m_dma_free_queue_hdl
) !=
12469 (mptsas_check_dma_handle(mpt
->m_dma_post_queue_hdl
) !=
12471 (mptsas_check_dma_handle(mpt
->m_hshk_dma_hdl
) !=
12473 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
12477 /* Check all acc handles */
12478 if ((mptsas_check_acc_handle(mpt
->m_datap
) != DDI_SUCCESS
) ||
12479 (mptsas_check_acc_handle(mpt
->m_acc_req_frame_hdl
) !=
12481 (mptsas_check_acc_handle(mpt
->m_acc_reply_frame_hdl
) !=
12483 (mptsas_check_acc_handle(mpt
->m_acc_free_queue_hdl
) !=
12485 (mptsas_check_acc_handle(mpt
->m_acc_post_queue_hdl
) !=
12487 (mptsas_check_acc_handle(mpt
->m_hshk_acc_hdl
) !=
12489 (mptsas_check_acc_handle(mpt
->m_config_handle
) !=
12491 ddi_fm_service_impact(mpt
->m_dip
, DDI_SERVICE_UNAFFECTED
);
12495 return (DDI_SUCCESS
);
12498 return (DDI_FAILURE
);
12502 mptsas_get_pci_cap(mptsas_t
*mpt
)
12504 ushort_t caps_ptr
, cap
, cap_count
;
12506 if (mpt
->m_config_handle
== NULL
)
12509 * Check if capabilities list is supported and if so,
12510 * get initial capabilities pointer and clear bits 0,1.
12512 if (pci_config_get16(mpt
->m_config_handle
, PCI_CONF_STAT
)
12514 caps_ptr
= P2ALIGN(pci_config_get8(mpt
->m_config_handle
,
12515 PCI_CONF_CAP_PTR
), 4);
12517 caps_ptr
= PCI_CAP_NEXT_PTR_NULL
;
12521 * Walk capabilities if supported.
12523 for (cap_count
= 0; caps_ptr
!= PCI_CAP_NEXT_PTR_NULL
; ) {
12526 * Check that we haven't exceeded the maximum number of
12527 * capabilities and that the pointer is in a valid range.
12529 if (++cap_count
> 48) {
12530 mptsas_log(mpt
, CE_WARN
,
12531 "too many device capabilities.\n");
12534 if (caps_ptr
< 64) {
12535 mptsas_log(mpt
, CE_WARN
,
12536 "capabilities pointer 0x%x out of range.\n",
12542 * Get next capability and check that it is valid.
12543 * For now, we only support power management.
12545 cap
= pci_config_get8(mpt
->m_config_handle
, caps_ptr
);
12547 case PCI_CAP_ID_PM
:
12548 mptsas_log(mpt
, CE_NOTE
,
12549 "?mptsas%d supports power management.\n",
12551 mpt
->m_options
|= MPTSAS_OPT_PM
;
12553 /* Save PMCSR offset */
12554 mpt
->m_pmcsr_offset
= caps_ptr
+ PCI_PMCSR
;
12557 * The following capabilities are valid. Any others
12558 * will cause a message to be logged.
12560 case PCI_CAP_ID_VPD
:
12561 case PCI_CAP_ID_MSI
:
12562 case PCI_CAP_ID_PCIX
:
12563 case PCI_CAP_ID_PCI_E
:
12564 case PCI_CAP_ID_MSI_X
:
12567 mptsas_log(mpt
, CE_NOTE
,
12568 "?mptsas%d unrecognized capability "
12569 "0x%x.\n", mpt
->m_instance
, cap
);
12574 * Get next capabilities pointer and clear bits 0,1.
12576 caps_ptr
= P2ALIGN(pci_config_get8(mpt
->m_config_handle
,
12577 (caps_ptr
+ PCI_CAP_NEXT_PTR
)), 4);
12583 mptsas_init_pm(mptsas_t
*mpt
)
12588 "0=Off (PCI D3 State)",
12589 "3=On (PCI D0 State)",
12592 uint16_t pmcsr_stat
;
12594 if (mptsas_get_pci_cap(mpt
) == FALSE
) {
12595 return (DDI_FAILURE
);
12598 * If PCI's capability does not support PM, then don't need
12599 * to registe the pm-components
12601 if (!(mpt
->m_options
& MPTSAS_OPT_PM
))
12602 return (DDI_SUCCESS
);
12604 * If power management is supported by this chip, create
12605 * pm-components property for the power management framework
12607 (void) sprintf(pmc_name
, "NAME=mptsas%d", mpt
->m_instance
);
12609 if (ddi_prop_update_string_array(DDI_DEV_T_NONE
, mpt
->m_dip
,
12610 "pm-components", pmc
, 3) != DDI_PROP_SUCCESS
) {
12611 mutex_enter(&mpt
->m_intr_mutex
);
12612 mpt
->m_options
&= ~MPTSAS_OPT_PM
;
12613 mutex_exit(&mpt
->m_intr_mutex
);
12614 mptsas_log(mpt
, CE_WARN
,
12615 "mptsas%d: pm-component property creation failed.",
12617 return (DDI_FAILURE
);
12623 (void) pm_busy_component(mpt
->m_dip
, 0);
12624 pmcsr_stat
= pci_config_get16(mpt
->m_config_handle
,
12625 mpt
->m_pmcsr_offset
);
12626 if ((pmcsr_stat
& PCI_PMCSR_STATE_MASK
) != PCI_PMCSR_D0
) {
12627 mptsas_log(mpt
, CE_WARN
, "mptsas%d: Power up the device",
12629 pci_config_put16(mpt
->m_config_handle
, mpt
->m_pmcsr_offset
,
12632 if (pm_power_has_changed(mpt
->m_dip
, 0, PM_LEVEL_D0
) != DDI_SUCCESS
) {
12633 mptsas_log(mpt
, CE_WARN
, "pm_power_has_changed failed");
12634 return (DDI_FAILURE
);
12636 mutex_enter(&mpt
->m_intr_mutex
);
12637 mpt
->m_power_level
= PM_LEVEL_D0
;
12638 mutex_exit(&mpt
->m_intr_mutex
);
12640 * Set pm idle delay.
12642 mpt
->m_pm_idle_delay
= ddi_prop_get_int(DDI_DEV_T_ANY
,
12643 mpt
->m_dip
, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT
);
12645 return (DDI_SUCCESS
);
12649 mptsas_register_intrs(mptsas_t
*mpt
)
12656 /* Get supported interrupt types */
12657 if (ddi_intr_get_supported_types(dip
, &intr_types
) != DDI_SUCCESS
) {
12658 mptsas_log(mpt
, CE_WARN
, "ddi_intr_get_supported_types "
12663 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types
));
12666 * Try MSI, but fall back to FIXED
12668 if (mptsas_enable_msi
&& (intr_types
& DDI_INTR_TYPE_MSI
)) {
12669 if (mptsas_add_intrs(mpt
, DDI_INTR_TYPE_MSI
) == DDI_SUCCESS
) {
12670 NDBG0(("Using MSI interrupt type"));
12671 mpt
->m_intr_type
= DDI_INTR_TYPE_MSI
;
12675 if (intr_types
& DDI_INTR_TYPE_FIXED
) {
12676 if (mptsas_add_intrs(mpt
, DDI_INTR_TYPE_FIXED
) == DDI_SUCCESS
) {
12677 NDBG0(("Using FIXED interrupt type"));
12678 mpt
->m_intr_type
= DDI_INTR_TYPE_FIXED
;
12681 NDBG0(("FIXED interrupt registration failed"));
12690 mptsas_unregister_intrs(mptsas_t
*mpt
)
12692 mptsas_rem_intrs(mpt
);
12696 * mptsas_add_intrs:
12698 * Register FIXED or MSI interrupts.
12701 mptsas_add_intrs(mptsas_t
*mpt
, int intr_type
)
12703 dev_info_t
*dip
= mpt
->m_dip
;
12704 int avail
, actual
, count
= 0;
12707 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type
));
12709 /* Get number of interrupts */
12710 ret
= ddi_intr_get_nintrs(dip
, intr_type
, &count
);
12711 if ((ret
!= DDI_SUCCESS
) || (count
<= 0)) {
12712 mptsas_log(mpt
, CE_WARN
, "ddi_intr_get_nintrs() failed, "
12713 "ret %d count %d\n", ret
, count
);
12715 return (DDI_FAILURE
);
12718 /* Get number of available interrupts */
12719 ret
= ddi_intr_get_navail(dip
, intr_type
, &avail
);
12720 if ((ret
!= DDI_SUCCESS
) || (avail
== 0)) {
12721 mptsas_log(mpt
, CE_WARN
, "ddi_intr_get_navail() failed, "
12722 "ret %d avail %d\n", ret
, avail
);
12724 return (DDI_FAILURE
);
12727 if (avail
< count
) {
12728 mptsas_log(mpt
, CE_NOTE
, "ddi_intr_get_nvail returned %d, "
12729 "navail() returned %d", count
, avail
);
12732 /* Mpt only have one interrupt routine */
12733 if ((intr_type
== DDI_INTR_TYPE_MSI
) && (count
> 1)) {
12737 /* Allocate an array of interrupt handles */
12738 mpt
->m_intr_size
= count
* sizeof (ddi_intr_handle_t
);
12739 mpt
->m_htable
= kmem_alloc(mpt
->m_intr_size
, KM_SLEEP
);
12741 flag
= DDI_INTR_ALLOC_NORMAL
;
12743 /* call ddi_intr_alloc() */
12744 ret
= ddi_intr_alloc(dip
, mpt
->m_htable
, intr_type
, 0,
12745 count
, &actual
, flag
);
12747 if ((ret
!= DDI_SUCCESS
) || (actual
== 0)) {
12748 mptsas_log(mpt
, CE_WARN
, "ddi_intr_alloc() failed, ret %d\n",
12750 kmem_free(mpt
->m_htable
, mpt
->m_intr_size
);
12751 return (DDI_FAILURE
);
12754 /* use interrupt count returned or abort? */
12755 if (actual
< count
) {
12756 mptsas_log(mpt
, CE_NOTE
, "Requested: %d, Received: %d\n",
12760 mpt
->m_intr_cnt
= actual
;
12763 * Get priority for first msi, assume remaining are all the same
12765 if ((ret
= ddi_intr_get_pri(mpt
->m_htable
[0],
12766 &mpt
->m_intr_pri
)) != DDI_SUCCESS
) {
12767 mptsas_log(mpt
, CE_WARN
, "ddi_intr_get_pri() failed %d\n", ret
);
12769 /* Free already allocated intr */
12770 for (i
= 0; i
< actual
; i
++) {
12771 (void) ddi_intr_free(mpt
->m_htable
[i
]);
12774 kmem_free(mpt
->m_htable
, mpt
->m_intr_size
);
12775 return (DDI_FAILURE
);
12778 /* Test for high level mutex */
12779 if (mpt
->m_intr_pri
>= ddi_intr_get_hilevel_pri()) {
12780 mptsas_log(mpt
, CE_WARN
, "mptsas_add_intrs: "
12781 "Hi level interrupt not supported\n");
12783 /* Free already allocated intr */
12784 for (i
= 0; i
< actual
; i
++) {
12785 (void) ddi_intr_free(mpt
->m_htable
[i
]);
12788 kmem_free(mpt
->m_htable
, mpt
->m_intr_size
);
12789 return (DDI_FAILURE
);
12792 /* Call ddi_intr_add_handler() */
12793 for (i
= 0; i
< actual
; i
++) {
12794 if ((ret
= ddi_intr_add_handler(mpt
->m_htable
[i
], mptsas_intr
,
12795 (caddr_t
)mpt
, (caddr_t
)(uintptr_t)i
)) != DDI_SUCCESS
) {
12796 mptsas_log(mpt
, CE_WARN
, "ddi_intr_add_handler() "
12797 "failed %d\n", ret
);
12799 /* Free already allocated intr */
12800 for (i
= 0; i
< actual
; i
++) {
12801 (void) ddi_intr_free(mpt
->m_htable
[i
]);
12804 kmem_free(mpt
->m_htable
, mpt
->m_intr_size
);
12805 return (DDI_FAILURE
);
12809 if ((ret
= ddi_intr_get_cap(mpt
->m_htable
[0], &mpt
->m_intr_cap
))
12811 mptsas_log(mpt
, CE_WARN
, "ddi_intr_get_cap() failed %d\n", ret
);
12813 /* Free already allocated intr */
12814 for (i
= 0; i
< actual
; i
++) {
12815 (void) ddi_intr_free(mpt
->m_htable
[i
]);
12818 kmem_free(mpt
->m_htable
, mpt
->m_intr_size
);
12819 return (DDI_FAILURE
);
12823 * Enable interrupts
12825 if (mpt
->m_intr_cap
& DDI_INTR_FLAG_BLOCK
) {
12826 /* Call ddi_intr_block_enable() for MSI interrupts */
12827 (void) ddi_intr_block_enable(mpt
->m_htable
, mpt
->m_intr_cnt
);
12829 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12830 for (i
= 0; i
< mpt
->m_intr_cnt
; i
++) {
12831 (void) ddi_intr_enable(mpt
->m_htable
[i
]);
12834 return (DDI_SUCCESS
);
12838 * mptsas_rem_intrs:
12840 * Unregister FIXED or MSI interrupts
12843 mptsas_rem_intrs(mptsas_t
*mpt
)
12847 NDBG6(("mptsas_rem_intrs"));
12849 /* Disable all interrupts */
12850 if (mpt
->m_intr_cap
& DDI_INTR_FLAG_BLOCK
) {
12851 /* Call ddi_intr_block_disable() */
12852 (void) ddi_intr_block_disable(mpt
->m_htable
, mpt
->m_intr_cnt
);
12854 for (i
= 0; i
< mpt
->m_intr_cnt
; i
++) {
12855 (void) ddi_intr_disable(mpt
->m_htable
[i
]);
12859 /* Call ddi_intr_remove_handler() */
12860 for (i
= 0; i
< mpt
->m_intr_cnt
; i
++) {
12861 (void) ddi_intr_remove_handler(mpt
->m_htable
[i
]);
12862 (void) ddi_intr_free(mpt
->m_htable
[i
]);
12865 kmem_free(mpt
->m_htable
, mpt
->m_intr_size
);
12869 * The IO fault service error handling callback function
12873 mptsas_fm_error_cb(dev_info_t
*dip
, ddi_fm_error_t
*err
, const void *impl_data
)
12876 * as the driver can always deal with an error in any dma or
12877 * access handle, we can just return the fme_status value.
12879 pci_ereport_post(dip
, err
, NULL
);
12880 return (err
->fme_status
);
12884 * mptsas_fm_init - initialize fma capabilities and register with IO
12888 mptsas_fm_init(mptsas_t
*mpt
)
12891 * Need to change iblock to priority for new MSI intr
12893 ddi_iblock_cookie_t fm_ibc
;
12895 /* Only register with IO Fault Services if we have some capability */
12896 if (mpt
->m_fm_capabilities
) {
12897 /* Adjust access and dma attributes for FMA */
12898 mpt
->m_reg_acc_attr
.devacc_attr_access
= DDI_FLAGERR_ACC
;
12899 mpt
->m_msg_dma_attr
.dma_attr_flags
|= DDI_DMA_FLAGERR
;
12900 mpt
->m_io_dma_attr
.dma_attr_flags
|= DDI_DMA_FLAGERR
;
12903 * Register capabilities with IO Fault Services.
12904 * mpt->m_fm_capabilities will be updated to indicate
12905 * capabilities actually supported (not requested.)
12907 ddi_fm_init(mpt
->m_dip
, &mpt
->m_fm_capabilities
, &fm_ibc
);
12910 * Initialize pci ereport capabilities if ereport
12911 * capable (should always be.)
12913 if (DDI_FM_EREPORT_CAP(mpt
->m_fm_capabilities
) ||
12914 DDI_FM_ERRCB_CAP(mpt
->m_fm_capabilities
)) {
12915 pci_ereport_setup(mpt
->m_dip
);
12919 * Register error callback if error callback capable.
12921 if (DDI_FM_ERRCB_CAP(mpt
->m_fm_capabilities
)) {
12922 ddi_fm_handler_register(mpt
->m_dip
,
12923 mptsas_fm_error_cb
, (void *) mpt
);
12929 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12934 mptsas_fm_fini(mptsas_t
*mpt
)
12936 /* Only unregister FMA capabilities if registered */
12937 if (mpt
->m_fm_capabilities
) {
12940 * Un-register error callback if error callback capable.
12943 if (DDI_FM_ERRCB_CAP(mpt
->m_fm_capabilities
)) {
12944 ddi_fm_handler_unregister(mpt
->m_dip
);
12948 * Release any resources allocated by pci_ereport_setup()
12951 if (DDI_FM_EREPORT_CAP(mpt
->m_fm_capabilities
) ||
12952 DDI_FM_ERRCB_CAP(mpt
->m_fm_capabilities
)) {
12953 pci_ereport_teardown(mpt
->m_dip
);
12956 /* Unregister from IO Fault Services */
12957 ddi_fm_fini(mpt
->m_dip
);
12959 /* Adjust access and dma attributes for FMA */
12960 mpt
->m_reg_acc_attr
.devacc_attr_access
= DDI_DEFAULT_ACC
;
12961 mpt
->m_msg_dma_attr
.dma_attr_flags
&= ~DDI_DMA_FLAGERR
;
12962 mpt
->m_io_dma_attr
.dma_attr_flags
&= ~DDI_DMA_FLAGERR
;
12968 mptsas_check_acc_handle(ddi_acc_handle_t handle
)
12972 if (handle
== NULL
)
12973 return (DDI_FAILURE
);
12974 ddi_fm_acc_err_get(handle
, &de
, DDI_FME_VER0
);
12975 return (de
.fme_status
);
12979 mptsas_check_dma_handle(ddi_dma_handle_t handle
)
12983 if (handle
== NULL
)
12984 return (DDI_FAILURE
);
12985 ddi_fm_dma_err_get(handle
, &de
, DDI_FME_VER0
);
12986 return (de
.fme_status
);
12990 mptsas_fm_ereport(mptsas_t
*mpt
, char *detail
)
12993 char buf
[FM_MAX_CLASS
];
12995 (void) snprintf(buf
, FM_MAX_CLASS
, "%s.%s", DDI_FM_DEVICE
, detail
);
12996 ena
= fm_ena_generate(0, FM_ENA_FMT1
);
12997 if (DDI_FM_EREPORT_CAP(mpt
->m_fm_capabilities
)) {
12998 ddi_fm_ereport_post(mpt
->m_dip
, buf
, ena
, DDI_NOSLEEP
,
12999 FM_VERSION
, DATA_TYPE_UINT8
, FM_EREPORT_VERS0
, NULL
);
13004 mptsas_get_target_device_info(mptsas_t
*mpt
, uint32_t page_address
,
13005 uint16_t *dev_handle
, mptsas_target_t
**pptgt
)
13010 mptsas_phymask_t phymask
;
13011 uint8_t physport
, phynum
, config
, disk
;
13012 mptsas_slots_t
*slots
= mpt
->m_active
;
13013 uint64_t devicename
;
13015 mptsas_target_t
*tmp_tgt
= NULL
;
13016 uint16_t bay_num
, enclosure
;
13018 ASSERT(*pptgt
== NULL
);
13020 rval
= mptsas_get_sas_device_page0(mpt
, page_address
, dev_handle
,
13021 &sas_wwn
, &dev_info
, &physport
, &phynum
, &pdev_hdl
,
13022 &bay_num
, &enclosure
);
13023 if (rval
!= DDI_SUCCESS
) {
13024 rval
= DEV_INFO_FAIL_PAGE0
;
13028 if ((dev_info
& (MPI2_SAS_DEVICE_INFO_SSP_TARGET
|
13029 MPI2_SAS_DEVICE_INFO_SATA_DEVICE
|
13030 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE
)) == NULL
) {
13031 rval
= DEV_INFO_WRONG_DEVICE_TYPE
;
13036 * Check if the dev handle is for a Phys Disk. If so, set return value
13037 * and exit. Don't add Phys Disks to hash.
13039 for (config
= 0; config
< slots
->m_num_raid_configs
; config
++) {
13040 for (disk
= 0; disk
< MPTSAS_MAX_DISKS_IN_CONFIG
; disk
++) {
13041 if (*dev_handle
== slots
->m_raidconfig
[config
].
13042 m_physdisk_devhdl
[disk
]) {
13043 rval
= DEV_INFO_PHYS_DISK
;
13050 * Get SATA Device Name from SAS device page0 for
13051 * sata device, if device name doesn't exist, set m_sas_wwn to
13052 * 0 for direct attached SATA. For the device behind the expander
13053 * we still can use STP address assigned by expander.
13055 if (dev_info
& (MPI2_SAS_DEVICE_INFO_SATA_DEVICE
|
13056 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE
)) {
13057 mutex_exit(&mpt
->m_mutex
);
13058 /* alloc a tmp_tgt to send the cmd */
13059 tmp_tgt
= kmem_zalloc(sizeof (struct mptsas_target
),
13061 tmp_tgt
->m_devhdl
= *dev_handle
;
13062 tmp_tgt
->m_deviceinfo
= dev_info
;
13063 tmp_tgt
->m_qfull_retries
= QFULL_RETRIES
;
13064 tmp_tgt
->m_qfull_retry_interval
=
13065 drv_usectohz(QFULL_RETRY_INTERVAL
* 1000);
13066 tmp_tgt
->m_t_throttle
= MAX_THROTTLE
;
13067 devicename
= mptsas_get_sata_guid(mpt
, tmp_tgt
, 0);
13068 kmem_free(tmp_tgt
, sizeof (struct mptsas_target
));
13069 mutex_enter(&mpt
->m_mutex
);
13070 if (devicename
!= 0 && (((devicename
>> 56) & 0xf0) == 0x50)) {
13071 sas_wwn
= devicename
;
13072 } else if (dev_info
& MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH
) {
13077 phymask
= mptsas_physport_to_phymask(mpt
, physport
);
13078 *pptgt
= mptsas_tgt_alloc(&slots
->m_tgttbl
, *dev_handle
, sas_wwn
,
13079 dev_info
, phymask
, phynum
, mpt
);
13080 if (*pptgt
== NULL
) {
13081 mptsas_log(mpt
, CE_WARN
, "Failed to allocated target"
13083 rval
= DEV_INFO_FAIL_ALLOC
;
13086 (*pptgt
)->m_enclosure
= enclosure
;
13087 (*pptgt
)->m_slot_num
= bay_num
;
13088 return (DEV_INFO_SUCCESS
);
13092 mptsas_get_sata_guid(mptsas_t
*mpt
, mptsas_target_t
*ptgt
, int lun
)
13094 uint64_t sata_guid
= 0, *pwwn
= NULL
;
13095 int target
= ptgt
->m_devhdl
;
13096 uchar_t
*inq83
= NULL
;
13097 int inq83_len
= 0xFF;
13098 uchar_t
*dblk
= NULL
;
13099 int inq83_retry
= 3;
13100 int rval
= DDI_FAILURE
;
13102 inq83
= kmem_zalloc(inq83_len
, KM_SLEEP
);
13105 rval
= mptsas_inquiry(mpt
, ptgt
, lun
, 0x83, inq83
,
13106 inq83_len
, NULL
, 1);
13107 if (rval
!= DDI_SUCCESS
) {
13108 mptsas_log(mpt
, CE_WARN
, "!mptsas request inquiry page "
13109 "0x83 for target:%x, lun:%x failed!", target
, lun
);
13112 /* According to SAT2, the first descriptor is logic unit name */
13114 if ((dblk
[1] & 0x30) != 0) {
13115 mptsas_log(mpt
, CE_WARN
, "!Descriptor is not lun associated.");
13118 pwwn
= (uint64_t *)(void *)(&dblk
[4]);
13119 if ((dblk
[4] & 0xf0) == 0x50) {
13120 sata_guid
= BE_64(*pwwn
);
13122 } else if (dblk
[4] == 'A') {
13123 NDBG20(("SATA drive has no NAA format GUID."));
13126 /* The data is not ready, wait and retry */
13128 if (inq83_retry
<= 0) {
13131 NDBG20(("The GUID is not ready, retry..."));
13132 delay(1 * drv_usectohz(1000000));
13136 kmem_free(inq83
, inq83_len
);
13137 return (sata_guid
);
13141 mptsas_inquiry(mptsas_t
*mpt
, mptsas_target_t
*ptgt
, int lun
, uchar_t page
,
13142 unsigned char *buf
, int len
, int *reallen
, uchar_t evpd
)
13144 uchar_t cdb
[CDB_GROUP0
];
13145 struct scsi_address ap
;
13146 struct buf
*data_bp
= NULL
;
13148 int ret
= DDI_FAILURE
;
13150 ASSERT(len
<= 0xffff);
13152 ap
.a_target
= MPTSAS_INVALID_DEVHDL
;
13153 ap
.a_lun
= (uchar_t
)(lun
);
13154 ap
.a_hba_tran
= mpt
->m_tran
;
13156 data_bp
= scsi_alloc_consistent_buf(&ap
,
13157 (struct buf
*)NULL
, len
, B_READ
, NULL_FUNC
, NULL
);
13158 if (data_bp
== NULL
) {
13161 bzero(cdb
, CDB_GROUP0
);
13162 cdb
[0] = SCMD_INQUIRY
;
13165 cdb
[3] = (len
& 0xff00) >> 8;
13166 cdb
[4] = (len
& 0x00ff);
13169 ret
= mptsas_send_scsi_cmd(mpt
, &ap
, ptgt
, &cdb
[0], CDB_GROUP0
, data_bp
,
13171 if (ret
== DDI_SUCCESS
) {
13173 *reallen
= len
- resid
;
13175 bcopy((caddr_t
)data_bp
->b_un
.b_addr
, buf
, len
);
13178 scsi_free_consistent_buf(data_bp
);
13184 mptsas_send_scsi_cmd(mptsas_t
*mpt
, struct scsi_address
*ap
,
13185 mptsas_target_t
*ptgt
, uchar_t
*cdb
, int cdblen
, struct buf
*data_bp
,
13188 struct scsi_pkt
*pktp
= NULL
;
13189 scsi_hba_tran_t
*tran_clone
= NULL
;
13190 mptsas_tgt_private_t
*tgt_private
= NULL
;
13191 int ret
= DDI_FAILURE
;
13194 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
13195 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
13196 * to simulate the cmds from sd
13198 tran_clone
= kmem_alloc(
13199 sizeof (scsi_hba_tran_t
), KM_SLEEP
);
13200 if (tran_clone
== NULL
) {
13203 bcopy((caddr_t
)mpt
->m_tran
,
13204 (caddr_t
)tran_clone
, sizeof (scsi_hba_tran_t
));
13205 tgt_private
= kmem_alloc(
13206 sizeof (mptsas_tgt_private_t
), KM_SLEEP
);
13207 if (tgt_private
== NULL
) {
13210 tgt_private
->t_lun
= ap
->a_lun
;
13211 tgt_private
->t_private
= ptgt
;
13212 tran_clone
->tran_tgt_private
= tgt_private
;
13213 ap
->a_hba_tran
= tran_clone
;
13215 pktp
= scsi_init_pkt(ap
, (struct scsi_pkt
*)NULL
,
13216 data_bp
, cdblen
, sizeof (struct scsi_arq_status
),
13217 0, PKT_CONSISTENT
, NULL
, NULL
);
13218 if (pktp
== NULL
) {
13221 bcopy(cdb
, pktp
->pkt_cdbp
, cdblen
);
13222 pktp
->pkt_flags
= FLAG_NOPARITY
;
13223 if (scsi_poll(pktp
) < 0) {
13226 if (((struct scsi_status
*)pktp
->pkt_scbp
)->sts_chk
) {
13229 if (resid
!= NULL
) {
13230 *resid
= pktp
->pkt_resid
;
13236 scsi_destroy_pkt(pktp
);
13239 kmem_free(tran_clone
, sizeof (scsi_hba_tran_t
));
13242 kmem_free(tgt_private
, sizeof (mptsas_tgt_private_t
));
13247 mptsas_parse_address(char *name
, uint64_t *wwid
, uint8_t *phy
, int *lun
)
13252 char *wwid_str
= NULL
;
13253 char *lun_str
= NULL
;
13256 int rc
= DDI_FAILURE
;
13259 ASSERT(ptr
[0] == 'w' || ptr
[0] == 'p');
13261 if ((cp
= strchr(ptr
, ',')) == NULL
) {
13262 return (DDI_FAILURE
);
13265 wwid_str
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
13266 s
= (uintptr_t)cp
- (uintptr_t)ptr
;
13268 bcopy(ptr
, wwid_str
, s
);
13269 wwid_str
[s
] = '\0';
13273 if ((cp
= strchr(ptr
, '\0')) == NULL
) {
13276 lun_str
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
13277 s
= (uintptr_t)cp
- (uintptr_t)ptr
;
13279 bcopy(ptr
, lun_str
, s
);
13282 if (name
[0] == 'p') {
13283 rc
= ddi_strtol(wwid_str
, NULL
, 0x10, &phyid
);
13285 rc
= scsi_wwnstr_to_wwn(wwid_str
, wwid
);
13287 if (rc
!= DDI_SUCCESS
)
13291 ASSERT(phyid
< MPTSAS_MAX_PHYS
);
13292 *phy
= (uint8_t)phyid
;
13294 rc
= ddi_strtol(lun_str
, NULL
, 0x10, &lunnum
);
13298 *lun
= (int)lunnum
;
13302 kmem_free(wwid_str
, SCSI_MAXNAMELEN
);
13304 kmem_free(lun_str
, SCSI_MAXNAMELEN
);
13310 * mptsas_parse_smp_name() is to parse sas wwn string
13311 * which format is "wWWN"
13314 mptsas_parse_smp_name(char *name
, uint64_t *wwn
)
13319 return (DDI_FAILURE
);
13323 if (scsi_wwnstr_to_wwn(ptr
, wwn
)) {
13324 return (DDI_FAILURE
);
13326 return (DDI_SUCCESS
);
13330 mptsas_bus_config(dev_info_t
*pdip
, uint_t flag
,
13331 ddi_bus_config_op_t op
, void *arg
, dev_info_t
**childp
)
13333 int ret
= NDI_FAILURE
;
13338 char *devnm
= NULL
;
13340 uint8_t phy
= 0xFF;
13342 uint_t mflags
= flag
;
13343 int bconfig
= TRUE
;
13345 if (scsi_hba_iport_unit_address(pdip
) == 0) {
13346 return (DDI_FAILURE
);
13349 mpt
= DIP2MPT(pdip
);
13351 return (DDI_FAILURE
);
13354 * Hold the nexus across the bus_config
13356 ndi_devi_enter(scsi_vhci_dip
, &circ
);
13357 ndi_devi_enter(pdip
, &circ1
);
13359 case BUS_CONFIG_ONE
:
13360 /* parse wwid/target name out of name given */
13361 if ((ptr
= strchr((char *)arg
, '@')) == NULL
) {
13366 if (strncmp((char *)arg
, "smp", 3) == 0) {
13368 * This is a SMP target device
13370 ret
= mptsas_parse_smp_name(ptr
, &wwid
);
13371 if (ret
!= DDI_SUCCESS
) {
13375 ret
= mptsas_config_smp(pdip
, wwid
, childp
);
13376 } else if ((ptr
[0] == 'w') || (ptr
[0] == 'p')) {
13378 * OBP could pass down a non-canonical form
13379 * bootpath without LUN part when LUN is 0.
13380 * So driver need adjust the string.
13382 if (strchr(ptr
, ',') == NULL
) {
13383 devnm
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
13384 (void) sprintf(devnm
, "%s,0", (char *)arg
);
13385 ptr
= strchr(devnm
, '@');
13390 * The device path is wWWID format and the device
13391 * is not SMP target device.
13393 ret
= mptsas_parse_address(ptr
, &wwid
, &phy
, &lun
);
13394 if (ret
!= DDI_SUCCESS
) {
13399 if (ptr
[0] == 'w') {
13400 ret
= mptsas_config_one_addr(pdip
, wwid
,
13402 } else if (ptr
[0] == 'p') {
13403 ret
= mptsas_config_one_phy(pdip
, phy
, lun
,
13408 * If this is CD/DVD device in OBP path, the
13409 * ndi_busop_bus_config can be skipped as config one
13410 * operation is done above.
13412 if ((ret
== NDI_SUCCESS
) && (*childp
!= NULL
) &&
13413 (strcmp(ddi_node_name(*childp
), "cdrom") == 0) &&
13414 (strncmp((char *)arg
, "disk", 4) == 0)) {
13416 ndi_hold_devi(*childp
);
13424 * DDI group instructed us to use this flag.
13426 mflags
|= NDI_MDI_FALLBACK
;
13428 case BUS_CONFIG_DRIVER
:
13429 case BUS_CONFIG_ALL
:
13430 mptsas_config_all(pdip
);
13435 if ((ret
== NDI_SUCCESS
) && bconfig
) {
13436 ret
= ndi_busop_bus_config(pdip
, mflags
, op
,
13437 (devnm
== NULL
) ? arg
: devnm
, childp
, 0);
13440 ndi_devi_exit(pdip
, circ1
);
13441 ndi_devi_exit(scsi_vhci_dip
, circ
);
13443 kmem_free(devnm
, SCSI_MAXNAMELEN
);
13448 mptsas_probe_lun(dev_info_t
*pdip
, int lun
, dev_info_t
**dip
,
13449 mptsas_target_t
*ptgt
)
13451 int rval
= DDI_FAILURE
;
13452 struct scsi_inquiry
*sd_inq
= NULL
;
13453 mptsas_t
*mpt
= DIP2MPT(pdip
);
13455 sd_inq
= (struct scsi_inquiry
*)kmem_alloc(SUN_INQSIZE
, KM_SLEEP
);
13457 rval
= mptsas_inquiry(mpt
, ptgt
, lun
, 0, (uchar_t
*)sd_inq
,
13458 SUN_INQSIZE
, 0, (uchar_t
)0);
13460 if ((rval
== DDI_SUCCESS
) && MPTSAS_VALID_LUN(sd_inq
)) {
13461 rval
= mptsas_create_lun(pdip
, sd_inq
, dip
, ptgt
, lun
);
13463 rval
= DDI_FAILURE
;
13466 kmem_free(sd_inq
, SUN_INQSIZE
);
13471 mptsas_config_one_addr(dev_info_t
*pdip
, uint64_t sasaddr
, int lun
,
13472 dev_info_t
**lundip
)
13475 mptsas_t
*mpt
= DIP2MPT(pdip
);
13477 mptsas_target_t
*ptgt
= NULL
;
13480 * Get the physical port associated to the iport
13482 phymask
= ddi_prop_get_int(DDI_DEV_T_ANY
, pdip
, 0,
13485 ptgt
= mptsas_wwid_to_ptgt(mpt
, phymask
, sasaddr
);
13486 if (ptgt
== NULL
) {
13488 * didn't match any device by searching
13490 return (DDI_FAILURE
);
13493 * If the LUN already exists and the status is online,
13494 * we just return the pointer to dev_info_t directly.
13495 * For the mdi_pathinfo node, we'll handle it in
13496 * mptsas_create_virt_lun()
13497 * TODO should be also in mptsas_handle_dr
13500 *lundip
= mptsas_find_child_addr(pdip
, sasaddr
, lun
);
13501 if (*lundip
!= NULL
) {
13503 * TODO Another senario is, we hotplug the same disk
13504 * on the same slot, the devhdl changed, is this
13506 * tgt_private->t_private != ptgt
13508 if (sasaddr
!= ptgt
->m_sas_wwn
) {
13510 * The device has changed although the devhdl is the
13511 * same (Enclosure mapping mode, change drive on the
13514 return (DDI_FAILURE
);
13516 return (DDI_SUCCESS
);
13519 if (phymask
== 0) {
13521 * Configure IR volume
13523 rval
= mptsas_config_raid(pdip
, ptgt
->m_devhdl
, lundip
);
13526 rval
= mptsas_probe_lun(pdip
, lun
, lundip
, ptgt
);
13532 mptsas_config_one_phy(dev_info_t
*pdip
, uint8_t phy
, int lun
,
13533 dev_info_t
**lundip
)
13536 mptsas_t
*mpt
= DIP2MPT(pdip
);
13538 mptsas_target_t
*ptgt
= NULL
;
13541 * Get the physical port associated to the iport
13543 phymask
= ddi_prop_get_int(DDI_DEV_T_ANY
, pdip
, 0,
13546 ptgt
= mptsas_phy_to_tgt(mpt
, phymask
, phy
);
13547 if (ptgt
== NULL
) {
13549 * didn't match any device by searching
13551 return (DDI_FAILURE
);
13555 * If the LUN already exists and the status is online,
13556 * we just return the pointer to dev_info_t directly.
13557 * For the mdi_pathinfo node, we'll handle it in
13558 * mptsas_create_virt_lun().
13561 *lundip
= mptsas_find_child_phy(pdip
, phy
);
13562 if (*lundip
!= NULL
) {
13563 return (DDI_SUCCESS
);
13566 rval
= mptsas_probe_lun(pdip
, lun
, lundip
, ptgt
);
13572 mptsas_retrieve_lundata(int lun_cnt
, uint8_t *buf
, uint16_t *lun_num
,
13573 uint8_t *lun_addr_type
)
13575 uint32_t lun_idx
= 0;
13577 ASSERT(lun_num
!= NULL
);
13578 ASSERT(lun_addr_type
!= NULL
);
13580 lun_idx
= (lun_cnt
+ 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE
;
13581 /* determine report luns addressing type */
13582 switch (buf
[lun_idx
] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK
) {
13584 * Vendors in the field have been found to be concatenating
13585 * bus/target/lun to equal the complete lun value instead
13586 * of switching to flat space addressing
13588 /* 00b - peripheral device addressing method */
13589 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL
:
13591 /* 10b - logical unit addressing method */
13592 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT
:
13594 /* 01b - flat space addressing method */
13595 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE
:
13596 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13597 *lun_addr_type
= (buf
[lun_idx
] &
13598 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK
) >> 6;
13599 *lun_num
= (buf
[lun_idx
] & 0x3F) << 8;
13600 *lun_num
|= buf
[lun_idx
+ 1];
13601 return (DDI_SUCCESS
);
13603 return (DDI_FAILURE
);
13608 mptsas_config_luns(dev_info_t
*pdip
, mptsas_target_t
*ptgt
)
13610 struct buf
*repluns_bp
= NULL
;
13611 struct scsi_address ap
;
13612 uchar_t cdb
[CDB_GROUP5
];
13613 int ret
= DDI_FAILURE
;
13615 int lun_list_len
= 0;
13616 uint16_t lun_num
= 0;
13617 uint8_t lun_addr_type
= 0;
13618 uint32_t lun_cnt
= 0;
13619 uint32_t lun_total
= 0;
13620 dev_info_t
*cdip
= NULL
;
13621 uint16_t *saved_repluns
= NULL
;
13622 char *buffer
= NULL
;
13624 mptsas_t
*mpt
= DIP2MPT(pdip
);
13625 uint64_t sas_wwn
= 0;
13626 uint8_t phy
= 0xFF;
13627 uint32_t dev_info
= 0;
13629 mutex_enter(&mpt
->m_mutex
);
13630 sas_wwn
= ptgt
->m_sas_wwn
;
13631 phy
= ptgt
->m_phynum
;
13632 dev_info
= ptgt
->m_deviceinfo
;
13633 mutex_exit(&mpt
->m_mutex
);
13635 if (sas_wwn
== 0) {
13637 * It's a SATA without Device Name
13638 * So don't try multi-LUNs
13640 if (mptsas_find_child_phy(pdip
, phy
)) {
13641 return (DDI_SUCCESS
);
13644 * need configure and create node
13646 return (DDI_FAILURE
);
13651 * WWN (SAS address or Device Name exist)
13653 if (dev_info
& (MPI2_SAS_DEVICE_INFO_SATA_DEVICE
|
13654 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE
)) {
13656 * SATA device with Device Name
13657 * So don't try multi-LUNs
13659 if (mptsas_find_child_addr(pdip
, sas_wwn
, 0)) {
13660 return (DDI_SUCCESS
);
13662 return (DDI_FAILURE
);
13667 ap
.a_target
= MPTSAS_INVALID_DEVHDL
;
13669 ap
.a_hba_tran
= mpt
->m_tran
;
13670 repluns_bp
= scsi_alloc_consistent_buf(&ap
,
13671 (struct buf
*)NULL
, buf_len
, B_READ
, NULL_FUNC
, NULL
);
13672 if (repluns_bp
== NULL
) {
13676 bzero(cdb
, CDB_GROUP5
);
13677 cdb
[0] = SCMD_REPORT_LUNS
;
13678 cdb
[6] = (buf_len
& 0xff000000) >> 24;
13679 cdb
[7] = (buf_len
& 0x00ff0000) >> 16;
13680 cdb
[8] = (buf_len
& 0x0000ff00) >> 8;
13681 cdb
[9] = (buf_len
& 0x000000ff);
13683 ret
= mptsas_send_scsi_cmd(mpt
, &ap
, ptgt
, &cdb
[0], CDB_GROUP5
,
13685 if (ret
!= DDI_SUCCESS
) {
13686 scsi_free_consistent_buf(repluns_bp
);
13690 lun_list_len
= BE_32(*(int *)((void *)(
13691 repluns_bp
->b_un
.b_addr
)));
13692 if (buf_len
>= lun_list_len
+ 8) {
13696 scsi_free_consistent_buf(repluns_bp
);
13697 buf_len
= lun_list_len
+ 8;
13699 } while (retry
< 3);
13701 if (ret
!= DDI_SUCCESS
)
13703 buffer
= (char *)repluns_bp
->b_un
.b_addr
;
13705 * find out the number of luns returned by the SCSI ReportLun call
13706 * and allocate buffer space
13708 lun_total
= lun_list_len
/ MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE
;
13709 saved_repluns
= kmem_zalloc(sizeof (uint16_t) * lun_total
, KM_SLEEP
);
13710 if (saved_repluns
== NULL
) {
13711 scsi_free_consistent_buf(repluns_bp
);
13712 return (DDI_FAILURE
);
13714 for (lun_cnt
= 0; lun_cnt
< lun_total
; lun_cnt
++) {
13715 if (mptsas_retrieve_lundata(lun_cnt
, (uint8_t *)(buffer
),
13716 &lun_num
, &lun_addr_type
) != DDI_SUCCESS
) {
13719 saved_repluns
[lun_cnt
] = lun_num
;
13720 if (cdip
= mptsas_find_child_addr(pdip
, sas_wwn
, lun_num
))
13723 ret
= mptsas_probe_lun(pdip
, lun_num
, &cdip
,
13725 if ((ret
== DDI_SUCCESS
) && (cdip
!= NULL
)) {
13726 (void) ndi_prop_remove(DDI_DEV_T_NONE
, cdip
,
13730 mptsas_offline_missed_luns(pdip
, saved_repluns
, lun_total
, ptgt
);
13731 kmem_free(saved_repluns
, sizeof (uint16_t) * lun_total
);
13732 scsi_free_consistent_buf(repluns_bp
);
13733 return (DDI_SUCCESS
);
13737 mptsas_config_raid(dev_info_t
*pdip
, uint16_t target
, dev_info_t
**dip
)
13739 int rval
= DDI_FAILURE
;
13740 struct scsi_inquiry
*sd_inq
= NULL
;
13741 mptsas_t
*mpt
= DIP2MPT(pdip
);
13742 mptsas_target_t
*ptgt
= NULL
;
13744 mutex_enter(&mpt
->m_mutex
);
13745 ptgt
= mptsas_search_by_devhdl(&mpt
->m_active
->m_tgttbl
, target
);
13746 mutex_exit(&mpt
->m_mutex
);
13747 if (ptgt
== NULL
) {
13748 mptsas_log(mpt
, CE_WARN
, "Volume with VolDevHandle of 0x%x "
13749 "not found.", target
);
13753 sd_inq
= (struct scsi_inquiry
*)kmem_alloc(SUN_INQSIZE
, KM_SLEEP
);
13754 rval
= mptsas_inquiry(mpt
, ptgt
, 0, 0, (uchar_t
*)sd_inq
,
13755 SUN_INQSIZE
, 0, (uchar_t
)0);
13757 if ((rval
== DDI_SUCCESS
) && MPTSAS_VALID_LUN(sd_inq
)) {
13758 rval
= mptsas_create_phys_lun(pdip
, sd_inq
, NULL
, dip
, ptgt
,
13761 rval
= DDI_FAILURE
;
13764 kmem_free(sd_inq
, SUN_INQSIZE
);
13769 * configure all RAID volumes for virtual iport
13772 mptsas_config_all_viport(dev_info_t
*pdip
)
13774 mptsas_t
*mpt
= DIP2MPT(pdip
);
13777 dev_info_t
*lundip
= NULL
;
13778 mptsas_slots_t
*slots
= mpt
->m_active
;
13781 * Get latest RAID info and search for any Volume DevHandles. If any
13782 * are found, configure the volume.
13784 mutex_enter(&mpt
->m_mutex
);
13785 for (config
= 0; config
< slots
->m_num_raid_configs
; config
++) {
13786 for (vol
= 0; vol
< MPTSAS_MAX_RAIDVOLS
; vol
++) {
13787 if (slots
->m_raidconfig
[config
].m_raidvol
[vol
].m_israid
13789 target
= slots
->m_raidconfig
[config
].
13790 m_raidvol
[vol
].m_raidhandle
;
13791 mutex_exit(&mpt
->m_mutex
);
13792 (void) mptsas_config_raid(pdip
, target
,
13794 mutex_enter(&mpt
->m_mutex
);
13798 mutex_exit(&mpt
->m_mutex
);
13802 mptsas_offline_missed_luns(dev_info_t
*pdip
, uint16_t *repluns
,
13803 int lun_cnt
, mptsas_target_t
*ptgt
)
13805 dev_info_t
*child
= NULL
, *savechild
= NULL
;
13806 mdi_pathinfo_t
*pip
= NULL
, *savepip
= NULL
;
13807 uint64_t sas_wwn
, wwid
;
13814 mptsas_t
*mpt
= DIP2MPT(pdip
);
13816 mutex_enter(&mpt
->m_mutex
);
13817 wwid
= ptgt
->m_sas_wwn
;
13818 mutex_exit(&mpt
->m_mutex
);
13820 child
= ddi_get_child(pdip
);
13824 child
= ddi_get_next_sibling(child
);
13826 nodename
= ddi_node_name(savechild
);
13827 if (strcmp(nodename
, "smp") == 0) {
13831 addr
= ddi_get_name_addr(savechild
);
13832 if (addr
== NULL
) {
13836 if (mptsas_parse_address(addr
, &sas_wwn
, &phy
, &lun
) !=
13841 if (wwid
== sas_wwn
) {
13842 for (i
= 0; i
< lun_cnt
; i
++) {
13843 if (repluns
[i
] == lun
) {
13853 * The lun has not been there already
13855 (void) mptsas_offline_lun(pdip
, savechild
, NULL
,
13860 pip
= mdi_get_next_client_path(pdip
, NULL
);
13864 addr
= MDI_PI(pip
)->pi_addr
;
13866 pip
= mdi_get_next_client_path(pdip
, pip
);
13868 if (addr
== NULL
) {
13872 if (mptsas_parse_address(addr
, &sas_wwn
, &phy
,
13873 &lun
) != DDI_SUCCESS
) {
13877 if (sas_wwn
== wwid
) {
13878 for (i
= 0; i
< lun_cnt
; i
++) {
13879 if (repluns
[i
] == lun
) {
13890 * The lun has not been there already
13892 (void) mptsas_offline_lun(pdip
, NULL
, savepip
,
13899 mptsas_update_hashtab(struct mptsas
*mpt
)
13901 uint32_t page_address
;
13903 uint16_t dev_handle
;
13904 mptsas_target_t
*ptgt
= NULL
;
13905 mptsas_smp_t smp_node
;
13908 * Get latest RAID info.
13910 (void) mptsas_get_raid_info(mpt
);
13912 dev_handle
= mpt
->m_smp_devhdl
;
13913 for (; mpt
->m_done_traverse_smp
== 0; ) {
13914 page_address
= (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL
&
13915 MPI2_SAS_EXPAND_PGAD_FORM_MASK
) | (uint32_t)dev_handle
;
13916 if (mptsas_get_sas_expander_page0(mpt
, page_address
, &smp_node
)
13920 mpt
->m_smp_devhdl
= dev_handle
= smp_node
.m_devhdl
;
13921 (void) mptsas_smp_alloc(&mpt
->m_active
->m_smptbl
, &smp_node
);
13925 * Config target devices
13927 dev_handle
= mpt
->m_dev_handle
;
13930 * Do loop to get sas device page 0 by GetNextHandle till the
13931 * the last handle. If the sas device is a SATA/SSP target,
13932 * we try to config it.
13934 for (; mpt
->m_done_traverse_dev
== 0; ) {
13937 (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE
&
13938 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) |
13939 (uint32_t)dev_handle
;
13940 rval
= mptsas_get_target_device_info(mpt
, page_address
,
13941 &dev_handle
, &ptgt
);
13942 if ((rval
== DEV_INFO_FAIL_PAGE0
) ||
13943 (rval
== DEV_INFO_FAIL_ALLOC
)) {
13947 mpt
->m_dev_handle
= dev_handle
;
13953 mptsas_invalid_hashtab(mptsas_hash_table_t
*hashtab
)
13955 mptsas_hash_data_t
*data
;
13956 data
= mptsas_hash_traverse(hashtab
, MPTSAS_HASH_FIRST
);
13957 while (data
!= NULL
) {
13958 data
->devhdl
= MPTSAS_INVALID_DEVHDL
;
13959 data
->device_info
= 0;
13961 * For tgttbl, clear dr_flag.
13963 data
->dr_flag
= MPTSAS_DR_INACTIVE
;
13964 data
= mptsas_hash_traverse(hashtab
, MPTSAS_HASH_NEXT
);
13969 mptsas_update_driver_data(struct mptsas
*mpt
)
13972 * TODO after hard reset, update the driver data structures
13973 * 1. update port/phymask mapping table mpt->m_phy_info
13974 * 2. invalid all the entries in hash table
13975 * m_devhdl = 0xffff and m_deviceinfo = 0
13976 * 3. call sas_device_page/expander_page to update hash table
13978 mptsas_update_phymask(mpt
);
13980 * Invalid the existing entries
13982 mptsas_invalid_hashtab(&mpt
->m_active
->m_tgttbl
);
13983 mptsas_invalid_hashtab(&mpt
->m_active
->m_smptbl
);
13984 mpt
->m_done_traverse_dev
= 0;
13985 mpt
->m_done_traverse_smp
= 0;
13986 mpt
->m_dev_handle
= mpt
->m_smp_devhdl
= MPTSAS_INVALID_DEVHDL
;
13987 mptsas_update_hashtab(mpt
);
13991 mptsas_config_all(dev_info_t
*pdip
)
13993 dev_info_t
*smpdip
= NULL
;
13994 mptsas_t
*mpt
= DIP2MPT(pdip
);
13996 mptsas_phymask_t phy_mask
;
13997 mptsas_target_t
*ptgt
= NULL
;
13998 mptsas_smp_t
*psmp
;
14001 * Get the phymask associated to the iport
14003 phymask
= ddi_prop_get_int(DDI_DEV_T_ANY
, pdip
, 0,
14007 * Enumerate RAID volumes here (phymask == 0).
14009 if (phymask
== 0) {
14010 mptsas_config_all_viport(pdip
);
14014 mutex_enter(&mpt
->m_mutex
);
14016 if (!mpt
->m_done_traverse_dev
|| !mpt
->m_done_traverse_smp
) {
14017 mptsas_update_hashtab(mpt
);
14020 psmp
= (mptsas_smp_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_smptbl
,
14021 MPTSAS_HASH_FIRST
);
14022 while (psmp
!= NULL
) {
14023 phy_mask
= psmp
->m_phymask
;
14024 if (phy_mask
== phymask
) {
14026 mutex_exit(&mpt
->m_mutex
);
14027 (void) mptsas_online_smp(pdip
, psmp
, &smpdip
);
14028 mutex_enter(&mpt
->m_mutex
);
14030 psmp
= (mptsas_smp_t
*)mptsas_hash_traverse(
14031 &mpt
->m_active
->m_smptbl
, MPTSAS_HASH_NEXT
);
14034 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(&mpt
->m_active
->m_tgttbl
,
14035 MPTSAS_HASH_FIRST
);
14036 while (ptgt
!= NULL
) {
14037 phy_mask
= ptgt
->m_phymask
;
14038 if (phy_mask
== phymask
) {
14039 mutex_exit(&mpt
->m_mutex
);
14040 (void) mptsas_config_target(pdip
, ptgt
);
14041 mutex_enter(&mpt
->m_mutex
);
14044 ptgt
= (mptsas_target_t
*)mptsas_hash_traverse(
14045 &mpt
->m_active
->m_tgttbl
, MPTSAS_HASH_NEXT
);
14047 mutex_exit(&mpt
->m_mutex
);
14051 mptsas_config_target(dev_info_t
*pdip
, mptsas_target_t
*ptgt
)
14053 int rval
= DDI_FAILURE
;
14056 rval
= mptsas_config_luns(pdip
, ptgt
);
14057 if (rval
!= DDI_SUCCESS
) {
14059 * The return value means the SCMD_REPORT_LUNS
14060 * did not execute successfully. The target maybe
14061 * doesn't support such command.
14063 rval
= mptsas_probe_lun(pdip
, 0, &tdip
, ptgt
);
14069 * Return fail if not all the childs/paths are freed.
14070 * if there is any path under the HBA, the return value will be always fail
14071 * because we didn't call mdi_pi_free for path
14074 mptsas_offline_target(dev_info_t
*pdip
, char *name
)
14076 dev_info_t
*child
= NULL
, *prechild
= NULL
;
14077 mdi_pathinfo_t
*pip
= NULL
, *savepip
= NULL
;
14078 int tmp_rval
, rval
= DDI_SUCCESS
;
14081 mptsas_t
*mpt
= DIP2MPT(pdip
);
14083 child
= ddi_get_child(pdip
);
14085 addr
= ddi_get_name_addr(child
);
14087 child
= ddi_get_next_sibling(child
);
14089 if (addr
== NULL
) {
14092 if ((cp
= strchr(addr
, ',')) == NULL
) {
14096 s
= (uintptr_t)cp
- (uintptr_t)addr
;
14098 if (strncmp(addr
, name
, s
) != 0) {
14102 tmp_rval
= mptsas_offline_lun(pdip
, prechild
, NULL
,
14104 if (tmp_rval
!= DDI_SUCCESS
) {
14105 rval
= DDI_FAILURE
;
14106 if (ndi_prop_create_boolean(DDI_DEV_T_NONE
,
14107 prechild
, MPTSAS_DEV_GONE
) !=
14108 DDI_PROP_SUCCESS
) {
14109 mptsas_log(mpt
, CE_WARN
, "mptsas driver "
14110 "unable to create property for "
14111 "SAS %s (MPTSAS_DEV_GONE)", addr
);
14116 pip
= mdi_get_next_client_path(pdip
, NULL
);
14118 addr
= MDI_PI(pip
)->pi_addr
;
14120 pip
= mdi_get_next_client_path(pdip
, pip
);
14121 if (addr
== NULL
) {
14125 if ((cp
= strchr(addr
, ',')) == NULL
) {
14129 s
= (uintptr_t)cp
- (uintptr_t)addr
;
14131 if (strncmp(addr
, name
, s
) != 0) {
14135 (void) mptsas_offline_lun(pdip
, NULL
, savepip
,
14138 * driver will not invoke mdi_pi_free, so path will not
14139 * be freed forever, return DDI_FAILURE.
14141 rval
= DDI_FAILURE
;
14147 mptsas_offline_lun(dev_info_t
*pdip
, dev_info_t
*rdip
,
14148 mdi_pathinfo_t
*rpip
, uint_t flags
)
14150 int rval
= DDI_FAILURE
;
14152 dev_info_t
*cdip
, *parent
;
14154 if (rpip
!= NULL
) {
14155 parent
= scsi_vhci_dip
;
14156 cdip
= mdi_pi_get_client(rpip
);
14157 } else if (rdip
!= NULL
) {
14161 return (DDI_FAILURE
);
14165 * Make sure node is attached otherwise
14166 * it won't have related cache nodes to
14167 * clean up. i_ddi_devi_attached is
14168 * similiar to i_ddi_node_state(cdip) >=
14171 if (i_ddi_devi_attached(cdip
)) {
14173 /* Get full devname */
14174 devname
= kmem_alloc(MAXNAMELEN
+ 1, KM_SLEEP
);
14175 (void) ddi_deviname(cdip
, devname
);
14177 (void) devfs_clean(parent
, devname
+ 1,
14179 kmem_free(devname
, MAXNAMELEN
+ 1);
14181 if (rpip
!= NULL
) {
14182 if (MDI_PI_IS_OFFLINE(rpip
)) {
14183 rval
= DDI_SUCCESS
;
14185 rval
= mdi_pi_offline(rpip
, 0);
14188 rval
= ndi_devi_offline(cdip
, flags
);
14194 static dev_info_t
*
14195 mptsas_find_smp_child(dev_info_t
*parent
, char *str_wwn
)
14197 dev_info_t
*child
= NULL
;
14198 char *smp_wwn
= NULL
;
14200 child
= ddi_get_child(parent
);
14202 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, child
,
14203 DDI_PROP_DONTPASS
, SMP_WWN
, &smp_wwn
)
14205 child
= ddi_get_next_sibling(child
);
14209 if (strcmp(smp_wwn
, str_wwn
) == 0) {
14210 ddi_prop_free(smp_wwn
);
14213 child
= ddi_get_next_sibling(child
);
14214 ddi_prop_free(smp_wwn
);
14220 mptsas_offline_smp(dev_info_t
*pdip
, mptsas_smp_t
*smp_node
, uint_t flags
)
14222 int rval
= DDI_FAILURE
;
14224 char wwn_str
[MPTSAS_WWN_STRLEN
];
14227 (void) sprintf(wwn_str
, "%"PRIx64
, smp_node
->m_sasaddr
);
14229 cdip
= mptsas_find_smp_child(pdip
, wwn_str
);
14232 return (DDI_SUCCESS
);
14235 * Make sure node is attached otherwise
14236 * it won't have related cache nodes to
14237 * clean up. i_ddi_devi_attached is
14238 * similiar to i_ddi_node_state(cdip) >=
14241 if (i_ddi_devi_attached(cdip
)) {
14243 /* Get full devname */
14244 devname
= kmem_alloc(MAXNAMELEN
+ 1, KM_SLEEP
);
14245 (void) ddi_deviname(cdip
, devname
);
14247 (void) devfs_clean(pdip
, devname
+ 1,
14249 kmem_free(devname
, MAXNAMELEN
+ 1);
14252 rval
= ndi_devi_offline(cdip
, flags
);
14257 static dev_info_t
*
14258 mptsas_find_child(dev_info_t
*pdip
, char *name
)
14260 dev_info_t
*child
= NULL
;
14261 char *rname
= NULL
;
14262 int rval
= DDI_FAILURE
;
14264 rname
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
14266 child
= ddi_get_child(pdip
);
14268 rval
= mptsas_name_child(child
, rname
, SCSI_MAXNAMELEN
);
14269 if (rval
!= DDI_SUCCESS
) {
14270 child
= ddi_get_next_sibling(child
);
14271 bzero(rname
, SCSI_MAXNAMELEN
);
14275 if (strcmp(rname
, name
) == 0) {
14278 child
= ddi_get_next_sibling(child
);
14279 bzero(rname
, SCSI_MAXNAMELEN
);
14282 kmem_free(rname
, SCSI_MAXNAMELEN
);
14288 static dev_info_t
*
14289 mptsas_find_child_addr(dev_info_t
*pdip
, uint64_t sasaddr
, int lun
)
14291 dev_info_t
*child
= NULL
;
14295 name
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
14296 addr
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
14297 (void) sprintf(name
, "%016"PRIx64
, sasaddr
);
14298 (void) sprintf(addr
, "w%s,%x", name
, lun
);
14299 child
= mptsas_find_child(pdip
, addr
);
14300 kmem_free(name
, SCSI_MAXNAMELEN
);
14301 kmem_free(addr
, SCSI_MAXNAMELEN
);
14305 static dev_info_t
*
14306 mptsas_find_child_phy(dev_info_t
*pdip
, uint8_t phy
)
14311 addr
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
14312 (void) sprintf(addr
, "p%x,0", phy
);
14313 child
= mptsas_find_child(pdip
, addr
);
14314 kmem_free(addr
, SCSI_MAXNAMELEN
);
14318 static mdi_pathinfo_t
*
14319 mptsas_find_path_phy(dev_info_t
*pdip
, uint8_t phy
)
14321 mdi_pathinfo_t
*path
;
14324 addr
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
14325 (void) sprintf(addr
, "p%x,0", phy
);
14326 path
= mdi_pi_find(pdip
, NULL
, addr
);
14327 kmem_free(addr
, SCSI_MAXNAMELEN
);
14331 static mdi_pathinfo_t
*
14332 mptsas_find_path_addr(dev_info_t
*parent
, uint64_t sasaddr
, int lun
)
14334 mdi_pathinfo_t
*path
;
14338 name
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
14339 addr
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
14340 (void) sprintf(name
, "%016"PRIx64
, sasaddr
);
14341 (void) sprintf(addr
, "w%s,%x", name
, lun
);
14342 path
= mdi_pi_find(parent
, NULL
, addr
);
14343 kmem_free(name
, SCSI_MAXNAMELEN
);
14344 kmem_free(addr
, SCSI_MAXNAMELEN
);
14350 mptsas_create_lun(dev_info_t
*pdip
, struct scsi_inquiry
*sd_inq
,
14351 dev_info_t
**lun_dip
, mptsas_target_t
*ptgt
, int lun
)
14354 uchar_t
*inq83
= NULL
;
14355 int inq83_len1
= 0xFF;
14357 int rval
= DDI_FAILURE
;
14360 int target
= ptgt
->m_devhdl
;
14361 mdi_pathinfo_t
*pip
= NULL
;
14362 mptsas_t
*mpt
= DIP2MPT(pdip
);
14365 * For DVD/CD ROM and tape devices and optical
14366 * devices, we won't try to enumerate them under
14367 * scsi_vhci, so no need to try page83
14369 if (sd_inq
&& (sd_inq
->inq_dtype
== DTYPE_RODIRECT
||
14370 sd_inq
->inq_dtype
== DTYPE_OPTICAL
||
14371 sd_inq
->inq_dtype
== DTYPE_ESI
))
14375 * The LCA returns good SCSI status, but corrupt page 83 data the first
14376 * time it is queried. The solution is to keep trying to request page83
14377 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
14378 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
14379 * give up to get VPD page at this stage and fail the enumeration.
14382 inq83
= kmem_zalloc(inq83_len1
, KM_SLEEP
);
14384 for (i
= 0; i
< mptsas_inq83_retry_timeout
; i
++) {
14385 rval
= mptsas_inquiry(mpt
, ptgt
, lun
, 0x83, inq83
,
14386 inq83_len1
, &inq83_len
, 1);
14388 mptsas_log(mpt
, CE_WARN
, "!mptsas request inquiry page "
14389 "0x83 for target:%x, lun:%x failed!", target
, lun
);
14390 if (mptsas_physical_bind_failed_page_83
!= B_FALSE
)
14395 * create DEVID from inquiry data
14397 if ((rval
= ddi_devid_scsi_encode(
14398 DEVID_SCSI_ENCODE_VERSION_LATEST
, NULL
, (uchar_t
*)sd_inq
,
14399 sizeof (struct scsi_inquiry
), NULL
, 0, inq83
,
14400 (size_t)inq83_len
, &devid
)) == DDI_SUCCESS
) {
14402 * extract GUID from DEVID
14404 guid
= ddi_devid_to_guid(devid
);
14407 * Do not enable MPXIO if the strlen(guid) is greater
14408 * than MPTSAS_MAX_GUID_LEN, this constrain would be
14409 * handled by framework later.
14411 if (guid
&& (strlen(guid
) > MPTSAS_MAX_GUID_LEN
)) {
14412 ddi_devid_free_guid(guid
);
14414 if (mpt
->m_mpxio_enable
== TRUE
) {
14415 mptsas_log(mpt
, CE_NOTE
, "!Target:%x, "
14416 "lun:%x doesn't have a valid GUID, "
14417 "multipathing for this drive is "
14418 "not enabled", target
, lun
);
14423 * devid no longer needed
14425 ddi_devid_free(devid
);
14427 } else if (rval
== DDI_NOT_WELL_FORMED
) {
14429 * return value of ddi_devid_scsi_encode equal to
14430 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
14431 * to retry inquiry page 0x83 and get GUID.
14433 NDBG20(("Not well formed devid, retry..."));
14434 delay(1 * drv_usectohz(1000000));
14437 mptsas_log(mpt
, CE_WARN
, "!Encode devid failed for "
14438 "path target:%x, lun:%x", target
, lun
);
14439 rval
= DDI_FAILURE
;
14444 if (i
== mptsas_inq83_retry_timeout
) {
14445 mptsas_log(mpt
, CE_WARN
, "!Repeated page83 requests timeout "
14446 "for path target:%x, lun:%x", target
, lun
);
14449 rval
= DDI_FAILURE
;
14452 if ((guid
!= NULL
) && (mpt
->m_mpxio_enable
== TRUE
)) {
14453 rval
= mptsas_create_virt_lun(pdip
, sd_inq
, guid
, lun_dip
, &pip
,
14456 if (rval
!= DDI_SUCCESS
) {
14457 rval
= mptsas_create_phys_lun(pdip
, sd_inq
, guid
, lun_dip
,
14462 if (guid
!= NULL
) {
14464 * guid no longer needed
14466 ddi_devid_free_guid(guid
);
14469 kmem_free(inq83
, inq83_len1
);
14474 mptsas_create_virt_lun(dev_info_t
*pdip
, struct scsi_inquiry
*inq
, char *guid
,
14475 dev_info_t
**lun_dip
, mdi_pathinfo_t
**pip
, mptsas_target_t
*ptgt
, int lun
)
14478 char *nodename
= NULL
;
14479 char **compatible
= NULL
;
14480 int ncompatible
= 0;
14481 int mdi_rtn
= MDI_FAILURE
;
14482 int rval
= DDI_FAILURE
;
14483 char *old_guid
= NULL
;
14484 mptsas_t
*mpt
= DIP2MPT(pdip
);
14485 char *lun_addr
= NULL
;
14486 char *wwn_str
= NULL
;
14487 char *attached_wwn_str
= NULL
;
14488 char *component
= NULL
;
14489 uint8_t phy
= 0xFF;
14495 uint64_t dev_sas_wwn
;
14496 uint64_t pdev_sas_wwn
;
14497 uint32_t pdev_info
;
14500 uint32_t page_address
;
14501 uint16_t bay_num
, enclosure
;
14502 char pdev_wwn_str
[MPTSAS_WWN_STRLEN
];
14505 mutex_enter(&mpt
->m_mutex
);
14506 target
= ptgt
->m_devhdl
;
14507 sas_wwn
= ptgt
->m_sas_wwn
;
14508 devinfo
= ptgt
->m_deviceinfo
;
14509 phy
= ptgt
->m_phynum
;
14510 mutex_exit(&mpt
->m_mutex
);
14513 *pip
= mptsas_find_path_addr(pdip
, sas_wwn
, lun
);
14515 *pip
= mptsas_find_path_phy(pdip
, phy
);
14518 if (*pip
!= NULL
) {
14519 *lun_dip
= MDI_PI(*pip
)->pi_client
->ct_dip
;
14520 ASSERT(*lun_dip
!= NULL
);
14521 if (ddi_prop_lookup_string(DDI_DEV_T_ANY
, *lun_dip
,
14522 (DDI_PROP_DONTPASS
| DDI_PROP_NOTPROM
),
14523 MDI_CLIENT_GUID_PROP
, &old_guid
) == DDI_SUCCESS
) {
14524 if (strncmp(guid
, old_guid
, strlen(guid
)) == 0) {
14526 * Same path back online again.
14528 (void) ddi_prop_free(old_guid
);
14529 if ((!MDI_PI_IS_ONLINE(*pip
)) &&
14530 (!MDI_PI_IS_STANDBY(*pip
)) &&
14531 (ptgt
->m_tgt_unconfigured
== 0)) {
14532 rval
= mdi_pi_online(*pip
, 0);
14533 mutex_enter(&mpt
->m_mutex
);
14534 (void) mptsas_set_led_status(mpt
, ptgt
,
14536 mutex_exit(&mpt
->m_mutex
);
14538 rval
= DDI_SUCCESS
;
14540 if (rval
!= DDI_SUCCESS
) {
14541 mptsas_log(mpt
, CE_WARN
, "path:target: "
14542 "%x, lun:%x online failed!", target
,
14550 * The GUID of the LUN has changed which maybe
14551 * because customer mapped another volume to the
14554 mptsas_log(mpt
, CE_WARN
, "The GUID of the "
14555 "target:%x, lun:%x was changed, maybe "
14556 "because someone mapped another volume "
14557 "to the same LUN", target
, lun
);
14558 (void) ddi_prop_free(old_guid
);
14559 if (!MDI_PI_IS_OFFLINE(*pip
)) {
14560 rval
= mdi_pi_offline(*pip
, 0);
14561 if (rval
!= MDI_SUCCESS
) {
14562 mptsas_log(mpt
, CE_WARN
, "path:"
14563 "target:%x, lun:%x offline "
14564 "failed!", target
, lun
);
14567 return (DDI_FAILURE
);
14570 if (mdi_pi_free(*pip
, 0) != MDI_SUCCESS
) {
14571 mptsas_log(mpt
, CE_WARN
, "path:target:"
14572 "%x, lun:%x free failed!", target
,
14576 return (DDI_FAILURE
);
14580 mptsas_log(mpt
, CE_WARN
, "Can't get client-guid "
14581 "property for path:target:%x, lun:%x", target
, lun
);
14584 return (DDI_FAILURE
);
14587 scsi_hba_nodename_compatible_get(inq
, NULL
,
14588 inq
->inq_dtype
, NULL
, &nodename
, &compatible
, &ncompatible
);
14591 * if nodename can't be determined then print a message and skip it
14593 if (nodename
== NULL
) {
14594 mptsas_log(mpt
, CE_WARN
, "mptsas driver found no compatible "
14595 "driver for target%d lun %d dtype:0x%02x", target
, lun
,
14597 return (DDI_FAILURE
);
14600 wwn_str
= kmem_zalloc(MPTSAS_WWN_STRLEN
, KM_SLEEP
);
14601 /* The property is needed by MPAPI */
14602 (void) sprintf(wwn_str
, "%016"PRIx64
, sas_wwn
);
14604 lun_addr
= kmem_zalloc(SCSI_MAXNAMELEN
, KM_SLEEP
);
14606 (void) sprintf(lun_addr
, "w%s,%x", wwn_str
, lun
);
14607 (void) sprintf(wwn_str
, "w%016"PRIx64
, sas_wwn
);
14609 (void) sprintf(lun_addr
, "p%x,%x", phy
, lun
);
14610 (void) sprintf(wwn_str
, "p%x", phy
);
14613 mdi_rtn
= mdi_pi_alloc_compatible(pdip
, nodename
,
14614 guid
, lun_addr
, compatible
, ncompatible
,
14616 if (mdi_rtn
== MDI_SUCCESS
) {
14618 if (mdi_prop_update_string(*pip
, MDI_GUID
,
14619 guid
) != DDI_SUCCESS
) {
14620 mptsas_log(mpt
, CE_WARN
, "mptsas driver unable to "
14621 "create prop for target %d lun %d (MDI_GUID)",
14623 mdi_rtn
= MDI_FAILURE
;
14624 goto virt_create_done
;
14627 if (mdi_prop_update_int(*pip
, LUN_PROP
,
14628 lun
) != DDI_SUCCESS
) {
14629 mptsas_log(mpt
, CE_WARN
, "mptsas driver unable to "
14630 "create prop for target %d lun %d (LUN_PROP)",
14632 mdi_rtn
= MDI_FAILURE
;
14633 goto virt_create_done
;
14635 lun64
= (int64_t)lun
;
14636 if (mdi_prop_update_int64(*pip
, LUN64_PROP
,
14637 lun64
) != DDI_SUCCESS
) {
14638 mptsas_log(mpt
, CE_WARN
, "mptsas driver unable to "
14639 "create prop for target %d (LUN64_PROP)",
14641 mdi_rtn
= MDI_FAILURE
;
14642 goto virt_create_done
;
14644 if (mdi_prop_update_string_array(*pip
, "compatible",
14645 compatible
, ncompatible
) !=
14646 DDI_PROP_SUCCESS
) {
14647 mptsas_log(mpt
, CE_WARN
, "mptsas driver unable to "
14648 "create prop for target %d lun %d (COMPATIBLE)",
14650 mdi_rtn
= MDI_FAILURE
;
14651 goto virt_create_done
;
14653 if (sas_wwn
&& (mdi_prop_update_string(*pip
,
14654 SCSI_ADDR_PROP_TARGET_PORT
, wwn_str
) != DDI_PROP_SUCCESS
)) {
14655 mptsas_log(mpt
, CE_WARN
, "mptsas driver unable to "
14656 "create prop for target %d lun %d "
14657 "(target-port)", target
, lun
);
14658 mdi_rtn
= MDI_FAILURE
;
14659 goto virt_create_done
;
14660 } else if ((sas_wwn
== 0) && (mdi_prop_update_int(*pip
,
14661 "sata-phy", phy
) != DDI_PROP_SUCCESS
)) {
14663 * Direct attached SATA device without DeviceName
14665 mptsas_log(mpt
, CE_WARN
, "mptsas driver unable to "
14666 "create prop for SAS target %d lun %d "
14667 "(sata-phy)", target
, lun
);
14668 mdi_rtn
= MDI_FAILURE
;
14669 goto virt_create_done
;
14671 mutex_enter(&mpt
->m_mutex
);
14673 page_address
= (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE
&
14674 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) |
14675 (uint32_t)ptgt
->m_devhdl
;
14676 rval
= mptsas_get_sas_device_page0(mpt
, page_address
,
14677 &dev_hdl
, &dev_sas_wwn
, &dev_info
, &physport
,
14678 &phy_id
, &pdev_hdl
, &bay_num
, &enclosure
);
14679 if (rval
!= DDI_SUCCESS
) {
14680 mutex_exit(&mpt
->m_mutex
);
14681 mptsas_log(mpt
, CE_WARN
, "mptsas unable to get "
14682 "parent device for handle %d", page_address
);
14683 mdi_rtn
= MDI_FAILURE
;
14684 goto virt_create_done
;
14687 page_address
= (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE
&
14688 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) | (uint32_t)pdev_hdl
;
14689 rval
= mptsas_get_sas_device_page0(mpt
, page_address
,
14690 &dev_hdl
, &pdev_sas_wwn
, &pdev_info
, &physport
,
14691 &phy_id
, &pdev_hdl
, &bay_num
, &enclosure
);
14692 if (rval
!= DDI_SUCCESS
) {
14693 mutex_exit(&mpt
->m_mutex
);
14694 mptsas_log(mpt
, CE_WARN
, "mptsas unable to get"
14695 "device info for handle %d", page_address
);
14696 mdi_rtn
= MDI_FAILURE
;
14697 goto virt_create_done
;
14700 mutex_exit(&mpt
->m_mutex
);
14703 * If this device direct attached to the controller
14704 * set the attached-port to the base wwid
14706 if ((ptgt
->m_deviceinfo
& DEVINFO_DIRECT_ATTACHED
)
14707 != DEVINFO_DIRECT_ATTACHED
) {
14708 (void) sprintf(pdev_wwn_str
, "w%016"PRIx64
,
14712 * Update the iport's attached-port to guid
14714 if (sas_wwn
== 0) {
14715 (void) sprintf(wwn_str
, "p%x", phy
);
14717 (void) sprintf(wwn_str
, "w%016"PRIx64
, sas_wwn
);
14719 if (ddi_prop_update_string(DDI_DEV_T_NONE
,
14720 pdip
, SCSI_ADDR_PROP_ATTACHED_PORT
, wwn_str
) !=
14721 DDI_PROP_SUCCESS
) {
14722 mptsas_log(mpt
, CE_WARN
,
14723 "mptsas unable to create "
14724 "property for iport target-port"
14727 mdi_rtn
= MDI_FAILURE
;
14728 goto virt_create_done
;
14731 (void) sprintf(pdev_wwn_str
, "w%016"PRIx64
,
14732 mpt
->un
.m_base_wwid
);
14735 if (mdi_prop_update_string(*pip
,
14736 SCSI_ADDR_PROP_ATTACHED_PORT
, pdev_wwn_str
) !=
14737 DDI_PROP_SUCCESS
) {
14738 mptsas_log(mpt
, CE_WARN
, "mptsas unable to create "
14739 "property for iport attached-port %s (sas_wwn)",
14741 mdi_rtn
= MDI_FAILURE
;
14742 goto virt_create_done
;
14746 if (inq
->inq_dtype
== 0) {
14747 component
= kmem_zalloc(MAXPATHLEN
, KM_SLEEP
);
14749 * set obp path for pathinfo
14751 (void) snprintf(component
, MAXPATHLEN
,
14752 "disk@%s", lun_addr
);
14754 if (mdi_pi_pathname_obp_set(*pip
, component
) !=
14756 mptsas_log(mpt
, CE_WARN
, "mpt_sas driver "
14757 "unable to set obp-path for object %s",
14759 mdi_rtn
= MDI_FAILURE
;
14760 goto virt_create_done
;
14764 *lun_dip
= MDI_PI(*pip
)->pi_client
->ct_dip
;
14765 if (devinfo
& (MPI2_SAS_DEVICE_INFO_SATA_DEVICE
|
14766 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE
)) {
14767 if ((ndi_prop_update_int(DDI_DEV_T_NONE
, *lun_dip
,
14768 "pm-capable", 1)) !=
14769 DDI_PROP_SUCCESS
) {
14770 mptsas_log(mpt
, CE_WARN
, "mptsas driver"
14771 "failed to create pm-capable "
14772 "property, target %d", target
);
14773 mdi_rtn
= MDI_FAILURE
;
14774 goto virt_create_done
;
14778 * Create the phy-num property
14780 if (mdi_prop_update_int(*pip
, "phy-num",
14781 ptgt
->m_phynum
) != DDI_SUCCESS
) {
14782 mptsas_log(mpt
, CE_WARN
, "mptsas driver unable to "
14783 "create phy-num property for target %d lun %d",
14785 mdi_rtn
= MDI_FAILURE
;
14786 goto virt_create_done
;
14788 NDBG20(("new path:%s onlining,", MDI_PI(*pip
)->pi_addr
));
14789 mdi_rtn
= mdi_pi_online(*pip
, 0);
14790 if (mdi_rtn
== MDI_SUCCESS
) {
14791 mutex_enter(&mpt
->m_mutex
);
14792 if (mptsas_set_led_status(mpt
, ptgt
, 0) !=
14794 NDBG14(("mptsas: clear LED for slot %x "
14795 "failed", ptgt
->m_slot_num
));
14797 mutex_exit(&mpt
->m_mutex
);
14799 if (mdi_rtn
== MDI_NOT_SUPPORTED
) {
14800 mdi_rtn
= MDI_FAILURE
;
14803 if (*pip
&& mdi_rtn
!= MDI_SUCCESS
) {
14804 (void) mdi_pi_free(*pip
, 0);
14810 scsi_hba_nodename_compatible_free(nodename
, compatible
);
14811 if (lun_addr
!= NULL
) {
14812 kmem_free(lun_addr
, SCSI_MAXNAMELEN
);
14814 if (wwn_str
!= NULL
) {
14815 kmem_free(wwn_str
, MPTSAS_WWN_STRLEN
);
14817 if (component
!= NULL
) {
14818 kmem_free(component
, MAXPATHLEN
);
14821 return ((mdi_rtn
== MDI_SUCCESS
) ? DDI_SUCCESS
: DDI_FAILURE
);
14825 mptsas_create_phys_lun(dev_info_t
*pdip
, struct scsi_inquiry
*inq
,
14826 char *guid
, dev_info_t
**lun_dip
, mptsas_target_t
*ptgt
, int lun
)
14830 int ndi_rtn
= NDI_FAILURE
;
14831 uint64_t be_sas_wwn
;
14832 char *nodename
= NULL
;
14833 char **compatible
= NULL
;
14834 int ncompatible
= 0;
14836 mptsas_t
*mpt
= DIP2MPT(pdip
);
14837 char *wwn_str
= NULL
;
14838 char *component
= NULL
;
14839 char *attached_wwn_str
= NULL
;
14840 uint8_t phy
= 0xFF;
14845 uint64_t pdev_sas_wwn
;
14846 uint64_t dev_sas_wwn
;
14847 uint32_t pdev_info
;
14850 uint32_t page_address
;
14851 uint16_t bay_num
, enclosure
;
14852 char pdev_wwn_str
[MPTSAS_WWN_STRLEN
];
14856 mutex_enter(&mpt
->m_mutex
);
14857 target
= ptgt
->m_devhdl
;
14858 sas_wwn
= ptgt
->m_sas_wwn
;
14859 devinfo
= ptgt
->m_deviceinfo
;
14860 phy
= ptgt
->m_phynum
;
14861 mutex_exit(&mpt
->m_mutex
);
14864 * generate compatible property with binding-set "mpt"
14866 scsi_hba_nodename_compatible_get(inq
, NULL
, inq
->inq_dtype
, NULL
,
14867 &nodename
, &compatible
, &ncompatible
);
14870 * if nodename can't be determined then print a message and skip it
14872 if (nodename
== NULL
) {
14873 mptsas_log(mpt
, CE_WARN
, "mptsas found no compatible driver "
14874 "for target %d lun %d", target
, lun
);
14875 return (DDI_FAILURE
);
14878 ndi_rtn
= ndi_devi_alloc(pdip
, nodename
,
14879 DEVI_SID_NODEID
, lun_dip
);
14882 * if lun alloc success, set props
14884 if (ndi_rtn
== NDI_SUCCESS
) {
14886 if (ndi_prop_update_int(DDI_DEV_T_NONE
,
14887 *lun_dip
, LUN_PROP
, lun
) !=
14888 DDI_PROP_SUCCESS
) {
14889 mptsas_log(mpt
, CE_WARN
, "mptsas unable to create "
14890 "property for target %d lun %d (LUN_PROP)",
14892 ndi_rtn
= NDI_FAILURE
;
14893 goto phys_create_done
;
14896 lun64
= (int64_t)lun
;
14897 if (ndi_prop_update_int64(DDI_DEV_T_NONE
,
14898 *lun_dip
, LUN64_PROP
, lun64
) !=
14899 DDI_PROP_SUCCESS
) {
14900 mptsas_log(mpt
, CE_WARN
, "mptsas unable to create "
14901 "property for target %d lun64 %d (LUN64_PROP)",
14903 ndi_rtn
= NDI_FAILURE
;
14904 goto phys_create_done
;
14906 if (ndi_prop_update_string_array(DDI_DEV_T_NONE
,
14907 *lun_dip
, "compatible", compatible
, ncompatible
)
14908 != DDI_PROP_SUCCESS
) {
14909 mptsas_log(mpt
, CE_WARN
, "mptsas unable to create "
14910 "property for target %d lun %d (COMPATIBLE)",
14912 ndi_rtn
= NDI_FAILURE
;
14913 goto phys_create_done
;
14917 * We need the SAS WWN for non-multipath devices, so
14918 * we'll use the same property as that multipathing
14919 * devices need to present for MPAPI. If we don't have
14920 * a WWN (e.g. parallel SCSI), don't create the prop.
14922 wwn_str
= kmem_zalloc(MPTSAS_WWN_STRLEN
, KM_SLEEP
);
14923 (void) sprintf(wwn_str
, "w%016"PRIx64
, sas_wwn
);
14924 if (sas_wwn
&& ndi_prop_update_string(DDI_DEV_T_NONE
,
14925 *lun_dip
, SCSI_ADDR_PROP_TARGET_PORT
, wwn_str
)
14926 != DDI_PROP_SUCCESS
) {
14927 mptsas_log(mpt
, CE_WARN
, "mptsas unable to "
14928 "create property for SAS target %d lun %d "
14929 "(target-port)", target
, lun
);
14930 ndi_rtn
= NDI_FAILURE
;
14931 goto phys_create_done
;
14934 be_sas_wwn
= BE_64(sas_wwn
);
14935 if (sas_wwn
&& ndi_prop_update_byte_array(
14936 DDI_DEV_T_NONE
, *lun_dip
, "port-wwn",
14937 (uchar_t
*)&be_sas_wwn
, 8) != DDI_PROP_SUCCESS
) {
14938 mptsas_log(mpt
, CE_WARN
, "mptsas unable to "
14939 "create property for SAS target %d lun %d "
14940 "(port-wwn)", target
, lun
);
14941 ndi_rtn
= NDI_FAILURE
;
14942 goto phys_create_done
;
14943 } else if ((sas_wwn
== 0) && (ndi_prop_update_int(
14944 DDI_DEV_T_NONE
, *lun_dip
, "sata-phy", phy
) !=
14945 DDI_PROP_SUCCESS
)) {
14947 * Direct attached SATA device without DeviceName
14949 mptsas_log(mpt
, CE_WARN
, "mptsas unable to "
14950 "create property for SAS target %d lun %d "
14951 "(sata-phy)", target
, lun
);
14952 ndi_rtn
= NDI_FAILURE
;
14953 goto phys_create_done
;
14956 if (ndi_prop_create_boolean(DDI_DEV_T_NONE
,
14957 *lun_dip
, SAS_PROP
) != DDI_PROP_SUCCESS
) {
14958 mptsas_log(mpt
, CE_WARN
, "mptsas unable to"
14959 "create property for SAS target %d lun %d"
14960 " (SAS_PROP)", target
, lun
);
14961 ndi_rtn
= NDI_FAILURE
;
14962 goto phys_create_done
;
14964 if (guid
&& (ndi_prop_update_string(DDI_DEV_T_NONE
,
14965 *lun_dip
, NDI_GUID
, guid
) != DDI_SUCCESS
)) {
14966 mptsas_log(mpt
, CE_WARN
, "mptsas unable "
14967 "to create guid property for target %d "
14968 "lun %d", target
, lun
);
14969 ndi_rtn
= NDI_FAILURE
;
14970 goto phys_create_done
;
14974 * The following code is to set properties for SM-HBA support,
14975 * it doesn't apply to RAID volumes
14977 if (ptgt
->m_phymask
== 0)
14978 goto phys_raid_lun
;
14980 mutex_enter(&mpt
->m_mutex
);
14982 page_address
= (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE
&
14983 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) |
14984 (uint32_t)ptgt
->m_devhdl
;
14985 rval
= mptsas_get_sas_device_page0(mpt
, page_address
,
14986 &dev_hdl
, &dev_sas_wwn
, &dev_info
,
14987 &physport
, &phy_id
, &pdev_hdl
,
14988 &bay_num
, &enclosure
);
14989 if (rval
!= DDI_SUCCESS
) {
14990 mutex_exit(&mpt
->m_mutex
);
14991 mptsas_log(mpt
, CE_WARN
, "mptsas unable to get"
14992 "parent device for handle %d.", page_address
);
14993 ndi_rtn
= NDI_FAILURE
;
14994 goto phys_create_done
;
14997 page_address
= (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE
&
14998 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) | (uint32_t)pdev_hdl
;
14999 rval
= mptsas_get_sas_device_page0(mpt
, page_address
,
15000 &dev_hdl
, &pdev_sas_wwn
, &pdev_info
,
15001 &physport
, &phy_id
, &pdev_hdl
, &bay_num
, &enclosure
);
15002 if (rval
!= DDI_SUCCESS
) {
15003 mutex_exit(&mpt
->m_mutex
);
15004 mptsas_log(mpt
, CE_WARN
, "mptsas unable to create "
15005 "device for handle %d.", page_address
);
15006 ndi_rtn
= NDI_FAILURE
;
15007 goto phys_create_done
;
15010 mutex_exit(&mpt
->m_mutex
);
15013 * If this device direct attached to the controller
15014 * set the attached-port to the base wwid
15016 if ((ptgt
->m_deviceinfo
& DEVINFO_DIRECT_ATTACHED
)
15017 != DEVINFO_DIRECT_ATTACHED
) {
15018 (void) sprintf(pdev_wwn_str
, "w%016"PRIx64
,
15022 * Update the iport's attached-port to guid
15024 if (sas_wwn
== 0) {
15025 (void) sprintf(wwn_str
, "p%x", phy
);
15027 (void) sprintf(wwn_str
, "w%016"PRIx64
, sas_wwn
);
15029 if (ddi_prop_update_string(DDI_DEV_T_NONE
,
15030 pdip
, SCSI_ADDR_PROP_ATTACHED_PORT
, wwn_str
) !=
15031 DDI_PROP_SUCCESS
) {
15032 mptsas_log(mpt
, CE_WARN
,
15033 "mptsas unable to create "
15034 "property for iport target-port"
15037 ndi_rtn
= NDI_FAILURE
;
15038 goto phys_create_done
;
15041 (void) sprintf(pdev_wwn_str
, "w%016"PRIx64
,
15042 mpt
->un
.m_base_wwid
);
15045 if (ndi_prop_update_string(DDI_DEV_T_NONE
,
15046 *lun_dip
, SCSI_ADDR_PROP_ATTACHED_PORT
, pdev_wwn_str
) !=
15047 DDI_PROP_SUCCESS
) {
15048 mptsas_log(mpt
, CE_WARN
,
15049 "mptsas unable to create "
15050 "property for iport attached-port %s (sas_wwn)",
15052 ndi_rtn
= NDI_FAILURE
;
15053 goto phys_create_done
;
15056 if (IS_SATA_DEVICE(dev_info
)) {
15057 if (ndi_prop_update_string(DDI_DEV_T_NONE
,
15058 *lun_dip
, MPTSAS_VARIANT
, "sata") !=
15059 DDI_PROP_SUCCESS
) {
15060 mptsas_log(mpt
, CE_WARN
,
15061 "mptsas unable to create "
15062 "property for device variant ");
15063 ndi_rtn
= NDI_FAILURE
;
15064 goto phys_create_done
;
15068 if (IS_ATAPI_DEVICE(dev_info
)) {
15069 if (ndi_prop_update_string(DDI_DEV_T_NONE
,
15070 *lun_dip
, MPTSAS_VARIANT
, "atapi") !=
15071 DDI_PROP_SUCCESS
) {
15072 mptsas_log(mpt
, CE_WARN
,
15073 "mptsas unable to create "
15074 "property for device variant ");
15075 ndi_rtn
= NDI_FAILURE
;
15076 goto phys_create_done
;
15082 * if this is a SAS controller, and the target is a SATA
15083 * drive, set the 'pm-capable' property for sd and if on
15084 * an OPL platform, also check if this is an ATAPI
15087 instance
= ddi_get_instance(mpt
->m_dip
);
15088 if (devinfo
& (MPI2_SAS_DEVICE_INFO_SATA_DEVICE
|
15089 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE
)) {
15090 NDBG2(("mptsas%d: creating pm-capable property, "
15091 "target %d", instance
, target
));
15093 if ((ndi_prop_update_int(DDI_DEV_T_NONE
,
15094 *lun_dip
, "pm-capable", 1)) !=
15095 DDI_PROP_SUCCESS
) {
15096 mptsas_log(mpt
, CE_WARN
, "mptsas "
15097 "failed to create pm-capable "
15098 "property, target %d", target
);
15099 ndi_rtn
= NDI_FAILURE
;
15100 goto phys_create_done
;
15105 if ((inq
->inq_dtype
== 0) || (inq
->inq_dtype
== 5)) {
15107 * add 'obp-path' properties for devinfo
15109 bzero(wwn_str
, sizeof (wwn_str
));
15110 (void) sprintf(wwn_str
, "%016"PRIx64
, sas_wwn
);
15111 component
= kmem_zalloc(MAXPATHLEN
, KM_SLEEP
);
15113 (void) snprintf(component
, MAXPATHLEN
,
15114 "disk@w%s,%x", wwn_str
, lun
);
15116 (void) snprintf(component
, MAXPATHLEN
,
15117 "disk@p%x,%x", phy
, lun
);
15119 if (ddi_pathname_obp_set(*lun_dip
, component
)
15121 mptsas_log(mpt
, CE_WARN
, "mpt_sas driver "
15122 "unable to set obp-path for SAS "
15123 "object %s", component
);
15124 ndi_rtn
= NDI_FAILURE
;
15125 goto phys_create_done
;
15129 * Create the phy-num property for non-raid disk
15131 if (ptgt
->m_phymask
!= 0) {
15132 if (ndi_prop_update_int(DDI_DEV_T_NONE
,
15133 *lun_dip
, "phy-num", ptgt
->m_phynum
) !=
15134 DDI_PROP_SUCCESS
) {
15135 mptsas_log(mpt
, CE_WARN
, "mptsas driver "
15136 "failed to create phy-num property for "
15137 "target %d", target
);
15138 ndi_rtn
= NDI_FAILURE
;
15139 goto phys_create_done
;
15144 * If props were setup ok, online the lun
15146 if (ndi_rtn
== NDI_SUCCESS
) {
15148 * Try to online the new node
15150 ndi_rtn
= ndi_devi_online(*lun_dip
, NDI_ONLINE_ATTACH
);
15152 if (ndi_rtn
== NDI_SUCCESS
) {
15153 mutex_enter(&mpt
->m_mutex
);
15154 if (mptsas_set_led_status(mpt
, ptgt
, 0) !=
15156 NDBG14(("mptsas: clear LED for tgt %x "
15157 "failed", ptgt
->m_slot_num
));
15159 mutex_exit(&mpt
->m_mutex
);
15163 * If success set rtn flag, else unwire alloc'd lun
15165 if (ndi_rtn
!= NDI_SUCCESS
) {
15166 NDBG12(("mptsas driver unable to online "
15167 "target %d lun %d", target
, lun
));
15168 ndi_prop_remove_all(*lun_dip
);
15169 (void) ndi_devi_free(*lun_dip
);
15174 scsi_hba_nodename_compatible_free(nodename
, compatible
);
15176 if (wwn_str
!= NULL
) {
15177 kmem_free(wwn_str
, MPTSAS_WWN_STRLEN
);
15179 if (component
!= NULL
) {
15180 kmem_free(component
, MAXPATHLEN
);
15184 return ((ndi_rtn
== NDI_SUCCESS
) ? DDI_SUCCESS
: DDI_FAILURE
);
/*
 * Probe for an SMP target at the given SAS address by issuing a probe
 * through the HBA's SMP transport (mpt->m_smptran).
 *
 * Returns NDI_SUCCESS if the target responds to smp_probe(), NDI_FAILURE
 * otherwise.  Used to avoid configuring a node for a removed device.
 */
static int
mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
{
	mptsas_t *mpt = DIP2MPT(pdip);
	struct smp_device smp_sd;

	/* XXX An HBA driver should not be allocating an smp_device. */
	bzero(&smp_sd, sizeof (struct smp_device));
	smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
	/* Copy the 8-byte SAS address into the probe address structure. */
	bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);

	if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
		return (NDI_FAILURE);
	return (NDI_SUCCESS);
}
/*
 * Configure the SMP target with SAS address 'sas_wwn' under the iport
 * 'pdip': look it up in the driver's SMP hash table (discovering it if
 * necessary) and online its devinfo node.
 *
 * On success *smp_dip points at the child devinfo node.  Returns
 * DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
{
	mptsas_t *mpt = DIP2MPT(pdip);
	mptsas_smp_t *psmp = NULL;
	int rval;
	int phymask;

	/*
	 * Get the physical port associated to the iport.
	 * NOTE(review): assumes the iport node carries an integer "phymask"
	 * property set at iport creation — confirm against the attach path.
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);
	/*
	 * Find the smp node in hash table with specified sas address and
	 * physical port
	 */
	psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
	if (psmp == NULL) {
		return (DDI_FAILURE);
	}

	rval = mptsas_online_smp(pdip, psmp, smp_dip);

	return (rval);
}
15232 mptsas_online_smp(dev_info_t
*pdip
, mptsas_smp_t
*smp_node
,
15233 dev_info_t
**smp_dip
)
15235 char wwn_str
[MPTSAS_WWN_STRLEN
];
15236 char attached_wwn_str
[MPTSAS_WWN_STRLEN
];
15237 int ndi_rtn
= NDI_FAILURE
;
15239 mptsas_smp_t dev_info
;
15240 uint32_t page_address
;
15241 mptsas_t
*mpt
= DIP2MPT(pdip
);
15244 uint64_t smp_sas_wwn
;
15248 uint8_t numphys
= 0;
15250 char phymask
[MPTSAS_MAX_PHYS
];
15251 char *iport
= NULL
;
15252 mptsas_phymask_t phy_mask
= 0;
15253 uint16_t attached_devhdl
;
15254 uint16_t bay_num
, enclosure
;
15256 (void) sprintf(wwn_str
, "%"PRIx64
, smp_node
->m_sasaddr
);
15259 * Probe smp device, prevent the node of removed device from being
15260 * configured successfully
15262 if (mptsas_probe_smp(pdip
, smp_node
->m_sasaddr
) != NDI_SUCCESS
) {
15263 return (DDI_FAILURE
);
15266 if ((*smp_dip
= mptsas_find_smp_child(pdip
, wwn_str
)) != NULL
) {
15267 return (DDI_SUCCESS
);
15270 ndi_rtn
= ndi_devi_alloc(pdip
, "smp", DEVI_SID_NODEID
, smp_dip
);
15273 * if lun alloc success, set props
15275 if (ndi_rtn
== NDI_SUCCESS
) {
15277 * Set the flavor of the child to be SMP flavored
15279 ndi_flavor_set(*smp_dip
, SCSA_FLAVOR_SMP
);
15281 if (ndi_prop_update_string(DDI_DEV_T_NONE
,
15282 *smp_dip
, SMP_WWN
, wwn_str
) !=
15283 DDI_PROP_SUCCESS
) {
15284 mptsas_log(mpt
, CE_WARN
, "mptsas unable to create "
15285 "property for smp device %s (sas_wwn)",
15287 ndi_rtn
= NDI_FAILURE
;
15288 goto smp_create_done
;
15290 (void) sprintf(wwn_str
, "w%"PRIx64
, smp_node
->m_sasaddr
);
15291 if (ndi_prop_update_string(DDI_DEV_T_NONE
,
15292 *smp_dip
, SCSI_ADDR_PROP_TARGET_PORT
, wwn_str
) !=
15293 DDI_PROP_SUCCESS
) {
15294 mptsas_log(mpt
, CE_WARN
, "mptsas unable to create "
15295 "property for iport target-port %s (sas_wwn)",
15297 ndi_rtn
= NDI_FAILURE
;
15298 goto smp_create_done
;
15301 mutex_enter(&mpt
->m_mutex
);
15303 page_address
= (MPI2_SAS_EXPAND_PGAD_FORM_HNDL
&
15304 MPI2_SAS_EXPAND_PGAD_FORM_MASK
) | smp_node
->m_devhdl
;
15305 rval
= mptsas_get_sas_expander_page0(mpt
, page_address
,
15307 if (rval
!= DDI_SUCCESS
) {
15308 mutex_exit(&mpt
->m_mutex
);
15309 mptsas_log(mpt
, CE_WARN
,
15310 "mptsas unable to get expander "
15311 "parent device info for %x", page_address
);
15312 ndi_rtn
= NDI_FAILURE
;
15313 goto smp_create_done
;
15316 smp_node
->m_pdevhdl
= dev_info
.m_pdevhdl
;
15317 page_address
= (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE
&
15318 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) |
15319 (uint32_t)dev_info
.m_pdevhdl
;
15320 rval
= mptsas_get_sas_device_page0(mpt
, page_address
,
15321 &dev_hdl
, &sas_wwn
, &smp_node
->m_pdevinfo
,
15322 &physport
, &phy_id
, &pdev_hdl
, &bay_num
, &enclosure
);
15323 if (rval
!= DDI_SUCCESS
) {
15324 mutex_exit(&mpt
->m_mutex
);
15325 mptsas_log(mpt
, CE_WARN
, "mptsas unable to get "
15326 "device info for %x", page_address
);
15327 ndi_rtn
= NDI_FAILURE
;
15328 goto smp_create_done
;
15331 page_address
= (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE
&
15332 MPI2_SAS_DEVICE_PGAD_FORM_MASK
) |
15333 (uint32_t)dev_info
.m_devhdl
;
15334 rval
= mptsas_get_sas_device_page0(mpt
, page_address
,
15335 &dev_hdl
, &smp_sas_wwn
, &smp_node
->m_deviceinfo
,
15336 &physport
, &phy_id
, &pdev_hdl
, &bay_num
, &enclosure
);
15337 if (rval
!= DDI_SUCCESS
) {
15338 mutex_exit(&mpt
->m_mutex
);
15339 mptsas_log(mpt
, CE_WARN
, "mptsas unable to get "
15340 "device info for %x", page_address
);
15341 ndi_rtn
= NDI_FAILURE
;
15342 goto smp_create_done
;
15344 mutex_exit(&mpt
->m_mutex
);
15347 * If this smp direct attached to the controller
15348 * set the attached-port to the base wwid
15350 if ((smp_node
->m_deviceinfo
& DEVINFO_DIRECT_ATTACHED
)
15351 != DEVINFO_DIRECT_ATTACHED
) {
15352 (void) sprintf(attached_wwn_str
, "w%016"PRIx64
,
15355 (void) sprintf(attached_wwn_str
, "w%016"PRIx64
,
15356 mpt
->un
.m_base_wwid
);
15359 if (ndi_prop_update_string(DDI_DEV_T_NONE
,
15360 *smp_dip
, SCSI_ADDR_PROP_ATTACHED_PORT
, attached_wwn_str
) !=
15361 DDI_PROP_SUCCESS
) {
15362 mptsas_log(mpt
, CE_WARN
, "mptsas unable to create "
15363 "property for smp attached-port %s (sas_wwn)",
15365 ndi_rtn
= NDI_FAILURE
;
15366 goto smp_create_done
;
15369 if (ndi_prop_create_boolean(DDI_DEV_T_NONE
,
15370 *smp_dip
, SMP_PROP
) != DDI_PROP_SUCCESS
) {
15371 mptsas_log(mpt
, CE_WARN
, "mptsas unable to "
15372 "create property for SMP %s (SMP_PROP) ",
15374 ndi_rtn
= NDI_FAILURE
;
15375 goto smp_create_done
;
15379 * check the smp to see whether it direct
15380 * attached to the controller
15382 if ((smp_node
->m_deviceinfo
& DEVINFO_DIRECT_ATTACHED
)
15383 != DEVINFO_DIRECT_ATTACHED
) {
15384 goto smp_create_done
;
15386 numphys
= ddi_prop_get_int(DDI_DEV_T_ANY
, pdip
,
15387 DDI_PROP_DONTPASS
, MPTSAS_NUM_PHYS
, -1);
15389 goto smp_create_done
;
15392 * this iport is an old iport, we need to
15393 * reconfig the props for it.
15395 if (ddi_prop_update_int(DDI_DEV_T_NONE
, pdip
,
15396 MPTSAS_VIRTUAL_PORT
, 0) !=
15397 DDI_PROP_SUCCESS
) {
15398 (void) ddi_prop_remove(DDI_DEV_T_NONE
, pdip
,
15399 MPTSAS_VIRTUAL_PORT
);
15400 mptsas_log(mpt
, CE_WARN
, "mptsas virtual port "
15401 "prop update failed");
15402 goto smp_create_done
;
15405 mutex_enter(&mpt
->m_mutex
);
15407 iport
= ddi_get_name_addr(pdip
);
15408 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
15409 bzero(phymask
, sizeof (phymask
));
15410 (void) sprintf(phymask
,
15411 "%x", mpt
->m_phy_info
[i
].phy_mask
);
15412 if (strcmp(phymask
, iport
) == 0) {
15413 phy_mask
= mpt
->m_phy_info
[i
].phy_mask
;
15418 for (i
= 0; i
< MPTSAS_MAX_PHYS
; i
++) {
15419 if ((phy_mask
>> i
) & 0x01) {
15424 * Update PHY info for smhba
15426 if (mptsas_smhba_phy_init(mpt
)) {
15427 mutex_exit(&mpt
->m_mutex
);
15428 mptsas_log(mpt
, CE_WARN
, "mptsas phy update "
15430 goto smp_create_done
;
15432 mutex_exit(&mpt
->m_mutex
);
15434 mptsas_smhba_set_phy_props(mpt
, iport
, pdip
,
15435 numphys
, &attached_devhdl
);
15437 if (ddi_prop_update_int(DDI_DEV_T_NONE
, pdip
,
15438 MPTSAS_NUM_PHYS
, numphys
) !=
15439 DDI_PROP_SUCCESS
) {
15440 (void) ddi_prop_remove(DDI_DEV_T_NONE
, pdip
,
15442 mptsas_log(mpt
, CE_WARN
, "mptsas update "
15443 "num phys props failed");
15444 goto smp_create_done
;
15447 * Add parent's props for SMHBA support
15449 if (ddi_prop_update_string(DDI_DEV_T_NONE
, pdip
,
15450 SCSI_ADDR_PROP_ATTACHED_PORT
, wwn_str
) !=
15451 DDI_PROP_SUCCESS
) {
15452 (void) ddi_prop_remove(DDI_DEV_T_NONE
, pdip
,
15453 SCSI_ADDR_PROP_ATTACHED_PORT
);
15454 mptsas_log(mpt
, CE_WARN
, "mptsas update iport"
15455 "attached-port failed");
15456 goto smp_create_done
;
15461 * If props were setup ok, online the lun
15463 if (ndi_rtn
== NDI_SUCCESS
) {
15465 * Try to online the new node
15467 ndi_rtn
= ndi_devi_online(*smp_dip
, NDI_ONLINE_ATTACH
);
15471 * If success set rtn flag, else unwire alloc'd lun
15473 if (ndi_rtn
!= NDI_SUCCESS
) {
15474 NDBG12(("mptsas unable to online "
15475 "SMP target %s", wwn_str
));
15476 ndi_prop_remove_all(*smp_dip
);
15477 (void) ndi_devi_free(*smp_dip
);
15481 return ((ndi_rtn
== NDI_SUCCESS
) ? DDI_SUCCESS
: DDI_FAILURE
);
15484 /* smp transport routine */
/*
 * SMP transport start routine: translate a SCSA smp_pkt into an MPI2
 * SMP_PASSTHROUGH request, execute it synchronously through
 * mptsas_do_passthru(), and map the IOC/SAS completion status back into
 * smp_pkt_reason errno values.
 *
 * Returns DDI_SUCCESS only when both the IOC status and the SAS status
 * report success; otherwise sets smp_pkt->smp_pkt_reason and returns
 * DDI_FAILURE.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint32_t			direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Request size must fit in the 16-bit MPI2 RequestDataLength field. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * The 4-byte adjustment excludes the SMP CRC, which the IOC
	 * handles itself — presumably; confirm against the MPI2 spec.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
15570 * If we didn't get a match, we need to get sas page0 for each device, and
15571 * until we get a match. If failed, return NULL
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
{
	int		i, j = 0;
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	/* Count the bits set in phymask; more than one means a wide port. */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */
	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/* First, scan the targets already present in the hash table. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/* WWN of zero means the target is addressed by PHY number. */
		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
			mutex_exit(&mpt->m_mutex);
			return (ptgt);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Hash table is complete and had no match — nothing more to find. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;

	/* Walk device page 0 for each remaining handle until a match. */
	for (; ; ) {
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		mpt->m_dev_handle = cur_handle;

		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
15651 * The ptgt->m_sas_wwn contains the wwid for each disk.
15652 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
15653 * If we didn't get a match, we need to get sas page0 for each device, and
15654 * until we get a match
15655 * If failed, return NULL
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_target_t	*tmp_tgt = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: target already known in the hash table. */
	tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
	    &mpt->m_active->m_tgttbl, wwid, phymask);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			/* Retry the lookup now that RAID info is refreshed. */
			tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
			    &mpt->m_active->m_tgttbl, wwid, phymask);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* Hash table is complete and had no match — nothing more to find. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;

	/* Walk device page 0 for each remaining handle until a match. */
	for (; ; ) {
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
		    (tmp_tgt->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
/*
 * Look up the SMP node with the given SAS address and phymask in the SMP
 * hash table; if it is not there yet and expander discovery has not
 * finished, keep fetching expander page 0 for successive handles until a
 * match is found or discovery completes.  Returns NULL when no match.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_smp_t	smp_node, *psmp = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: already in the hash table. */
	psmp = (struct mptsas_smp *)mptsas_hash_search(
	    &mpt->m_active->m_smptbl, wwid, phymask);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* Discovery already complete and no match — nothing more to find. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;

	for (; ; ) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			break;
		}
		/* Remember the last handle so the next walk resumes here. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
		    (psmp->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
15764 /* helper functions using hash */
15767 * Can't have duplicate entries for same devhdl,
15768 * if there are invalid entries, the devhdl should be set to 0xffff
/*
 * Linear scan of the hash table for the entry with the given device
 * handle.  Returns the matching entry, or NULL when no entry carries
 * that devhdl.
 */
static void *
mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
{
	mptsas_hash_data_t *data;

	data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
	while (data != NULL) {
		if (data->devhdl == devhdl) {
			break;
		}
		data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
	}
	return (data);
}
15786 mptsas_tgt_alloc(mptsas_hash_table_t
*hashtab
, uint16_t devhdl
, uint64_t wwid
,
15787 uint32_t devinfo
, mptsas_phymask_t phymask
, uint8_t phynum
, mptsas_t
*mpt
)
15789 mptsas_target_t
*tmp_tgt
= NULL
;
15791 tmp_tgt
= mptsas_hash_search(hashtab
, wwid
, phymask
);
15792 if (tmp_tgt
!= NULL
) {
15793 NDBG20(("Hash item already exist"));
15794 tmp_tgt
->m_deviceinfo
= devinfo
;
15795 tmp_tgt
->m_devhdl
= devhdl
;
15798 tmp_tgt
= kmem_zalloc(sizeof (struct mptsas_target
), KM_SLEEP
);
15799 if (tmp_tgt
== NULL
) {
15800 cmn_err(CE_WARN
, "Fatal, allocated tgt failed");
15803 tmp_tgt
->m_devhdl
= devhdl
;
15804 tmp_tgt
->m_sas_wwn
= wwid
;
15805 tmp_tgt
->m_deviceinfo
= devinfo
;
15806 tmp_tgt
->m_phymask
= phymask
;
15807 tmp_tgt
->m_phynum
= phynum
;
15808 /* Initialized the tgt structure */
15809 tmp_tgt
->m_qfull_retries
= QFULL_RETRIES
;
15810 tmp_tgt
->m_qfull_retry_interval
=
15811 drv_usectohz(QFULL_RETRY_INTERVAL
* 1000);
15812 tmp_tgt
->m_t_throttle
= MAX_THROTTLE
;
15813 mutex_init(&tmp_tgt
->m_tgt_intr_mutex
, NULL
, MUTEX_DRIVER
,
15814 DDI_INTR_PRI(mpt
->m_intr_pri
));
15816 mptsas_hash_add(hashtab
, tmp_tgt
);
15822 mptsas_tgt_free(mptsas_hash_table_t
*hashtab
, uint64_t wwid
,
15823 mptsas_phymask_t phymask
)
15825 mptsas_target_t
*tmp_tgt
;
15826 tmp_tgt
= mptsas_hash_rem(hashtab
, wwid
, phymask
);
15827 if (tmp_tgt
== NULL
) {
15828 cmn_err(CE_WARN
, "Tgt not found, nothing to free");
15830 mutex_destroy(&tmp_tgt
->m_tgt_intr_mutex
);
15831 kmem_free(tmp_tgt
, sizeof (struct mptsas_target
));
15836 * Return the entry in the hash table
15838 static mptsas_smp_t
*
15839 mptsas_smp_alloc(mptsas_hash_table_t
*hashtab
, mptsas_smp_t
*data
)
15841 uint64_t key1
= data
->m_sasaddr
;
15842 mptsas_phymask_t key2
= data
->m_phymask
;
15843 mptsas_smp_t
*ret_data
;
15845 ret_data
= mptsas_hash_search(hashtab
, key1
, key2
);
15846 if (ret_data
!= NULL
) {
15847 bcopy(data
, ret_data
, sizeof (mptsas_smp_t
));
15851 ret_data
= kmem_alloc(sizeof (mptsas_smp_t
), KM_SLEEP
);
15852 bcopy(data
, ret_data
, sizeof (mptsas_smp_t
));
15853 mptsas_hash_add(hashtab
, ret_data
);
15858 mptsas_smp_free(mptsas_hash_table_t
*hashtab
, uint64_t wwid
,
15859 mptsas_phymask_t phymask
)
15861 mptsas_smp_t
*tmp_smp
;
15862 tmp_smp
= mptsas_hash_rem(hashtab
, wwid
, phymask
);
15863 if (tmp_smp
== NULL
) {
15864 cmn_err(CE_WARN
, "Smp element not found, nothing to free");
15866 kmem_free(tmp_smp
, sizeof (struct mptsas_smp
));
15871 * Hash operation functions
15872 * key1 is the sas_wwn, key2 is the phymask
15875 mptsas_hash_init(mptsas_hash_table_t
*hashtab
)
15877 if (hashtab
== NULL
) {
15880 bzero(hashtab
->head
, sizeof (mptsas_hash_node_t
) *
15881 MPTSAS_HASH_ARRAY_SIZE
);
15882 hashtab
->cur
= NULL
;
15887 mptsas_hash_uninit(mptsas_hash_table_t
*hashtab
, size_t datalen
)
15890 mptsas_hash_node_t
*cur
= NULL
, *last
= NULL
;
15892 if (hashtab
== NULL
) {
15895 for (line
= 0; line
< MPTSAS_HASH_ARRAY_SIZE
; line
++) {
15896 cur
= hashtab
->head
[line
];
15897 while (cur
!= NULL
) {
15900 kmem_free(last
->data
, datalen
);
15901 kmem_free(last
, sizeof (mptsas_hash_node_t
));
15907 * You must guarantee the element doesn't exist in the hash table
15908 * before you call mptsas_hash_add()
15911 mptsas_hash_add(mptsas_hash_table_t
*hashtab
, void *data
)
15913 uint64_t key1
= ((mptsas_hash_data_t
*)data
)->key1
;
15914 mptsas_phymask_t key2
= ((mptsas_hash_data_t
*)data
)->key2
;
15915 mptsas_hash_node_t
**head
= NULL
;
15916 mptsas_hash_node_t
*node
= NULL
;
15918 if (hashtab
== NULL
) {
15921 ASSERT(mptsas_hash_search(hashtab
, key1
, key2
) == NULL
);
15922 node
= kmem_zalloc(sizeof (mptsas_hash_node_t
), KM_NOSLEEP
);
15925 head
= &(hashtab
->head
[key1
% MPTSAS_HASH_ARRAY_SIZE
]);
15926 if (*head
== NULL
) {
15929 node
->next
= *head
;
15935 mptsas_hash_rem(mptsas_hash_table_t
*hashtab
, uint64_t key1
,
15936 mptsas_phymask_t key2
)
15938 mptsas_hash_node_t
**head
= NULL
;
15939 mptsas_hash_node_t
*last
= NULL
, *cur
= NULL
;
15940 mptsas_hash_data_t
*data
;
15941 if (hashtab
== NULL
) {
15944 head
= &(hashtab
->head
[key1
% MPTSAS_HASH_ARRAY_SIZE
]);
15946 while (cur
!= NULL
) {
15948 if ((data
->key1
== key1
) && (data
->key2
== key2
)) {
15949 if (last
== NULL
) {
15950 (*head
) = cur
->next
;
15952 last
->next
= cur
->next
;
15954 kmem_free(cur
, sizeof (mptsas_hash_node_t
));
15965 mptsas_hash_search(mptsas_hash_table_t
*hashtab
, uint64_t key1
,
15966 mptsas_phymask_t key2
)
15968 mptsas_hash_node_t
*cur
= NULL
;
15969 mptsas_hash_data_t
*data
;
15970 if (hashtab
== NULL
) {
15973 cur
= hashtab
->head
[key1
% MPTSAS_HASH_ARRAY_SIZE
];
15974 while (cur
!= NULL
) {
15976 if ((data
->key1
== key1
) && (data
->key2
== key2
)) {
15986 mptsas_hash_traverse(mptsas_hash_table_t
*hashtab
, int pos
)
15988 mptsas_hash_node_t
*this = NULL
;
15990 if (hashtab
== NULL
) {
15994 if (pos
== MPTSAS_HASH_FIRST
) {
15996 hashtab
->cur
= NULL
;
15997 this = hashtab
->head
[0];
15999 if (hashtab
->cur
== NULL
) {
16002 this = hashtab
->cur
->next
;
16006 while (this == NULL
) {
16008 if (hashtab
->line
>= MPTSAS_HASH_ARRAY_SIZE
) {
16009 /* the traverse reaches the end */
16010 hashtab
->cur
= NULL
;
16013 this = hashtab
->head
[hashtab
->line
];
16016 hashtab
->cur
= this;
16017 return (this->data
);
16021 * Functions for SGPIO LED support
16023 static dev_info_t
*
16024 mptsas_get_dip_from_dev(dev_t dev
, mptsas_phymask_t
*phymask
)
16028 dip
= e_ddi_hold_devi_by_dev(dev
, 0);
16031 prop
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
, 0,
16033 *phymask
= (mptsas_phymask_t
)prop
;
16034 ddi_release_devi(dip
);
16037 static mptsas_target_t
*
16038 mptsas_addr_to_ptgt(mptsas_t
*mpt
, char *addr
, mptsas_phymask_t phymask
)
16043 mptsas_target_t
*ptgt
= NULL
;
16045 if (mptsas_parse_address(addr
, &wwn
, &phynum
, &lun
) != DDI_SUCCESS
) {
16048 if (addr
[0] == 'w') {
16049 ptgt
= mptsas_wwid_to_ptgt(mpt
, (int)phymask
, wwn
);
16051 ptgt
= mptsas_phy_to_tgt(mpt
, (int)phymask
, phynum
);
16056 #ifdef MPTSAS_GET_LED
16058 mptsas_get_led_status(mptsas_t
*mpt
, mptsas_target_t
*ptgt
,
16059 uint32_t *slotstatus
)
16061 return (mptsas_send_sep(mpt
, ptgt
, slotstatus
,
16062 MPI2_SEP_REQ_ACTION_READ_STATUS
));
16066 mptsas_set_led_status(mptsas_t
*mpt
, mptsas_target_t
*ptgt
, uint32_t slotstatus
)
16068 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16069 slotstatus
, ptgt
->m_slot_num
));
16070 return (mptsas_send_sep(mpt
, ptgt
, &slotstatus
,
16071 MPI2_SEP_REQ_ACTION_WRITE_STATUS
));
16074 * send sep request, use enclosure/slot addressing
16076 static int mptsas_send_sep(mptsas_t
*mpt
, mptsas_target_t
*ptgt
,
16077 uint32_t *status
, uint8_t act
)
16079 Mpi2SepRequest_t req
;
16080 Mpi2SepReply_t rep
;
16083 ASSERT(mutex_owned(&mpt
->m_mutex
));
16085 bzero(&req
, sizeof (req
));
16086 bzero(&rep
, sizeof (rep
));
16088 /* Do nothing for RAID volumes */
16089 if (ptgt
->m_phymask
== 0) {
16090 NDBG14(("mptsas_send_sep: Skip RAID volumes"));
16091 return (DDI_FAILURE
);
16094 req
.Function
= MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR
;
16096 req
.Flags
= MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS
;
16097 req
.EnclosureHandle
= LE_16(ptgt
->m_enclosure
);
16098 req
.Slot
= LE_16(ptgt
->m_slot_num
);
16099 if (act
== MPI2_SEP_REQ_ACTION_WRITE_STATUS
) {
16100 req
.SlotStatus
= LE_32(*status
);
16102 ret
= mptsas_do_passthru(mpt
, (uint8_t *)&req
, (uint8_t *)&rep
, NULL
,
16103 sizeof (req
), sizeof (rep
), NULL
, 0, NULL
, 0, 60, FKIOCTL
);
16105 mptsas_log(mpt
, CE_NOTE
, "mptsas_send_sep: passthru SEP "
16106 "Processor Request message error %d", ret
);
16107 return (DDI_FAILURE
);
16109 /* do passthrough success, check the ioc status */
16110 if (LE_16(rep
.IOCStatus
) != MPI2_IOCSTATUS_SUCCESS
) {
16111 if ((LE_16(rep
.IOCStatus
) & MPI2_IOCSTATUS_MASK
) ==
16112 MPI2_IOCSTATUS_INVALID_FIELD
) {
16113 mptsas_log(mpt
, CE_NOTE
, "send sep act %x: Not "
16114 "supported action, loginfo %x", act
,
16115 LE_32(rep
.IOCLogInfo
));
16116 return (DDI_FAILURE
);
16118 mptsas_log(mpt
, CE_NOTE
, "send_sep act %x: ioc "
16119 "status:%x", act
, LE_16(rep
.IOCStatus
));
16120 return (DDI_FAILURE
);
16122 if (act
!= MPI2_SEP_REQ_ACTION_WRITE_STATUS
) {
16123 *status
= LE_32(rep
.SlotStatus
);
16126 return (DDI_SUCCESS
);
16130 mptsas_dma_addr_create(mptsas_t
*mpt
, ddi_dma_attr_t dma_attr
,
16131 ddi_dma_handle_t
*dma_hdp
, ddi_acc_handle_t
*acc_hdp
, caddr_t
*dma_memp
,
16132 uint32_t alloc_size
, ddi_dma_cookie_t
*cookiep
)
16134 ddi_dma_cookie_t new_cookie
;
16138 if (cookiep
== NULL
)
16139 cookiep
= &new_cookie
;
16141 if (ddi_dma_alloc_handle(mpt
->m_dip
, &dma_attr
, DDI_DMA_SLEEP
,
16142 NULL
, dma_hdp
) != DDI_SUCCESS
) {
16147 if (ddi_dma_mem_alloc(*dma_hdp
, alloc_size
, &mpt
->m_dev_acc_attr
,
16148 DDI_DMA_CONSISTENT
, DDI_DMA_SLEEP
, NULL
, dma_memp
, &alloc_len
,
16149 acc_hdp
) != DDI_SUCCESS
) {
16150 ddi_dma_free_handle(dma_hdp
);
16155 if (ddi_dma_addr_bind_handle(*dma_hdp
, NULL
, *dma_memp
, alloc_len
,
16156 (DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
), DDI_DMA_SLEEP
, NULL
,
16157 cookiep
, &ncookie
) != DDI_DMA_MAPPED
) {
16158 (void) ddi_dma_mem_free(acc_hdp
);
16159 ddi_dma_free_handle(dma_hdp
);
16168 mptsas_dma_addr_destroy(ddi_dma_handle_t
*dma_hdp
, ddi_acc_handle_t
*acc_hdp
)
16170 if (*dma_hdp
== NULL
)
16173 (void) ddi_dma_unbind_handle(*dma_hdp
);
16174 (void) ddi_dma_mem_free(acc_hdp
);
16175 ddi_dma_free_handle(dma_hdp
);
16180 mptsas_outstanding_cmds_n(mptsas_t
*mpt
)
16183 for (i
= 0; i
< mpt
->m_slot_freeq_pair_n
; i
++) {
16184 mutex_enter(&mpt
->m_slot_freeq_pairp
[i
].
16185 m_slot_allocq
.s
.m_fq_mutex
);
16186 mutex_enter(&mpt
->m_slot_freeq_pairp
[i
].
16187 m_slot_releq
.s
.m_fq_mutex
);
16188 n
+= (mpt
->m_slot_freeq_pairp
[i
].m_slot_allocq
.s
.m_fq_n_init
-
16189 mpt
->m_slot_freeq_pairp
[i
].m_slot_allocq
.s
.m_fq_n
-
16190 mpt
->m_slot_freeq_pairp
[i
].m_slot_releq
.s
.m_fq_n
);
16191 mutex_exit(&mpt
->m_slot_freeq_pairp
[i
].
16192 m_slot_releq
.s
.m_fq_mutex
);
16193 mutex_exit(&mpt
->m_slot_freeq_pairp
[i
].
16194 m_slot_allocq
.s
.m_fq_mutex
);
16196 if (mpt
->m_max_requests
- 2 < n
)
16197 panic("mptsas: free slot allocq and releq crazy");