2 * mr_sas.c: source for mr_sas driver
4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 * Copyright (c) 2008-2012, LSI Logic Corporation.
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions are met:
19 * 1. Redistributions of source code must retain the above copyright notice,
20 * this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 * this list of conditions and the following disclaimer in the documentation
24 * and/or other materials provided with the distribution.
26 * 3. Neither the name of the author nor the names of its contributors may be
27 * used to endorse or promote products derived from this software without
28 * specific prior written permission.
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
48 * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
49 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
52 #include <sys/types.h>
53 #include <sys/param.h>
55 #include <sys/errno.h>
58 #include <sys/modctl.h>
60 #include <sys/devops.h>
61 #include <sys/cmn_err.h>
64 #include <sys/mkdev.h>
66 #include <sys/scsi/scsi.h>
68 #include <sys/sunddi.h>
69 #include <sys/atomic.h>
70 #include <sys/signal.h>
71 #include <sys/byteorder.h>
73 #include <sys/fs/dv_node.h> /* devfs_clean */
80 #include <sys/ddifm.h>
81 #include <sys/fm/protocol.h>
82 #include <sys/fm/util.h>
83 #include <sys/fm/io/ddi.h>
/* Macros to help Skinny and stock 2108/MFI live together. */
/*
 * Route an inbound-queue write to the correct doorbell registers:
 * Skinny controllers use the split low/high queue-port pair, while
 * stock 2108/MFI controllers use the single queue port.
 */
#define	WR_IB_PICK_QPORT(addr, instance) \
	if ((instance)->skinny) { \
		WR_IB_LOW_QPORT((addr), (instance)); \
		WR_IB_HIGH_QPORT(0, (instance)); \
	} else { \
		WR_IB_QPORT((addr), (instance)); \
	}
/* Anchor for the per-controller soft-state managed via ddi_soft_state_*(). */
static void *mrsas_state = NULL;
/* When B_TRUE, DDI_DMA_RELAXED_ORDERING is OR'ed into the generic DMA attrs. */
static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
/* Global con_log() verbosity level; CL_NONE silences debug logging. */
volatile int debug_level_g = CL_NONE;
/* Tunable: non-zero prefers MSI-X/MSI over FIXED interrupts at attach. */
static volatile int msi_enable = 1;
/* Tunable: consulted together with the "mrsas-enable-ctio" property. */
static volatile int ctio_enable = 1;

/* Default Timeout value to issue online controller reset */
volatile int debug_timeout_g = 0xF0;	/* 0xB4; */
/* Simulate consecutive firmware fault */
static volatile int debug_fw_faults_after_ocr_g = 0;

/* Simulate three consecutive timeout for an IO */
static volatile int debug_consecutive_timeout_after_ocr_g = 0;

/*
 * Weak bindings: resolved only when the scsi_hba module provides them,
 * so the driver can test the symbols for presence at run time.
 */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl
116 /* Local static prototypes. */
117 static int mrsas_getinfo(dev_info_t
*, ddi_info_cmd_t
, void *, void **);
118 static int mrsas_attach(dev_info_t
*, ddi_attach_cmd_t
);
119 static int mrsas_quiesce(dev_info_t
*);
120 static int mrsas_detach(dev_info_t
*, ddi_detach_cmd_t
);
121 static int mrsas_open(dev_t
*, int, int, cred_t
*);
122 static int mrsas_close(dev_t
, int, int, cred_t
*);
123 static int mrsas_ioctl(dev_t
, int, intptr_t, int, cred_t
*, int *);
125 static int mrsas_tran_tgt_init(dev_info_t
*, dev_info_t
*,
126 scsi_hba_tran_t
*, struct scsi_device
*);
127 static struct scsi_pkt
*mrsas_tran_init_pkt(struct scsi_address
*, register
128 struct scsi_pkt
*, struct buf
*, int, int, int, int,
130 static int mrsas_tran_start(struct scsi_address
*,
131 register struct scsi_pkt
*);
132 static int mrsas_tran_abort(struct scsi_address
*, struct scsi_pkt
*);
133 static int mrsas_tran_reset(struct scsi_address
*, int);
134 static int mrsas_tran_getcap(struct scsi_address
*, char *, int);
135 static int mrsas_tran_setcap(struct scsi_address
*, char *, int, int);
136 static void mrsas_tran_destroy_pkt(struct scsi_address
*,
138 static void mrsas_tran_dmafree(struct scsi_address
*, struct scsi_pkt
*);
139 static void mrsas_tran_sync_pkt(struct scsi_address
*, struct scsi_pkt
*);
140 static int mrsas_tran_quiesce(dev_info_t
*dip
);
141 static int mrsas_tran_unquiesce(dev_info_t
*dip
);
142 static uint_t
mrsas_isr();
143 static uint_t
mrsas_softintr();
144 static void mrsas_undo_resources(dev_info_t
*, struct mrsas_instance
*);
146 static void free_space_for_mfi(struct mrsas_instance
*);
147 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance
*);
148 static void issue_cmd_ppc(struct mrsas_cmd
*, struct mrsas_instance
*);
149 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance
*,
151 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance
*,
153 static void enable_intr_ppc(struct mrsas_instance
*);
154 static void disable_intr_ppc(struct mrsas_instance
*);
155 static int intr_ack_ppc(struct mrsas_instance
*);
156 static void flush_cache(struct mrsas_instance
*instance
);
157 void display_scsi_inquiry(caddr_t
);
158 static int start_mfi_aen(struct mrsas_instance
*instance
);
159 static int handle_drv_ioctl(struct mrsas_instance
*instance
,
160 struct mrsas_ioctl
*ioctl
, int mode
);
161 static int handle_mfi_ioctl(struct mrsas_instance
*instance
,
162 struct mrsas_ioctl
*ioctl
, int mode
);
163 static int handle_mfi_aen(struct mrsas_instance
*instance
,
164 struct mrsas_aen
*aen
);
165 static struct mrsas_cmd
*build_cmd(struct mrsas_instance
*,
166 struct scsi_address
*, struct scsi_pkt
*, uchar_t
*);
167 static int alloc_additional_dma_buffer(struct mrsas_instance
*);
168 static void complete_cmd_in_sync_mode(struct mrsas_instance
*,
170 static int mrsas_kill_adapter(struct mrsas_instance
*);
171 static int mrsas_issue_init_mfi(struct mrsas_instance
*);
172 static int mrsas_reset_ppc(struct mrsas_instance
*);
173 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance
*);
174 static int wait_for_outstanding(struct mrsas_instance
*instance
);
175 static int register_mfi_aen(struct mrsas_instance
*instance
,
176 uint32_t seq_num
, uint32_t class_locale_word
);
177 static int issue_mfi_pthru(struct mrsas_instance
*instance
, struct
178 mrsas_ioctl
*ioctl
, struct mrsas_cmd
*cmd
, int mode
);
179 static int issue_mfi_dcmd(struct mrsas_instance
*instance
, struct
180 mrsas_ioctl
*ioctl
, struct mrsas_cmd
*cmd
, int mode
);
181 static int issue_mfi_smp(struct mrsas_instance
*instance
, struct
182 mrsas_ioctl
*ioctl
, struct mrsas_cmd
*cmd
, int mode
);
183 static int issue_mfi_stp(struct mrsas_instance
*instance
, struct
184 mrsas_ioctl
*ioctl
, struct mrsas_cmd
*cmd
, int mode
);
185 static int abort_aen_cmd(struct mrsas_instance
*instance
,
186 struct mrsas_cmd
*cmd_to_abort
);
188 static void mrsas_rem_intrs(struct mrsas_instance
*instance
);
189 static int mrsas_add_intrs(struct mrsas_instance
*instance
, int intr_type
);
191 static void mrsas_tran_tgt_free(dev_info_t
*, dev_info_t
*,
192 scsi_hba_tran_t
*, struct scsi_device
*);
193 static int mrsas_tran_bus_config(dev_info_t
*, uint_t
,
194 ddi_bus_config_op_t
, void *, dev_info_t
**);
195 static int mrsas_parse_devname(char *, int *, int *);
196 static int mrsas_config_all_devices(struct mrsas_instance
*);
197 static int mrsas_config_ld(struct mrsas_instance
*, uint16_t,
198 uint8_t, dev_info_t
**);
199 static int mrsas_name_node(dev_info_t
*, char *, int);
200 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo
*);
201 static void free_additional_dma_buffer(struct mrsas_instance
*);
202 static void io_timeout_checker(void *);
203 static void mrsas_fm_init(struct mrsas_instance
*);
204 static void mrsas_fm_fini(struct mrsas_instance
*);
206 static struct mrsas_function_template mrsas_function_template_ppc
= {
207 .read_fw_status_reg
= read_fw_status_reg_ppc
,
208 .issue_cmd
= issue_cmd_ppc
,
209 .issue_cmd_in_sync_mode
= issue_cmd_in_sync_mode_ppc
,
210 .issue_cmd_in_poll_mode
= issue_cmd_in_poll_mode_ppc
,
211 .enable_intr
= enable_intr_ppc
,
212 .disable_intr
= disable_intr_ppc
,
213 .intr_ack
= intr_ack_ppc
,
214 .init_adapter
= mrsas_init_adapter_ppc
218 static struct mrsas_function_template mrsas_function_template_fusion
= {
219 .read_fw_status_reg
= tbolt_read_fw_status_reg
,
220 .issue_cmd
= tbolt_issue_cmd
,
221 .issue_cmd_in_sync_mode
= tbolt_issue_cmd_in_sync_mode
,
222 .issue_cmd_in_poll_mode
= tbolt_issue_cmd_in_poll_mode
,
223 .enable_intr
= tbolt_enable_intr
,
224 .disable_intr
= tbolt_disable_intr
,
225 .intr_ack
= tbolt_intr_ack
,
226 .init_adapter
= mrsas_init_adapter_tbolt
230 ddi_dma_attr_t mrsas_generic_dma_attr
= {
231 DMA_ATTR_V0
, /* dma_attr_version */
232 0, /* low DMA address range */
233 0xFFFFFFFFU
, /* high DMA address range */
234 0xFFFFFFFFU
, /* DMA counter register */
235 8, /* DMA address alignment */
236 0x07, /* DMA burstsizes */
237 1, /* min DMA size */
238 0xFFFFFFFFU
, /* max DMA size */
239 0xFFFFFFFFU
, /* segment boundary */
240 MRSAS_MAX_SGE_CNT
, /* dma_attr_sglen */
241 512, /* granularity of device */
242 0 /* bus specific DMA flags */
/* Maximum transfer size (bytes) advertised for MFI-era (2108) controllers. */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
 * so cap Thunderbolt transfers at 512 * 512-byte sectors (256 KiB).
 */
uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
254 * cb_ops contains base level routines
256 static struct cb_ops mrsas_cb_ops
= {
257 mrsas_open
, /* open */
258 mrsas_close
, /* close */
259 nodev
, /* strategy */
264 mrsas_ioctl
, /* ioctl */
269 nodev
, /* cb_prop_op */
271 D_NEW
| D_HOTPLUG
, /* cb_flag */
273 nodev
, /* cb_aread */
274 nodev
/* cb_awrite */
278 * dev_ops contains configuration routines
280 static struct dev_ops mrsas_ops
= {
283 mrsas_getinfo
, /* getinfo */
284 nulldev
, /* identify */
286 mrsas_attach
, /* attach */
287 mrsas_detach
, /* detach */
289 &mrsas_cb_ops
, /* char/block ops */
292 mrsas_quiesce
/* quiesce */
295 static struct modldrv modldrv
= {
296 &mod_driverops
, /* module type - driver */
298 &mrsas_ops
, /* driver ops */
301 static struct modlinkage modlinkage
= {
302 MODREV_1
, /* ml_rev - must be MODREV_1 */
303 &modldrv
, /* ml_linkage */
304 NULL
/* end of driver linkage */
307 static struct ddi_device_acc_attr endian_attr
= {
309 DDI_STRUCTURE_LE_ACC
,
/* Use the LSI Fast Path for the 2208 (tbolt) commands. */
/* Tunable: cleared in mrsas_attach() when "mrsas-enable-fp" is "no". */
unsigned int enable_fp = 1;
319 * ************************************************************************** *
321 * common entry points - for loadable kernel modules *
323 * ************************************************************************** *
327 * _init - initialize a loadable module
330 * The driver should perform any one-time resource allocation or data
331 * initialization during driver loading in _init(). For example, the driver
332 * should initialize any mutexes global to the driver in this routine.
333 * The driver should not, however, use _init() to allocate or initialize
334 * anything that has to do with a particular instance of the device.
335 * Per-instance initialization must be done in attach().
342 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
344 ret
= ddi_soft_state_init(&mrsas_state
,
345 sizeof (struct mrsas_instance
), 0);
347 if (ret
!= DDI_SUCCESS
) {
348 cmn_err(CE_WARN
, "mr_sas: could not init state");
352 if ((ret
= scsi_hba_init(&modlinkage
)) != DDI_SUCCESS
) {
353 cmn_err(CE_WARN
, "mr_sas: could not init scsi hba");
354 ddi_soft_state_fini(&mrsas_state
);
358 ret
= mod_install(&modlinkage
);
360 if (ret
!= DDI_SUCCESS
) {
361 cmn_err(CE_WARN
, "mr_sas: mod_install failed");
362 scsi_hba_fini(&modlinkage
);
363 ddi_soft_state_fini(&mrsas_state
);
370 * _info - returns information about a loadable module.
373 * _info() is called to return module information. This is a typical entry
374 * point that does predefined role. It simply calls mod_info().
377 _info(struct modinfo
*modinfop
)
379 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
381 return (mod_info(&modlinkage
, modinfop
));
385 * _fini - prepare a loadable module for unloading
388 * In _fini(), the driver should release any resources that were allocated in
389 * _init(). The driver must remove itself from the system module list.
396 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
398 if ((ret
= mod_remove(&modlinkage
)) != DDI_SUCCESS
) {
400 (CE_WARN
, "_fini: mod_remove() failed, error 0x%X", ret
));
404 scsi_hba_fini(&modlinkage
);
405 con_log(CL_DLEVEL1
, (CE_NOTE
, "_fini: scsi_hba_fini() done."));
407 ddi_soft_state_fini(&mrsas_state
);
408 con_log(CL_DLEVEL1
, (CE_NOTE
, "_fini: ddi_soft_state_fini() done."));
415 * ************************************************************************** *
417 * common entry points - for autoconfiguration *
419 * ************************************************************************** *
422 * attach - adds a device to the system as part of initialization
426 * The kernel calls a driver's attach() entry point to attach an instance of
427 * a device (for MegaRAID, it is instance of a controller) or to resume
428 * operation for an instance of a device that has been suspended or has been
429 * shut down by the power management framework
430 * The attach() entry point typically includes the following types of
432 * - allocate a soft-state structure for the device instance (for MegaRAID,
433 * controller instance)
434 * - initialize per-instance mutexes
435 * - initialize condition variables
436 * - register the device's interrupts (for MegaRAID, controller's interrupts)
437 * - map the registers and memory of the device instance (for MegaRAID,
438 * controller instance)
439 * - create minor device nodes for the device instance (for MegaRAID,
440 * controller instance)
441 * - report that the device instance (for MegaRAID, controller instance) has
/*
 * mrsas_attach - DDI attach(9E) entry point for a controller instance.
 *
 * Allocates soft state, maps PCI config/register space, selects the
 * per-chip function-pointer template from the PCI device ID, registers
 * interrupts, initializes mutexes/CVs, attaches the SCSA transport,
 * creates minor nodes, starts the AEN machinery, and allocates the
 * LD/PD bookkeeping lists.  Each completed step sets a flag in
 * instance->unroll so mrsas_undo_resources() can tear down exactly
 * what was built on any failure path.
 *
 * NOTE(review): this extraction has dropped lines (closing braces,
 * `switch (cmd)` / `case DDI_ATTACH:` headers, goto labels, `break`s,
 * and some expression tails).  All surviving tokens are preserved
 * unchanged below; reconcile against upstream illumos mr_sas.c.
 */
mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
	scsi_hba_tran_t		*tran;
	ddi_dma_attr_t	tran_dma_attr;
	struct mrsas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* single-threaded by the DDI during attach */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	/*
	 * check to see whether this device is in a DMA-capable slot.
	 */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "Device in slave-only slot, unused");
		return (DDI_FAILURE);

	/* allocate the soft state for the instance */
	if (ddi_soft_state_zalloc(mrsas_state, instance_no)
		dev_err(dip, CE_WARN, "Failed to allocate soft state");
		return (DDI_FAILURE);

	instance = (struct mrsas_instance *)ddi_get_soft_state
	    (mrsas_state, instance_no);

	if (instance == NULL) {
		dev_err(dip, CE_WARN, "Bad soft state");
		ddi_soft_state_free(mrsas_state, instance_no);
		return (DDI_FAILURE);

	instance->unroll.softs	= 1;

	/* Setup the PCI configuration space handles */
	if (pci_config_setup(dip, &instance->pci_handle) !=
		dev_err(dip, CE_WARN, "pci config setup failed");
		ddi_soft_state_free(mrsas_state, instance_no);
		return (DDI_FAILURE);

	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "Failed to get registers");
		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		return (DDI_FAILURE);

	/* identify the board from PCI config space */
	vendor_id = pci_config_get16(instance->pci_handle,
	device_id = pci_config_get16(instance->pci_handle,
	subsysvid = pci_config_get16(instance->pci_handle,
	subsysid = pci_config_get16(instance->pci_handle,

	pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
	    (pci_config_get16(instance->pci_handle,
	    PCI_CONF_COMM) | PCI_COMM_ME));
	irq = pci_config_get8(instance->pci_handle,

	dev_err(dip, CE_CONT,
	    "?0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
	    vendor_id, device_id, subsysvid,
	    subsysid, irq, MRSAS_VERSION);

	/* enable bus-mastering */
	command = pci_config_get16(instance->pci_handle,

	if (!(command & PCI_COMM_ME)) {
		command |= PCI_COMM_ME;
		pci_config_put16(instance->pci_handle,
		    PCI_CONF_COMM, command);
		con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
		    "enable bus-mastering", instance_no));
		con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
		    "bus-mastering already set", instance_no));

	/* initialize function pointers */
	case PCI_DEVICE_ID_LSI_INVADER:
	case PCI_DEVICE_ID_LSI_FURY:
	case PCI_DEVICE_ID_LSI_INTRUDER:
	case PCI_DEVICE_ID_LSI_INTRUDER_24:
	case PCI_DEVICE_ID_LSI_CUTLASS_52:
	case PCI_DEVICE_ID_LSI_CUTLASS_53:
		dev_err(dip, CE_CONT, "?Gen3 device detected\n");
	case PCI_DEVICE_ID_LSI_TBOLT:
		dev_err(dip, CE_CONT, "?TBOLT device detected\n");
		    &mrsas_function_template_fusion;

	case PCI_DEVICE_ID_LSI_SKINNY:
	case PCI_DEVICE_ID_LSI_SKINNY_NEW:
		/*
		 * FALLTHRU to PPC-style functions, but mark this
		 * instance as Skinny, because the register set is
		 * slightly different (See WR_IB_PICK_QPORT), and
		 * certain other features are available to a Skinny
		 */
		dev_err(dip, CE_CONT, "?Skinny device detected\n");
		instance->skinny = 1;

	case PCI_DEVICE_ID_LSI_2108VDE:
	case PCI_DEVICE_ID_LSI_2108V:
		dev_err(dip, CE_CONT,
		    "?2108 Liberator device detected\n");
		    &mrsas_function_template_ppc;

		dev_err(dip, CE_WARN, "Invalid device detected");
		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		return (DDI_FAILURE);

	instance->baseaddress = pci_config_get32(
	    instance->pci_handle, PCI_CONF_BASE0);
	instance->baseaddress &= 0x0fffc;

	/* stash identity in the soft state */
	instance->vendor_id = vendor_id;
	instance->device_id = device_id;
	instance->subsysvid = subsysvid;
	instance->subsysid = subsysid;
	instance->instance = instance_no;

	/* FMA capabilities from the "fm-capable" property */
	instance->fm_capabilities = ddi_prop_get_int(
	    DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
	    "fm-capable", DDI_FM_EREPORT_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
	    | DDI_FM_ERRCB_CAPABLE);

	mrsas_fm_init(instance);

	/* Setup register map */
	if ((ddi_dev_regsize(instance->dip,
	    REGISTER_SET_IO_2108, &reglength) != DDI_SUCCESS) ||
	    reglength < MINIMUM_MFI_MEM_SZ) {

	if (reglength > DEFAULT_MFI_MEM_SZ) {
		reglength = DEFAULT_MFI_MEM_SZ;
		con_log(CL_DLEVEL1, (CE_NOTE,
		    "mr_sas: register length to map is 0x%lx bytes",

	if (ddi_regs_map_setup(instance->dip,
	    REGISTER_SET_IO_2108, &instance->regmap, 0,
	    reglength, &endian_attr, &instance->regmap_handle)
		dev_err(dip, CE_WARN, "couldn't map control registers");

	instance->unroll.regs = 1;

	/*
	 * Disable Interrupt Now.
	 * Setup Software interrupt
	 */
	instance->func_ptr->disable_intr(instance);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
	    "mrsas-enable-msi", &data) == DDI_SUCCESS) {
		if (strncmp(data, "no", 3) == 0) {
			con_log(CL_ANN1, (CE_WARN,
			    "msi_enable = %d disabled", msi_enable));

	dev_err(dip, CE_CONT, "?msi_enable = %d\n", msi_enable);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
	    "mrsas-enable-fp", &data) == DDI_SUCCESS) {
		if (strncmp(data, "no", 3) == 0) {
			dev_err(dip, CE_NOTE,
			    "enable_fp = %d, Fast-Path disabled.\n",

	dev_err(dip, CE_CONT, "?enable_fp = %d\n", enable_fp);

	/* Check for all supported interrupt types */
	if (ddi_intr_get_supported_types(
	    dip, &intr_types) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN,
		    "ddi_intr_get_supported_types() failed");

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "ddi_intr_get_supported_types() ret: 0x%x", intr_types));

	/* Initialize and Setup Interrupt handler */
	/* preference order: MSI-X, then MSI, then FIXED */
	if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
		if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
			dev_err(dip, CE_WARN,
			    "MSIX interrupt query failed");
		instance->intr_type = DDI_INTR_TYPE_MSIX;
	} else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
		if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
			dev_err(dip, CE_WARN,
			    "MSI interrupt query failed");
		instance->intr_type = DDI_INTR_TYPE_MSI;
	} else if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
			dev_err(dip, CE_WARN,
			    "FIXED interrupt query failed");
		instance->intr_type = DDI_INTR_TYPE_FIXED;
		dev_err(dip, CE_WARN, "Device cannot "
		    "suppport either FIXED or MSI/X "

	instance->unroll.intr = 1;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
	    "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
		if (strncmp(data, "no", 3) == 0) {
			con_log(CL_ANN1, (CE_WARN,
			    "ctio_enable = %d disabled", ctio_enable));

	dev_err(dip, CE_CONT, "?ctio_enable = %d\n", ctio_enable);

	/* setup the mfi based low level driver */
	if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN,
		    "could not initialize the low level driver");

	/* Initialize all Mutex */
	INIT_LIST_HEAD(&instance->completed_pool_list);
	mutex_init(&instance->completed_pool_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	mutex_init(&instance->sync_map_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	mutex_init(&instance->app_cmd_pool_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	mutex_init(&instance->config_dev_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	mutex_init(&instance->cmd_pend_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	mutex_init(&instance->ocr_flags_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	mutex_init(&instance->int_cmd_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
	cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&instance->cmd_pool_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	mutex_init(&instance->reg_write_mtx, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	/* Thunderbolt-only locks */
	if (instance->tbolt) {
		mutex_init(&instance->cmd_app_pool_mtx, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

		mutex_init(&instance->chip_mtx, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));

	instance->unroll.mutexs = 1;

	/* -1 acts as the "no timeout pending" sentinel */
	instance->timeout_id = (timeout_id_t)-1;

	/* Register our soft-isr for highlevel interrupts. */
	instance->isr_level = instance->intr_pri;
	if (!(instance->tbolt)) {
		if (instance->isr_level == HIGH_LEVEL_INTR) {
			if (ddi_add_softintr(dip,
			    &instance->soft_intr_id, NULL, NULL,
			    mrsas_softintr, (caddr_t)instance) !=
				dev_err(dip, CE_WARN,
				    "Software ISR did not register");

			instance->unroll.soft_isr = 1;

	instance->softint_running = 0;

	/* Allocate a transport structure */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
		dev_err(dip, CE_WARN,
		    "scsi_hba_tran_alloc failed");

	instance->tran = tran;
	instance->unroll.tran = 1;

	/* wire up the SCSA entry points */
	tran->tran_hba_private = instance;
	tran->tran_tgt_init = mrsas_tran_tgt_init;
	tran->tran_tgt_probe = scsi_hba_probe;
	tran->tran_tgt_free = mrsas_tran_tgt_free;
	tran->tran_init_pkt = mrsas_tran_init_pkt;
		tran->tran_start = mrsas_tbolt_tran_start;
		tran->tran_start = mrsas_tran_start;
	tran->tran_abort = mrsas_tran_abort;
	tran->tran_reset = mrsas_tran_reset;
	tran->tran_getcap = mrsas_tran_getcap;
	tran->tran_setcap = mrsas_tran_setcap;
	tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
	tran->tran_dmafree = mrsas_tran_dmafree;
	tran->tran_sync_pkt = mrsas_tran_sync_pkt;
	tran->tran_quiesce = mrsas_tran_quiesce;
	tran->tran_unquiesce = mrsas_tran_unquiesce;
	tran->tran_bus_config = mrsas_tran_bus_config;

	if (mrsas_relaxed_ordering)
		mrsas_generic_dma_attr.dma_attr_flags |=
		    DDI_DMA_RELAXED_ORDERING;

	/* per-instance copy of the generic attrs, with real SGL limit */
	tran_dma_attr = mrsas_generic_dma_attr;
	tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;

	/* Attach this instance of the hba */
	if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
		dev_err(dip, CE_WARN,
		    "scsi_hba_attach failed");

	instance->unroll.tranSetup = 1;
	    (CE_CONT, "scsi_hba_attach_setup() done."));

	/* create devctl node for cfgadm command */
	if (ddi_create_minor_node(dip, "devctl",
	    S_IFCHR, INST2DEVCTL(instance_no),
	    DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
		dev_err(dip, CE_WARN, "failed to create devctl node.");

	instance->unroll.devctl = 1;

	/* create scsi node for cfgadm command */
	if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
	    INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
		dev_err(dip, CE_WARN, "failed to create scsi node.");

	instance->unroll.scsictl = 1;

	(void) snprintf(instance->iocnode, sizeof (instance->iocnode),
	    "%d:lsirdctl", instance_no);

	/*
	 * Create a node for applications
	 * for issuing ioctl to the driver.
	 */
	if (ddi_create_minor_node(dip, instance->iocnode,
	    S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
		dev_err(dip, CE_WARN, "failed to create ioctl node.");

	instance->unroll.ioctl = 1;

	/* Create a taskq to handle dr events */
	if ((instance->taskq = ddi_taskq_create(dip,
	    "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		dev_err(dip, CE_WARN, "failed to create taskq.");
		instance->taskq = NULL;

	instance->unroll.taskq = 1;
	con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));

	/* enable interrupt */
	instance->func_ptr->enable_intr(instance);

	/* initiate AEN */
	if (start_mfi_aen(instance)) {
		dev_err(dip, CE_WARN, "failed to initiate AEN.");

	instance->unroll.aenPend = 1;
	    (CE_CONT, "AEN started for instance %d.", instance_no));

	/* Finally! We are on the air. */

	/* FMA handle checking. */
	if (mrsas_check_acc_handle(instance->regmap_handle) !=
	if (mrsas_check_acc_handle(instance->pci_handle) !=

	/* logical-drive bookkeeping list */
	instance->mr_ld_list =
	    kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
	instance->unroll.ldlist_buff = 1;

	/* physical-drive list (tbolt/skinny only) */
	if (instance->tbolt || instance->skinny) {
		instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
		instance->mr_tbolt_pd_list =
		    kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
		    sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
		ASSERT(instance->mr_tbolt_pd_list);
		for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
			instance->mr_tbolt_pd_list[i].lun_type =
			instance->mr_tbolt_pd_list[i].dev_id =

		instance->unroll.pdlist_buff = 1;

		/* remaining attach cmd cases (labels lost in extraction) */
		con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
		con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
		    (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
		return (DDI_FAILURE);

	    (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
	return (DDI_SUCCESS);

	/* failure path: unwind whatever the unroll flags recorded */
	mrsas_undo_resources(dip, instance);

	mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

	mrsas_fm_fini(instance);

	pci_config_teardown(&instance->pci_handle);
	ddi_soft_state_free(mrsas_state, instance_no);

	return (DDI_FAILURE);
989 * getinfo - gets device information
995 * The system calls getinfo() to obtain configuration information that only
996 * the driver knows. The mapping of minor numbers to device instance is
997 * entirely under the control of the driver. The system sometimes needs to ask
998 * the driver which device a particular dev_t represents.
999 * Given the device number return the devinfo pointer from the scsi_device
/*
 * mrsas_getinfo - DDI getinfo(9E) entry point.
 *
 * Maps a dev_t (passed as 'arg') back to either the instance's dip
 * (DDI_INFO_DEVT2DEVINFO) or its instance number
 * (DDI_INFO_DEVT2INSTANCE) via the soft-state table.
 *
 * NOTE(review): the `switch (cmd)` header, `default:` case, and
 * return/rval lines were dropped by extraction; tokens preserved as-is.
 */
mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
	int	mrsas_minor = getminor((dev_t)arg);

	struct mrsas_instance	*instance;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	case DDI_INFO_DEVT2DEVINFO:
		/* minor number encodes the instance; look up its dip */
		instance = (struct mrsas_instance *)
		    ddi_get_soft_state(mrsas_state,
		    MINOR2INST(mrsas_minor));

		if (instance == NULL) {

		*resultp = instance->dip;

	case DDI_INFO_DEVT2INSTANCE:
		*resultp = (void *)(intptr_t)
		    (MINOR2INST(getminor((dev_t)arg)));
1041 * detach - detaches a device from the system
1042 * @dip: pointer to the device's dev_info structure
1043 * @cmd: type of detach
1045 * A driver's detach() entry point is called to detach an instance of a device
1046 * that is bound to the driver. The entry point is called with the instance of
1047 * the device node to be detached and with DDI_DETACH, which is specified as
1048 * the cmd argument to the entry point.
1049 * This routine is called during driver unload. We free all the allocated
1050 * resources and call the corresponding LLD so that it can also release all
/*
 * mrsas_detach - DDI detach(9E) entry point.
 *
 * For DDI_DETACH: cancels the pending I/O-timeout callback, detaches
 * the SCSA transport, flushes the controller cache, unwinds all
 * attach-time resources via mrsas_undo_resources(), tears down FMA and
 * PCI config access, and frees the soft state.  Suspend commands are
 * only logged.
 *
 * NOTE(review): the `switch (cmd)` header, `case DDI_DETACH:`, and the
 * trailing instance-number argument of ddi_get_soft_state() were lost
 * in extraction; tokens preserved as-is.
 */
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
	struct mrsas_instance	*instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* single-threaded by the DDI during detach */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
		dev_err(dip, CE_WARN, "could not get instance in detach");
		return (DDI_FAILURE);

		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Drop the lock around untimeout() so the timeout
		 * callback (which takes config_dev_mtx) cannot deadlock
		 * against us.
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		mutex_exit(&instance->config_dev_mtx);

		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				dev_err(dip, CE_WARN,
				    "failed to detach");
				return (DDI_FAILURE);
			instance->unroll.tranSetup = 0;
			    (CE_CONT, "scsi_hba_dettach() done."));

		/* ask firmware to flush its caches before teardown */
		flush_cache(instance);

		mrsas_undo_resources(dip, instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);

	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
1134 mrsas_undo_resources(dev_info_t
*dip
, struct mrsas_instance
*instance
)
1136 con_log(CL_ANN
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1138 if (instance
->unroll
.ioctl
== 1) {
1139 ddi_remove_minor_node(dip
, instance
->iocnode
);
1140 instance
->unroll
.ioctl
= 0;
1143 if (instance
->unroll
.scsictl
== 1) {
1144 ddi_remove_minor_node(dip
, "scsi");
1145 instance
->unroll
.scsictl
= 0;
1148 if (instance
->unroll
.devctl
== 1) {
1149 ddi_remove_minor_node(dip
, "devctl");
1150 instance
->unroll
.devctl
= 0;
1153 if (instance
->unroll
.tranSetup
== 1) {
1154 if (scsi_hba_detach(dip
) != DDI_SUCCESS
) {
1155 dev_err(dip
, CE_WARN
, "failed to detach");
1156 return; /* DDI_FAILURE */
1158 instance
->unroll
.tranSetup
= 0;
1159 con_log(CL_ANN1
, (CE_CONT
, "scsi_hba_dettach() done."));
1162 if (instance
->unroll
.tran
== 1) {
1163 scsi_hba_tran_free(instance
->tran
);
1164 instance
->unroll
.tran
= 0;
1165 con_log(CL_ANN1
, (CE_CONT
, "scsi_hba_tran_free() done."));
1168 if (instance
->unroll
.syncCmd
== 1) {
1169 if (instance
->tbolt
) {
1170 if (abort_syncmap_cmd(instance
,
1171 instance
->map_update_cmd
)) {
1172 dev_err(dip
, CE_WARN
, "mrsas_detach: "
1173 "failed to abort previous syncmap command");
1176 instance
->unroll
.syncCmd
= 0;
1177 con_log(CL_ANN1
, (CE_CONT
, "sync cmd aborted, done."));
1181 if (instance
->unroll
.aenPend
== 1) {
1182 if (abort_aen_cmd(instance
, instance
->aen_cmd
))
1183 dev_err(dip
, CE_WARN
, "mrsas_detach: "
1184 "failed to abort prevous AEN command");
1186 instance
->unroll
.aenPend
= 0;
1187 con_log(CL_ANN1
, (CE_CONT
, "aen cmd aborted, done."));
1188 /* This means the controller is fully initialized and running */
1189 /* Shutdown should be a last command to controller. */
1190 /* shutdown_controller(); */
1194 if (instance
->unroll
.timer
== 1) {
1195 if (instance
->timeout_id
!= (timeout_id_t
)-1) {
1196 (void) untimeout(instance
->timeout_id
);
1197 instance
->timeout_id
= (timeout_id_t
)-1;
1199 instance
->unroll
.timer
= 0;
1203 instance
->func_ptr
->disable_intr(instance
);
1206 if (instance
->unroll
.mutexs
== 1) {
1207 mutex_destroy(&instance
->cmd_pool_mtx
);
1208 mutex_destroy(&instance
->app_cmd_pool_mtx
);
1209 mutex_destroy(&instance
->cmd_pend_mtx
);
1210 mutex_destroy(&instance
->completed_pool_mtx
);
1211 mutex_destroy(&instance
->sync_map_mtx
);
1212 mutex_destroy(&instance
->int_cmd_mtx
);
1213 cv_destroy(&instance
->int_cmd_cv
);
1214 mutex_destroy(&instance
->config_dev_mtx
);
1215 mutex_destroy(&instance
->ocr_flags_mtx
);
1216 mutex_destroy(&instance
->reg_write_mtx
);
1218 if (instance
->tbolt
) {
1219 mutex_destroy(&instance
->cmd_app_pool_mtx
);
1220 mutex_destroy(&instance
->chip_mtx
);
1223 instance
->unroll
.mutexs
= 0;
1224 con_log(CL_ANN1
, (CE_CONT
, "Destroy mutex & cv, done."));
1228 if (instance
->unroll
.soft_isr
== 1) {
1229 ddi_remove_softintr(instance
->soft_intr_id
);
1230 instance
->unroll
.soft_isr
= 0;
1233 if (instance
->unroll
.intr
== 1) {
1234 mrsas_rem_intrs(instance
);
1235 instance
->unroll
.intr
= 0;
1239 if (instance
->unroll
.taskq
== 1) {
1240 if (instance
->taskq
) {
1241 ddi_taskq_destroy(instance
->taskq
);
1242 instance
->unroll
.taskq
= 0;
1248 * free dma memory allocated for
1249 * cmds/frames/queues/driver version etc
1251 if (instance
->unroll
.verBuff
== 1) {
1252 (void) mrsas_free_dma_obj(instance
, instance
->drv_ver_dma_obj
);
1253 instance
->unroll
.verBuff
= 0;
1256 if (instance
->unroll
.pdlist_buff
== 1) {
1257 if (instance
->mr_tbolt_pd_list
!= NULL
) {
1258 kmem_free(instance
->mr_tbolt_pd_list
,
1259 MRSAS_TBOLT_GET_PD_MAX(instance
) *
1260 sizeof (struct mrsas_tbolt_pd
));
1263 instance
->mr_tbolt_pd_list
= NULL
;
1264 instance
->unroll
.pdlist_buff
= 0;
1267 if (instance
->unroll
.ldlist_buff
== 1) {
1268 if (instance
->mr_ld_list
!= NULL
) {
1269 kmem_free(instance
->mr_ld_list
, MRDRV_MAX_LD
1270 * sizeof (struct mrsas_ld
));
1273 instance
->mr_ld_list
= NULL
;
1274 instance
->unroll
.ldlist_buff
= 0;
1277 if (instance
->tbolt
) {
1278 if (instance
->unroll
.alloc_space_mpi2
== 1) {
1279 free_space_for_mpi2(instance
);
1280 instance
->unroll
.alloc_space_mpi2
= 0;
1283 if (instance
->unroll
.alloc_space_mfi
== 1) {
1284 free_space_for_mfi(instance
);
1285 instance
->unroll
.alloc_space_mfi
= 0;
1289 if (instance
->unroll
.regs
== 1) {
1290 ddi_regs_map_free(&instance
->regmap_handle
);
1291 instance
->unroll
.regs
= 0;
1292 con_log(CL_ANN1
, (CE_CONT
, "ddi_regs_map_free() done."));
1299 * ************************************************************************** *
1301 * common entry points - for character driver types *
1303 * ************************************************************************** *
1306 * open - gets access to a device
1312 * Access to a device by one or more application programs is controlled
1313 * through the open() and close() entry points. The primary function of
1314 * open() is to verify that the open request is allowed.
1317 mrsas_open(dev_t
*dev
, int openflags
, int otyp
, cred_t
*credp
)
1321 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1323 /* Check root permissions */
1324 if (drv_priv(credp
) != 0) {
1325 con_log(CL_ANN
, (CE_WARN
,
1326 "mr_sas: Non-root ioctl access denied!"));
1330 /* Verify we are being opened as a character device */
1331 if (otyp
!= OTYP_CHR
) {
1332 con_log(CL_ANN
, (CE_WARN
,
1333 "mr_sas: ioctl node must be a char node"));
1337 if (ddi_get_soft_state(mrsas_state
, MINOR2INST(getminor(*dev
)))
1342 if (scsi_hba_open
) {
1343 rval
= scsi_hba_open(dev
, openflags
, otyp
, credp
);
1350 * close - gives up access to a device
1356 * close() should perform any cleanup necessary to finish using the minor
1357 * device, and prepare the device (and driver) to be opened again.
1360 mrsas_close(dev_t dev
, int openflags
, int otyp
, cred_t
*credp
)
1364 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1366 /* no need for locks! */
1368 if (scsi_hba_close
) {
1369 rval
= scsi_hba_close(dev
, openflags
, otyp
, credp
);
1376 * ioctl - performs a range of I/O commands for character drivers
1384 * ioctl() routine must make sure that user data is copied into or out of the
1385 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1386 * and ddi_copyout(), as appropriate.
1387 * This is a wrapper routine to serialize access to the actual ioctl routine.
1388 * ioctl() should return 0 on success, or the appropriate error number. The
1389 * driver may also set the value returned to the calling process through rvalp.
1393 mrsas_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int mode
, cred_t
*credp
,
1398 struct mrsas_instance
*instance
;
1399 struct mrsas_ioctl
*ioctl
;
1400 struct mrsas_aen aen
;
1401 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1403 instance
= ddi_get_soft_state(mrsas_state
, MINOR2INST(getminor(dev
)));
1405 if (instance
== NULL
) {
1406 /* invalid minor number */
1407 con_log(CL_ANN
, (CE_WARN
, "mr_sas: adapter not found."));
1411 ioctl
= kmem_zalloc(sizeof (struct mrsas_ioctl
), KM_SLEEP
);
1414 switch ((uint_t
)cmd
) {
1415 case MRSAS_IOCTL_FIRMWARE
:
1416 if (ddi_copyin((void *)arg
, ioctl
,
1417 sizeof (struct mrsas_ioctl
), mode
)) {
1418 con_log(CL_ANN
, (CE_WARN
, "mrsas_ioctl: "
1419 "ERROR IOCTL copyin"));
1420 kmem_free(ioctl
, sizeof (struct mrsas_ioctl
));
1424 if (ioctl
->control_code
== MRSAS_DRIVER_IOCTL_COMMON
) {
1425 rval
= handle_drv_ioctl(instance
, ioctl
, mode
);
1427 rval
= handle_mfi_ioctl(instance
, ioctl
, mode
);
1430 if (ddi_copyout((void *)ioctl
, (void *)arg
,
1431 (sizeof (struct mrsas_ioctl
) - 1), mode
)) {
1432 con_log(CL_ANN
, (CE_WARN
,
1433 "mrsas_ioctl: copy_to_user failed"));
1438 case MRSAS_IOCTL_AEN
:
1439 if (ddi_copyin((void *) arg
, &aen
,
1440 sizeof (struct mrsas_aen
), mode
)) {
1441 con_log(CL_ANN
, (CE_WARN
,
1442 "mrsas_ioctl: ERROR AEN copyin"));
1443 kmem_free(ioctl
, sizeof (struct mrsas_ioctl
));
1447 rval
= handle_mfi_aen(instance
, &aen
);
1449 if (ddi_copyout((void *) &aen
, (void *)arg
,
1450 sizeof (struct mrsas_aen
), mode
)) {
1451 con_log(CL_ANN
, (CE_WARN
,
1452 "mrsas_ioctl: copy_to_user failed"));
1458 rval
= scsi_hba_ioctl(dev
, cmd
, arg
,
1459 mode
, credp
, rvalp
);
1461 con_log(CL_DLEVEL1
, (CE_NOTE
, "mrsas_ioctl: "
1462 "scsi_hba_ioctl called, ret = %x.", rval
));
1465 kmem_free(ioctl
, sizeof (struct mrsas_ioctl
));
1470 * ************************************************************************** *
1472 * common entry points - for block driver types *
1474 * ************************************************************************** *
1478 mrsas_quiesce(dev_info_t
*dip
)
1482 struct mrsas_instance
*instance
;
1484 instance_no
= ddi_get_instance(dip
);
1485 instance
= (struct mrsas_instance
*)ddi_get_soft_state
1486 (mrsas_state
, instance_no
);
1488 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1491 con_log(CL_ANN1
, (CE_WARN
, "mr_sas:%d could not get adapter "
1492 "in quiesce", instance_no
));
1493 return (DDI_FAILURE
);
1495 if (instance
->deadadapter
|| instance
->adapterresetinprogress
) {
1496 con_log(CL_ANN1
, (CE_WARN
, "mr_sas:%d adapter is not in "
1497 "healthy state", instance_no
));
1498 return (DDI_FAILURE
);
1501 if (abort_aen_cmd(instance
, instance
->aen_cmd
)) {
1502 con_log(CL_ANN1
, (CE_WARN
, "mrsas_quiesce: "
1503 "failed to abort prevous AEN command QUIESCE"));
1506 if (instance
->tbolt
) {
1507 if (abort_syncmap_cmd(instance
,
1508 instance
->map_update_cmd
)) {
1509 dev_err(dip
, CE_WARN
,
1510 "mrsas_detach: failed to abort "
1511 "previous syncmap command");
1512 return (DDI_FAILURE
);
1516 instance
->func_ptr
->disable_intr(instance
);
1518 con_log(CL_ANN1
, (CE_CONT
, "flushing cache for instance %d",
1521 flush_cache(instance
);
1523 if (wait_for_outstanding(instance
)) {
1525 (CE_CONT
, "wait_for_outstanding: return FAIL.\n"));
1526 return (DDI_FAILURE
);
1528 return (DDI_SUCCESS
);
1532 * ************************************************************************** *
1534 * entry points (SCSI HBA) *
1536 * ************************************************************************** *
1539 * tran_tgt_init - initialize a target device instance
1545 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1546 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1547 * the device's address as valid and supportable for that particular HBA.
1548 * By returning DDI_FAILURE, the instance of the target driver for that device
1549 * is not probed or attached.
1553 mrsas_tran_tgt_init(dev_info_t
*hba_dip
, dev_info_t
*tgt_dip
,
1554 scsi_hba_tran_t
*tran
, struct scsi_device
*sd
)
1556 struct mrsas_instance
*instance
;
1557 uint16_t tgt
= sd
->sd_address
.a_target
;
1558 uint8_t lun
= sd
->sd_address
.a_lun
;
1559 dev_info_t
*child
= NULL
;
1561 con_log(CL_DLEVEL2
, (CE_NOTE
, "mrsas_tgt_init target %d lun %d",
1564 instance
= ADDR2MR(&sd
->sd_address
);
1566 if (ndi_dev_is_persistent_node(tgt_dip
) == 0) {
1568 * If no persistent node exists, we don't allow .conf node
1571 if ((child
= mrsas_find_child(instance
, tgt
, lun
)) != NULL
) {
1573 (CE_NOTE
, "mrsas_tgt_init find child ="
1574 " %p t = %d l = %d", (void *)child
, tgt
, lun
));
1575 if (ndi_merge_node(tgt_dip
, mrsas_name_node
) !=
1577 /* Create this .conf node */
1578 return (DDI_SUCCESS
);
1580 con_log(CL_DLEVEL2
, (CE_NOTE
, "mrsas_tgt_init in ndi_per "
1581 "DDI_FAILURE t = %d l = %d", tgt
, lun
));
1582 return (DDI_FAILURE
);
1586 con_log(CL_DLEVEL2
, (CE_NOTE
, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1587 (void *)instance
->mr_ld_list
[tgt
].dip
, (void *)tgt_dip
));
1589 if (tgt
< MRDRV_MAX_LD
&& lun
== 0) {
1590 if (instance
->mr_ld_list
[tgt
].dip
== NULL
&&
1591 strcmp(ddi_driver_name(sd
->sd_dev
), "sd") == 0) {
1592 mutex_enter(&instance
->config_dev_mtx
);
1593 instance
->mr_ld_list
[tgt
].dip
= tgt_dip
;
1594 instance
->mr_ld_list
[tgt
].lun_type
= MRSAS_LD_LUN
;
1595 instance
->mr_ld_list
[tgt
].flag
= MRDRV_TGT_VALID
;
1596 mutex_exit(&instance
->config_dev_mtx
);
1598 } else if (instance
->tbolt
|| instance
->skinny
) {
1599 if (instance
->mr_tbolt_pd_list
[tgt
].dip
== NULL
) {
1600 mutex_enter(&instance
->config_dev_mtx
);
1601 instance
->mr_tbolt_pd_list
[tgt
].dip
= tgt_dip
;
1602 instance
->mr_tbolt_pd_list
[tgt
].flag
=
1604 mutex_exit(&instance
->config_dev_mtx
);
1605 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_tran_tgt_init:"
1606 "t%xl%x", tgt
, lun
));
1610 return (DDI_SUCCESS
);
1615 mrsas_tran_tgt_free(dev_info_t
*hba_dip
, dev_info_t
*tgt_dip
,
1616 scsi_hba_tran_t
*hba_tran
, struct scsi_device
*sd
)
1618 struct mrsas_instance
*instance
;
1619 int tgt
= sd
->sd_address
.a_target
;
1620 int lun
= sd
->sd_address
.a_lun
;
1622 instance
= ADDR2MR(&sd
->sd_address
);
1624 con_log(CL_DLEVEL2
, (CE_NOTE
, "tgt_free t = %d l = %d", tgt
, lun
));
1626 if (tgt
< MRDRV_MAX_LD
&& lun
== 0) {
1627 if (instance
->mr_ld_list
[tgt
].dip
== tgt_dip
) {
1628 mutex_enter(&instance
->config_dev_mtx
);
1629 instance
->mr_ld_list
[tgt
].dip
= NULL
;
1630 mutex_exit(&instance
->config_dev_mtx
);
1632 } else if (instance
->tbolt
|| instance
->skinny
) {
1633 mutex_enter(&instance
->config_dev_mtx
);
1634 instance
->mr_tbolt_pd_list
[tgt
].dip
= NULL
;
1635 mutex_exit(&instance
->config_dev_mtx
);
1636 con_log(CL_ANN1
, (CE_NOTE
, "tgt_free: Setting dip = NULL"
1637 "for tgt:%x", tgt
));
1642 mrsas_find_child(struct mrsas_instance
*instance
, uint16_t tgt
, uint8_t lun
)
1644 dev_info_t
*child
= NULL
;
1645 char addr
[SCSI_MAXNAMELEN
];
1646 char tmp
[MAXNAMELEN
];
1648 (void) snprintf(addr
, sizeof (addr
), "%x,%x", tgt
, lun
);
1649 for (child
= ddi_get_child(instance
->dip
); child
;
1650 child
= ddi_get_next_sibling(child
)) {
1652 if (ndi_dev_is_persistent_node(child
) == 0) {
1656 if (mrsas_name_node(child
, tmp
, MAXNAMELEN
) !=
1661 if (strcmp(addr
, tmp
) == 0) {
1665 con_log(CL_DLEVEL2
, (CE_NOTE
, "mrsas_find_child: return child = %p",
1677 mrsas_name_node(dev_info_t
*dip
, char *name
, int len
)
1681 tgt
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
,
1682 DDI_PROP_DONTPASS
, "target", -1);
1683 con_log(CL_DLEVEL2
, (CE_NOTE
,
1684 "mrsas_name_node: dip %p tgt %d", (void *)dip
, tgt
));
1686 return (DDI_FAILURE
);
1688 lun
= ddi_prop_get_int(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
1691 (CE_NOTE
, "mrsas_name_node: tgt %d lun %d", tgt
, lun
));
1693 return (DDI_FAILURE
);
1695 (void) snprintf(name
, len
, "%x,%x", tgt
, lun
);
1696 return (DDI_SUCCESS
);
1700 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1710 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1711 * structure and DMA resources for a target driver request. The
1712 * tran_init_pkt() entry point is called when the target driver calls the
1713 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1714 * is a request to perform one or more of three possible services:
1715 * - allocation and initialization of a scsi_pkt structure
1716 * - allocation of DMA resources for data transfer
1717 * - reallocation of DMA resources for the next portion of the data transfer
1719 static struct scsi_pkt
*
1720 mrsas_tran_init_pkt(struct scsi_address
*ap
, register struct scsi_pkt
*pkt
,
1721 struct buf
*bp
, int cmdlen
, int statuslen
, int tgtlen
,
1722 int flags
, int (*callback
)(), caddr_t arg
)
1724 struct scsa_cmd
*acmd
;
1725 struct mrsas_instance
*instance
;
1726 struct scsi_pkt
*new_pkt
;
1728 con_log(CL_DLEVEL1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1730 instance
= ADDR2MR(ap
);
1732 /* step #1 : pkt allocation */
1734 pkt
= scsi_hba_pkt_alloc(instance
->dip
, ap
, cmdlen
, statuslen
,
1735 tgtlen
, sizeof (struct scsa_cmd
), callback
, arg
);
1740 acmd
= PKT2CMD(pkt
);
1743 * Initialize the new pkt - we redundantly initialize
1744 * all the fields for illustrative purposes.
1746 acmd
->cmd_pkt
= pkt
;
1747 acmd
->cmd_flags
= 0;
1748 acmd
->cmd_scblen
= statuslen
;
1749 acmd
->cmd_cdblen
= cmdlen
;
1750 acmd
->cmd_dmahandle
= NULL
;
1751 acmd
->cmd_ncookies
= 0;
1752 acmd
->cmd_cookie
= 0;
1753 acmd
->cmd_cookiecnt
= 0;
1756 pkt
->pkt_address
= *ap
;
1757 pkt
->pkt_comp
= (void (*)())NULL
;
1762 pkt
->pkt_statistics
= 0;
1763 pkt
->pkt_reason
= 0;
1766 acmd
= PKT2CMD(pkt
);
1770 /* step #2 : dma allocation/move */
1771 if (bp
&& bp
->b_bcount
!= 0) {
1772 if (acmd
->cmd_dmahandle
== NULL
) {
1773 if (mrsas_dma_alloc(instance
, pkt
, bp
, flags
,
1774 callback
) == DDI_FAILURE
) {
1776 scsi_hba_pkt_free(ap
, new_pkt
);
1781 if (mrsas_dma_move(instance
, pkt
, bp
) == DDI_FAILURE
) {
1791 * tran_start - transport a SCSI command to the addressed target
1795 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1796 * SCSI command to the addressed target. The SCSI command is described
1797 * entirely within the scsi_pkt structure, which the target driver allocated
1798 * through the HBA driver's tran_init_pkt() entry point. If the command
1799 * involves a data transfer, DMA resources must also have been allocated for
1800 * the scsi_pkt structure.
1803 * TRAN_BUSY - request queue is full, no more free scbs
1804 * TRAN_ACCEPT - pkt has been submitted to the instance
1807 mrsas_tran_start(struct scsi_address
*ap
, register struct scsi_pkt
*pkt
)
1809 uchar_t cmd_done
= 0;
1811 struct mrsas_instance
*instance
= ADDR2MR(ap
);
1812 struct mrsas_cmd
*cmd
;
1814 con_log(CL_DLEVEL1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1815 if (instance
->deadadapter
== 1) {
1816 con_log(CL_ANN1
, (CE_WARN
,
1817 "mrsas_tran_start: return TRAN_FATAL_ERROR "
1818 "for IO, as the HBA doesnt take any more IOs"));
1820 pkt
->pkt_reason
= CMD_DEV_GONE
;
1821 pkt
->pkt_statistics
= STAT_DISCON
;
1823 return (TRAN_FATAL_ERROR
);
1826 if (instance
->adapterresetinprogress
) {
1827 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_tran_start: Reset flag set, "
1828 "returning mfi_pkt and setting TRAN_BUSY\n"));
1832 con_log(CL_ANN1
, (CE_CONT
, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
1833 __func__
, __LINE__
, pkt
->pkt_cdbp
[0], pkt
->pkt_time
));
1835 pkt
->pkt_reason
= CMD_CMPLT
;
1836 *pkt
->pkt_scbp
= STATUS_GOOD
; /* clear arq scsi_status */
1838 cmd
= build_cmd(instance
, ap
, pkt
, &cmd_done
);
1841 * Check if the command is already completed by the mrsas_build_cmd()
1842 * routine. In which case the busy_flag would be clear and scb will be
1843 * NULL and appropriate reason provided in pkt_reason field
1846 pkt
->pkt_reason
= CMD_CMPLT
;
1847 pkt
->pkt_scbp
[0] = STATUS_GOOD
;
1848 pkt
->pkt_state
|= STATE_GOT_BUS
| STATE_GOT_TARGET
1850 if (((pkt
->pkt_flags
& FLAG_NOINTR
) == 0) && pkt
->pkt_comp
) {
1851 (*pkt
->pkt_comp
)(pkt
);
1854 return (TRAN_ACCEPT
);
1861 if ((pkt
->pkt_flags
& FLAG_NOINTR
) == 0) {
1862 if (instance
->fw_outstanding
> instance
->max_fw_cmds
) {
1863 con_log(CL_ANN
, (CE_CONT
, "mr_sas:Firmware busy"));
1864 DTRACE_PROBE2(start_tran_err
,
1865 uint16_t, instance
->fw_outstanding
,
1866 uint16_t, instance
->max_fw_cmds
);
1867 mrsas_return_mfi_pkt(instance
, cmd
);
1871 /* Synchronize the Cmd frame for the controller */
1872 (void) ddi_dma_sync(cmd
->frame_dma_obj
.dma_handle
, 0, 0,
1873 DDI_DMA_SYNC_FORDEV
);
1874 con_log(CL_ANN
, (CE_CONT
, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
1875 "cmd->index:%x\n", pkt
->pkt_cdbp
[0], cmd
->index
));
1876 instance
->func_ptr
->issue_cmd(cmd
, instance
);
1879 struct mrsas_header
*hdr
= &cmd
->frame
->hdr
;
1881 instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
);
1883 pkt
->pkt_reason
= CMD_CMPLT
;
1884 pkt
->pkt_statistics
= 0;
1885 pkt
->pkt_state
|= STATE_XFERRED_DATA
| STATE_GOT_STATUS
;
1887 switch (ddi_get8(cmd
->frame_dma_obj
.acc_handle
,
1888 &hdr
->cmd_status
)) {
1890 pkt
->pkt_scbp
[0] = STATUS_GOOD
;
1893 case MFI_STAT_SCSI_DONE_WITH_ERROR
:
1894 con_log(CL_ANN
, (CE_CONT
,
1895 "mrsas_tran_start: scsi done with error"));
1896 pkt
->pkt_reason
= CMD_CMPLT
;
1897 pkt
->pkt_statistics
= 0;
1899 ((struct scsi_status
*)pkt
->pkt_scbp
)->sts_chk
= 1;
1902 case MFI_STAT_DEVICE_NOT_FOUND
:
1903 con_log(CL_ANN
, (CE_CONT
,
1904 "mrsas_tran_start: device not found error"));
1905 pkt
->pkt_reason
= CMD_DEV_GONE
;
1906 pkt
->pkt_statistics
= STAT_DISCON
;
1910 ((struct scsi_status
*)pkt
->pkt_scbp
)->sts_busy
= 1;
1913 (void) mrsas_common_check(instance
, cmd
);
1914 DTRACE_PROBE2(start_nointr_done
, uint8_t, hdr
->cmd
,
1915 uint8_t, hdr
->cmd_status
);
1916 mrsas_return_mfi_pkt(instance
, cmd
);
1918 if (pkt
->pkt_comp
) {
1919 (*pkt
->pkt_comp
)(pkt
);
1924 return (TRAN_ACCEPT
);
1928 * tran_abort - Abort any commands that are currently in transport
1932 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1933 * commands that are currently in transport for a particular target. This entry
1934 * point is called when a target driver calls scsi_abort(). The tran_abort()
1935 * entry point should attempt to abort the command denoted by the pkt
1936 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1937 * abort all outstanding commands in the transport layer for the particular
1938 * target or logical unit.
1942 mrsas_tran_abort(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
1944 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1946 /* abort command not supported by H/W */
1948 return (DDI_FAILURE
);
1952 * tran_reset - reset either the SCSI bus or target
1956 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1957 * the SCSI bus or a particular SCSI target device. This entry point is called
1958 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1959 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1960 * particular target or logical unit must be reset.
1964 mrsas_tran_reset(struct scsi_address
*ap
, int level
)
1966 struct mrsas_instance
*instance
= ADDR2MR(ap
);
1968 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1970 if (wait_for_outstanding(instance
)) {
1972 (CE_CONT
, "wait_for_outstanding: return FAIL.\n"));
1973 return (DDI_FAILURE
);
1975 return (DDI_SUCCESS
);
1980 * tran_getcap - get one of a set of SCSA-defined capabilities
1985 * The target driver can request the current setting of the capability for a
1986 * particular target by setting the whom parameter to nonzero. A whom value of
1987 * zero indicates a request for the current setting of the general capability
1988 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1989 * for undefined capabilities or the current value of the requested capability.
1993 mrsas_tran_getcap(struct scsi_address
*ap
, char *cap
, int whom
)
1997 struct mrsas_instance
*instance
= ADDR2MR(ap
);
1999 con_log(CL_DLEVEL2
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2001 /* we do allow inquiring about capabilities for other targets */
2006 switch (scsi_hba_lookup_capstr(cap
)) {
2007 case SCSI_CAP_DMA_MAX
:
2008 if (instance
->tbolt
) {
2009 /* Limit to 256k max transfer */
2010 rval
= mrsas_tbolt_max_cap_maxxfer
;
2012 /* Limit to 16MB max transfer */
2013 rval
= mrsas_max_cap_maxxfer
;
2016 case SCSI_CAP_MSG_OUT
:
2019 case SCSI_CAP_DISCONNECT
:
2022 case SCSI_CAP_SYNCHRONOUS
:
2025 case SCSI_CAP_WIDE_XFER
:
2028 case SCSI_CAP_TAGGED_QING
:
2031 case SCSI_CAP_UNTAGGED_QING
:
2034 case SCSI_CAP_PARITY
:
2037 case SCSI_CAP_INITIATOR_ID
:
2038 rval
= instance
->init_id
;
2043 case SCSI_CAP_LINKED_CMDS
:
2046 case SCSI_CAP_RESET_NOTIFICATION
:
2049 case SCSI_CAP_GEOMETRY
:
2054 con_log(CL_DLEVEL2
, (CE_NOTE
, "Default cap coming 0x%x",
2055 scsi_hba_lookup_capstr(cap
)));
2064 * tran_setcap - set one of a set of SCSA-defined capabilities
2070 * The target driver might request that the new value be set for a particular
2071 * target by setting the whom parameter to nonzero. A whom value of zero
2072 * means that request is to set the new value for the SCSI bus or for adapter
2073 * hardware in general.
2074 * The tran_setcap() should return the following values as appropriate:
2075 * - -1 for undefined capabilities
2076 * - 0 if the HBA driver cannot set the capability to the requested value
2077 * - 1 if the HBA driver is able to set the capability to the requested value
2081 mrsas_tran_setcap(struct scsi_address
*ap
, char *cap
, int value
, int whom
)
2085 con_log(CL_DLEVEL2
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2087 /* We don't allow setting capabilities for other targets */
2088 if (cap
== NULL
|| whom
== 0) {
2092 switch (scsi_hba_lookup_capstr(cap
)) {
2093 case SCSI_CAP_DMA_MAX
:
2094 case SCSI_CAP_MSG_OUT
:
2095 case SCSI_CAP_PARITY
:
2096 case SCSI_CAP_LINKED_CMDS
:
2097 case SCSI_CAP_RESET_NOTIFICATION
:
2098 case SCSI_CAP_DISCONNECT
:
2099 case SCSI_CAP_SYNCHRONOUS
:
2100 case SCSI_CAP_UNTAGGED_QING
:
2101 case SCSI_CAP_WIDE_XFER
:
2102 case SCSI_CAP_INITIATOR_ID
:
2105 * None of these are settable via
2106 * the capability interface.
2109 case SCSI_CAP_TAGGED_QING
:
2112 case SCSI_CAP_SECTOR_SIZE
:
2116 case SCSI_CAP_TOTAL_SECTORS
:
2128 * tran_destroy_pkt - deallocate scsi_pkt structure
2132 * The tran_destroy_pkt() entry point is the HBA driver function that
2133 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2134 * called when the target driver calls scsi_destroy_pkt(). The
2135 * tran_destroy_pkt() entry point must free any DMA resources that have been
2136 * allocated for the packet. An implicit DMA synchronization occurs if the
2137 * DMA resources are freed and any cached data remains after the completion
2141 mrsas_tran_destroy_pkt(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
2143 struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
2145 con_log(CL_DLEVEL2
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2147 if (acmd
->cmd_flags
& CFLAG_DMAVALID
) {
2148 acmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
2150 (void) ddi_dma_unbind_handle(acmd
->cmd_dmahandle
);
2152 ddi_dma_free_handle(&acmd
->cmd_dmahandle
);
2154 acmd
->cmd_dmahandle
= NULL
;
2158 scsi_hba_pkt_free(ap
, pkt
);
2162 * tran_dmafree - deallocates DMA resources
2166 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2167 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2168 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2169 * free only DMA resources allocated for a scsi_pkt structure, not the
2170 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2171 * implicitly performed.
2175 mrsas_tran_dmafree(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
2177 register struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
2179 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2181 if (acmd
->cmd_flags
& CFLAG_DMAVALID
) {
2182 acmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
2184 (void) ddi_dma_unbind_handle(acmd
->cmd_dmahandle
);
2186 ddi_dma_free_handle(&acmd
->cmd_dmahandle
);
2188 acmd
->cmd_dmahandle
= NULL
;
2193 * tran_sync_pkt - synchronize the DMA object allocated
2197 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2198 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2199 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2200 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2201 * must synchronize the CPU's view of the data. If the data transfer direction
2202 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2203 * device's view of the data.
2207 mrsas_tran_sync_pkt(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
2209 register struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
2211 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2213 if (acmd
->cmd_flags
& CFLAG_DMAVALID
) {
2214 (void) ddi_dma_sync(acmd
->cmd_dmahandle
, acmd
->cmd_dma_offset
,
2215 acmd
->cmd_dma_len
, (acmd
->cmd_flags
& CFLAG_DMASEND
) ?
2216 DDI_DMA_SYNC_FORDEV
: DDI_DMA_SYNC_FORCPU
);
2222 mrsas_tran_quiesce(dev_info_t
*dip
)
2224 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2231 mrsas_tran_unquiesce(dev_info_t
*dip
)
2233 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2240 * mrsas_isr(caddr_t)
2242 * The Interrupt Service Routine
2244 * Collect status for all completed commands and do callback
2248 mrsas_isr(struct mrsas_instance
*instance
)
2256 struct mrsas_cmd
*cmd
;
2257 struct mrsas_header
*hdr
;
2258 struct scsi_pkt
*pkt
;
2260 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2262 if (instance
->tbolt
) {
2263 mutex_enter(&instance
->chip_mtx
);
2264 if ((instance
->intr_type
== DDI_INTR_TYPE_FIXED
) &&
2265 !(instance
->func_ptr
->intr_ack(instance
))) {
2266 mutex_exit(&instance
->chip_mtx
);
2267 return (DDI_INTR_UNCLAIMED
);
2269 retval
= mr_sas_tbolt_process_outstanding_cmd(instance
);
2270 mutex_exit(&instance
->chip_mtx
);
2273 if ((instance
->intr_type
== DDI_INTR_TYPE_FIXED
) &&
2274 !instance
->func_ptr
->intr_ack(instance
)) {
2275 return (DDI_INTR_UNCLAIMED
);
2279 (void) ddi_dma_sync(instance
->mfi_internal_dma_obj
.dma_handle
,
2280 0, 0, DDI_DMA_SYNC_FORCPU
);
2282 if (mrsas_check_dma_handle(instance
->mfi_internal_dma_obj
.dma_handle
)
2284 mrsas_fm_ereport(instance
, DDI_FM_DEVICE_NO_RESPONSE
);
2285 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
2286 con_log(CL_ANN1
, (CE_WARN
,
2287 "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
2288 return (DDI_INTR_CLAIMED
);
2290 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
2293 if (debug_consecutive_timeout_after_ocr_g
== 1) {
2294 con_log(CL_ANN1
, (CE_NOTE
,
2295 "simulating consecutive timeout after ocr"));
2296 return (DDI_INTR_CLAIMED
);
2300 mutex_enter(&instance
->completed_pool_mtx
);
2301 mutex_enter(&instance
->cmd_pend_mtx
);
2303 producer
= ddi_get32(instance
->mfi_internal_dma_obj
.acc_handle
,
2304 instance
->producer
);
2305 consumer
= ddi_get32(instance
->mfi_internal_dma_obj
.acc_handle
,
2306 instance
->consumer
);
2308 con_log(CL_ANN
, (CE_CONT
, " producer %x consumer %x ",
2309 producer
, consumer
));
2310 if (producer
== consumer
) {
2311 con_log(CL_ANN
, (CE_WARN
, "producer == consumer case"));
2312 DTRACE_PROBE2(isr_pc_err
, uint32_t, producer
,
2313 uint32_t, consumer
);
2314 mutex_exit(&instance
->cmd_pend_mtx
);
2315 mutex_exit(&instance
->completed_pool_mtx
);
2316 return (DDI_INTR_CLAIMED
);
2319 while (consumer
!= producer
) {
2320 context
= ddi_get32(instance
->mfi_internal_dma_obj
.acc_handle
,
2321 &instance
->reply_queue
[consumer
]);
2322 cmd
= instance
->cmd_list
[context
];
2324 if (cmd
->sync_cmd
== MRSAS_TRUE
) {
2325 hdr
= (struct mrsas_header
*)&cmd
->frame
->hdr
;
2327 mlist_del_init(&cmd
->list
);
2332 mlist_del_init(&cmd
->list
);
2336 mlist_add_tail(&cmd
->list
, &instance
->completed_pool_list
);
2339 if (consumer
== (instance
->max_fw_cmds
+ 1)) {
2343 ddi_put32(instance
->mfi_internal_dma_obj
.acc_handle
,
2344 instance
->consumer
, consumer
);
2345 mutex_exit(&instance
->cmd_pend_mtx
);
2346 mutex_exit(&instance
->completed_pool_mtx
);
2348 (void) ddi_dma_sync(instance
->mfi_internal_dma_obj
.dma_handle
,
2349 0, 0, DDI_DMA_SYNC_FORDEV
);
2351 if (instance
->softint_running
) {
2357 if (instance
->isr_level
== HIGH_LEVEL_INTR
) {
2358 if (need_softintr
) {
2359 ddi_trigger_softintr(instance
->soft_intr_id
);
2363 * Not a high-level interrupt, therefore call the soft level
2364 * interrupt explicitly
2366 (void) mrsas_softintr(instance
);
2369 return (DDI_INTR_CLAIMED
);
2374 * ************************************************************************** *
2378 * ************************************************************************** *
2381 * get_mfi_pkt : Get a command from the free pool
2382 * After successful allocation, the caller of this routine
2383 * must clear the frame buffer (memset to zero) before
2384 * using the packet further.
2387 * After clearing the frame buffer the context id of the
2388 * frame buffer SHOULD be restored back.
2391 mrsas_get_mfi_pkt(struct mrsas_instance
*instance
)
2393 mlist_t
*head
= &instance
->cmd_pool_list
;
2394 struct mrsas_cmd
*cmd
= NULL
;
2396 mutex_enter(&instance
->cmd_pool_mtx
);
2398 if (!mlist_empty(head
)) {
2399 cmd
= mlist_entry(head
->next
, struct mrsas_cmd
, list
);
2400 mlist_del_init(head
->next
);
2404 cmd
->retry_count_for_ocr
= 0;
2405 cmd
->drv_pkt_time
= 0;
2408 mutex_exit(&instance
->cmd_pool_mtx
);
2413 static struct mrsas_cmd
*
2414 get_mfi_app_pkt(struct mrsas_instance
*instance
)
2416 mlist_t
*head
= &instance
->app_cmd_pool_list
;
2417 struct mrsas_cmd
*cmd
= NULL
;
2419 mutex_enter(&instance
->app_cmd_pool_mtx
);
2421 if (!mlist_empty(head
)) {
2422 cmd
= mlist_entry(head
->next
, struct mrsas_cmd
, list
);
2423 mlist_del_init(head
->next
);
2427 cmd
->retry_count_for_ocr
= 0;
2428 cmd
->drv_pkt_time
= 0;
2431 mutex_exit(&instance
->app_cmd_pool_mtx
);
2436 * return_mfi_pkt : Return a cmd to free command pool
2439 mrsas_return_mfi_pkt(struct mrsas_instance
*instance
, struct mrsas_cmd
*cmd
)
2441 mutex_enter(&instance
->cmd_pool_mtx
);
2442 /* use mlist_add_tail for debug assistance */
2443 mlist_add_tail(&cmd
->list
, &instance
->cmd_pool_list
);
2445 mutex_exit(&instance
->cmd_pool_mtx
);
2449 return_mfi_app_pkt(struct mrsas_instance
*instance
, struct mrsas_cmd
*cmd
)
2451 mutex_enter(&instance
->app_cmd_pool_mtx
);
2453 mlist_add(&cmd
->list
, &instance
->app_cmd_pool_list
);
2455 mutex_exit(&instance
->app_cmd_pool_mtx
);
2458 push_pending_mfi_pkt(struct mrsas_instance
*instance
, struct mrsas_cmd
*cmd
)
2460 struct scsi_pkt
*pkt
;
2461 struct mrsas_header
*hdr
;
2462 con_log(CL_DLEVEL2
, (CE_NOTE
, "push_pending_pkt(): Called\n"));
2463 mutex_enter(&instance
->cmd_pend_mtx
);
2464 mlist_del_init(&cmd
->list
);
2465 mlist_add_tail(&cmd
->list
, &instance
->cmd_pend_list
);
2466 if (cmd
->sync_cmd
== MRSAS_TRUE
) {
2467 hdr
= (struct mrsas_header
*)&cmd
->frame
->hdr
;
2469 con_log(CL_ANN1
, (CE_CONT
,
2470 "push_pending_mfi_pkt: "
2473 (void *)cmd
, cmd
->index
,
2475 /* Wait for specified interval */
2476 cmd
->drv_pkt_time
= ddi_get16(
2477 cmd
->frame_dma_obj
.acc_handle
, &hdr
->timeout
);
2478 if (cmd
->drv_pkt_time
< debug_timeout_g
)
2479 cmd
->drv_pkt_time
= (uint16_t)debug_timeout_g
;
2480 con_log(CL_ANN1
, (CE_CONT
,
2481 "push_pending_pkt(): "
2482 "Called IO Timeout Value %x\n",
2483 cmd
->drv_pkt_time
));
2485 if (hdr
&& instance
->timeout_id
== (timeout_id_t
)-1) {
2486 instance
->timeout_id
= timeout(io_timeout_checker
,
2487 (void *) instance
, drv_usectohz(MRSAS_1_SECOND
));
2492 con_log(CL_ANN1
, (CE_CONT
,
2493 "push_pending_mfi_pkt: "
2494 "cmd %p index %x pkt %p, "
2496 (void *)cmd
, cmd
->index
, (void *)pkt
,
2498 cmd
->drv_pkt_time
= (uint16_t)debug_timeout_g
;
2500 if (pkt
&& instance
->timeout_id
== (timeout_id_t
)-1) {
2501 instance
->timeout_id
= timeout(io_timeout_checker
,
2502 (void *) instance
, drv_usectohz(MRSAS_1_SECOND
));
2506 mutex_exit(&instance
->cmd_pend_mtx
);
2511 mrsas_print_pending_cmds(struct mrsas_instance
*instance
)
2513 mlist_t
*head
= &instance
->cmd_pend_list
;
2514 mlist_t
*tmp
= head
;
2515 struct mrsas_cmd
*cmd
= NULL
;
2516 struct mrsas_header
*hdr
;
2517 unsigned int flag
= 1;
2518 struct scsi_pkt
*pkt
;
2522 saved_level
= debug_level_g
;
2523 debug_level_g
= CL_ANN1
;
2525 dev_err(instance
->dip
, CE_NOTE
,
2526 "mrsas_print_pending_cmds(): Called");
2529 mutex_enter(&instance
->cmd_pend_mtx
);
2532 mutex_exit(&instance
->cmd_pend_mtx
);
2534 con_log(CL_ANN1
, (CE_CONT
, "mrsas_print_pending_cmds():"
2535 " NO MORE CMDS PENDING....\n"));
2538 cmd
= mlist_entry(tmp
, struct mrsas_cmd
, list
);
2539 mutex_exit(&instance
->cmd_pend_mtx
);
2541 if (cmd
->sync_cmd
== MRSAS_TRUE
) {
2542 hdr
= (struct mrsas_header
*)
2545 con_log(CL_ANN1
, (CE_CONT
,
2546 "print: cmd %p index 0x%x "
2547 "drv_pkt_time 0x%x (NO-PKT)"
2548 " hdr %p\n", (void *)cmd
,
2556 con_log(CL_ANN1
, (CE_CONT
,
2557 "print: cmd %p index 0x%x "
2558 "drv_pkt_time 0x%x pkt %p \n",
2559 (void *)cmd
, cmd
->index
,
2560 cmd
->drv_pkt_time
, (void *)pkt
));
2564 if (++cmd_count
== 1) {
2565 mrsas_print_cmd_details(instance
, cmd
,
2568 mrsas_print_cmd_details(instance
, cmd
,
2575 con_log(CL_ANN1
, (CE_CONT
, "mrsas_print_pending_cmds(): Done\n"));
2578 debug_level_g
= saved_level
;
2580 return (DDI_SUCCESS
);
2585 mrsas_complete_pending_cmds(struct mrsas_instance
*instance
)
2588 struct mrsas_cmd
*cmd
= NULL
;
2589 struct scsi_pkt
*pkt
;
2590 struct mrsas_header
*hdr
;
2592 struct mlist_head
*pos
, *next
;
2594 con_log(CL_ANN1
, (CE_NOTE
,
2595 "mrsas_complete_pending_cmds(): Called"));
2597 mutex_enter(&instance
->cmd_pend_mtx
);
2598 mlist_for_each_safe(pos
, next
, &instance
->cmd_pend_list
) {
2599 cmd
= mlist_entry(pos
, struct mrsas_cmd
, list
);
2602 if (pkt
) { /* for IO */
2603 if (((pkt
->pkt_flags
& FLAG_NOINTR
)
2604 == 0) && pkt
->pkt_comp
) {
2609 con_log(CL_ANN1
, (CE_CONT
,
2610 "fail and posting to scsa "
2614 (void *)cmd
, cmd
->index
,
2615 (void *)pkt
, gethrtime()));
2616 (*pkt
->pkt_comp
)(pkt
);
2618 } else { /* for DCMDS */
2619 if (cmd
->sync_cmd
== MRSAS_TRUE
) {
2620 hdr
= (struct mrsas_header
*)&cmd
->frame
->hdr
;
2621 con_log(CL_ANN1
, (CE_CONT
,
2622 "posting invalid status to application "
2626 (void *)cmd
, cmd
->index
,
2627 (void *)hdr
, gethrtime()));
2628 hdr
->cmd_status
= MFI_STAT_INVALID_STATUS
;
2629 complete_cmd_in_sync_mode(instance
, cmd
);
2632 mlist_del_init(&cmd
->list
);
2634 con_log(CL_ANN1
, (CE_CONT
,
2635 "mrsas_complete_pending_cmds:"
2638 con_log(CL_ANN1
, (CE_CONT
,
2639 "mrsas_complete_pending_cmds:"
2640 "looping for more commands\n"));
2642 mutex_exit(&instance
->cmd_pend_mtx
);
2644 con_log(CL_ANN1
, (CE_CONT
, "mrsas_complete_pending_cmds(): DONE\n"));
2645 return (DDI_SUCCESS
);
2649 mrsas_print_cmd_details(struct mrsas_instance
*instance
, struct mrsas_cmd
*cmd
,
2652 struct scsi_pkt
*pkt
= cmd
->pkt
;
2653 Mpi2RaidSCSIIORequest_t
*scsi_io
= cmd
->scsi_io_request
;
2656 ddi_acc_handle_t acc_handle
=
2657 instance
->mpi2_frame_pool_dma_obj
.acc_handle
;
2659 if (detail
== 0xDD) {
2660 saved_level
= debug_level_g
;
2661 debug_level_g
= CL_ANN1
;
2665 if (instance
->tbolt
) {
2666 con_log(CL_ANN1
, (CE_CONT
, "print_cmd_details: cmd %p "
2667 "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
2668 (void *)cmd
, cmd
->index
, cmd
->SMID
, cmd
->drv_pkt_time
));
2670 con_log(CL_ANN1
, (CE_CONT
, "print_cmd_details: cmd %p "
2671 "cmd->index 0x%x timer 0x%x sec\n",
2672 (void *)cmd
, cmd
->index
, cmd
->drv_pkt_time
));
2676 con_log(CL_ANN1
, (CE_CONT
, "scsi_pkt CDB[0]=0x%x",
2679 con_log(CL_ANN1
, (CE_CONT
, "NO-PKT"));
2682 if ((detail
== 0xDD) && instance
->tbolt
) {
2683 con_log(CL_ANN1
, (CE_CONT
, "RAID_SCSI_IO_REQUEST\n"));
2684 con_log(CL_ANN1
, (CE_CONT
, "DevHandle=0x%X Function=0x%X "
2685 "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
2686 ddi_get16(acc_handle
, &scsi_io
->DevHandle
),
2687 ddi_get8(acc_handle
, &scsi_io
->Function
),
2688 ddi_get16(acc_handle
, &scsi_io
->IoFlags
),
2689 ddi_get16(acc_handle
, &scsi_io
->SGLFlags
),
2690 ddi_get32(acc_handle
, &scsi_io
->DataLength
)));
2692 for (i
= 0; i
< 32; i
++) {
2693 con_log(CL_ANN1
, (CE_CONT
, "CDB[%d]=0x%x ", i
,
2694 ddi_get8(acc_handle
, &scsi_io
->CDB
.CDB32
[i
])));
2697 con_log(CL_ANN1
, (CE_CONT
, "RAID-CONTEXT\n"));
2698 con_log(CL_ANN1
, (CE_CONT
, "status=0x%X extStatus=0x%X "
2699 "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
2700 "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
2701 " regLockLength=0x%X spanArm=0x%X\n",
2702 ddi_get8(acc_handle
, &scsi_io
->RaidContext
.status
),
2703 ddi_get8(acc_handle
, &scsi_io
->RaidContext
.extStatus
),
2704 ddi_get16(acc_handle
, &scsi_io
->RaidContext
.ldTargetId
),
2705 ddi_get16(acc_handle
, &scsi_io
->RaidContext
.timeoutValue
),
2706 ddi_get8(acc_handle
, &scsi_io
->RaidContext
.regLockFlags
),
2707 ddi_get8(acc_handle
, &scsi_io
->RaidContext
.RAIDFlags
),
2708 ddi_get64(acc_handle
, &scsi_io
->RaidContext
.regLockRowLBA
),
2709 ddi_get32(acc_handle
, &scsi_io
->RaidContext
.regLockLength
),
2710 ddi_get8(acc_handle
, &scsi_io
->RaidContext
.spanArm
)));
2713 if (detail
== 0xDD) {
2714 debug_level_g
= saved_level
;
2720 mrsas_issue_pending_cmds(struct mrsas_instance
*instance
)
2722 mlist_t
*head
= &instance
->cmd_pend_list
;
2723 mlist_t
*tmp
= head
->next
;
2724 struct mrsas_cmd
*cmd
= NULL
;
2725 struct scsi_pkt
*pkt
;
2727 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_issue_pending_cmds(): Called"));
2728 while (tmp
!= head
) {
2729 mutex_enter(&instance
->cmd_pend_mtx
);
2730 cmd
= mlist_entry(tmp
, struct mrsas_cmd
, list
);
2732 mutex_exit(&instance
->cmd_pend_mtx
);
2734 con_log(CL_ANN1
, (CE_CONT
,
2735 "mrsas_issue_pending_cmds(): "
2736 "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
2737 (void *)cmd
, cmd
->index
, cmd
->drv_pkt_time
));
2739 /* Reset command timeout value */
2740 if (cmd
->drv_pkt_time
< debug_timeout_g
)
2741 cmd
->drv_pkt_time
= (uint16_t)debug_timeout_g
;
2743 cmd
->retry_count_for_ocr
++;
2745 dev_err(instance
->dip
, CE_CONT
,
2746 "cmd retry count = %d\n",
2747 cmd
->retry_count_for_ocr
);
2749 if (cmd
->retry_count_for_ocr
> IO_RETRY_COUNT
) {
2750 dev_err(instance
->dip
,
2751 CE_WARN
, "mrsas_issue_pending_cmds(): "
2752 "cmd->retry_count exceeded limit >%d\n",
2754 mrsas_print_cmd_details(instance
, cmd
, 0xDD);
2756 dev_err(instance
->dip
, CE_WARN
,
2757 "mrsas_issue_pending_cmds():"
2758 "Calling KILL Adapter");
2759 if (instance
->tbolt
)
2760 mrsas_tbolt_kill_adapter(instance
);
2762 (void) mrsas_kill_adapter(instance
);
2763 return (DDI_FAILURE
);
2768 con_log(CL_ANN1
, (CE_CONT
,
2769 "PENDING PKT-CMD ISSUE: cmd %p index %x "
2771 (void *)cmd
, cmd
->index
,
2776 dev_err(instance
->dip
, CE_CONT
,
2777 "mrsas_issue_pending_cmds(): NO-PKT, "
2778 "cmd %p index 0x%x drv_pkt_time 0x%x",
2779 (void *)cmd
, cmd
->index
, cmd
->drv_pkt_time
);
2783 if (cmd
->sync_cmd
== MRSAS_TRUE
) {
2784 dev_err(instance
->dip
, CE_CONT
,
2785 "mrsas_issue_pending_cmds(): "
2786 "SYNC_CMD == TRUE \n");
2787 instance
->func_ptr
->issue_cmd_in_sync_mode(
2790 instance
->func_ptr
->issue_cmd(cmd
, instance
);
2793 con_log(CL_ANN1
, (CE_CONT
,
2794 "mrsas_issue_pending_cmds: NULL command\n"));
2796 con_log(CL_ANN1
, (CE_CONT
,
2797 "mrsas_issue_pending_cmds:"
2798 "looping for more commands"));
2800 con_log(CL_ANN1
, (CE_CONT
, "mrsas_issue_pending_cmds(): DONE\n"));
2801 return (DDI_SUCCESS
);
2807 * destroy_mfi_frame_pool
2810 destroy_mfi_frame_pool(struct mrsas_instance
*instance
)
2813 uint32_t max_cmd
= instance
->max_fw_cmds
;
2815 struct mrsas_cmd
*cmd
;
2817 /* return all frames to pool */
2819 for (i
= 0; i
< max_cmd
; i
++) {
2821 cmd
= instance
->cmd_list
[i
];
2823 if (cmd
->frame_dma_obj_status
== DMA_OBJ_ALLOCATED
)
2824 (void) mrsas_free_dma_obj(instance
, cmd
->frame_dma_obj
);
2826 cmd
->frame_dma_obj_status
= DMA_OBJ_FREED
;
2832 * create_mfi_frame_pool
2835 create_mfi_frame_pool(struct mrsas_instance
*instance
)
2842 uint32_t tot_frame_size
;
2843 struct mrsas_cmd
*cmd
;
2844 int retval
= DDI_SUCCESS
;
2846 max_cmd
= instance
->max_fw_cmds
;
2847 sge_sz
= sizeof (struct mrsas_sge_ieee
);
2848 /* calculated the number of 64byte frames required for SGL */
2849 sgl_sz
= sge_sz
* instance
->max_num_sge
;
2850 tot_frame_size
= sgl_sz
+ MRMFI_FRAME_SIZE
+ SENSE_LENGTH
;
2852 con_log(CL_DLEVEL3
, (CE_NOTE
, "create_mfi_frame_pool: "
2853 "sgl_sz %x tot_frame_size %x", sgl_sz
, tot_frame_size
));
2855 while (i
< max_cmd
) {
2856 cmd
= instance
->cmd_list
[i
];
2858 cmd
->frame_dma_obj
.size
= tot_frame_size
;
2859 cmd
->frame_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
2860 cmd
->frame_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
2861 cmd
->frame_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
2862 cmd
->frame_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
2863 cmd
->frame_dma_obj
.dma_attr
.dma_attr_align
= 64;
2865 cookie_cnt
= mrsas_alloc_dma_obj(instance
, &cmd
->frame_dma_obj
,
2866 (uchar_t
)DDI_STRUCTURE_LE_ACC
);
2868 if (cookie_cnt
== -1 || cookie_cnt
> 1) {
2869 dev_err(instance
->dip
, CE_WARN
,
2870 "create_mfi_frame_pool: could not alloc.");
2871 retval
= DDI_FAILURE
;
2872 goto mrsas_undo_frame_pool
;
2875 bzero(cmd
->frame_dma_obj
.buffer
, tot_frame_size
);
2877 cmd
->frame_dma_obj_status
= DMA_OBJ_ALLOCATED
;
2878 cmd
->frame
= (union mrsas_frame
*)cmd
->frame_dma_obj
.buffer
;
2879 cmd
->frame_phys_addr
=
2880 cmd
->frame_dma_obj
.dma_cookie
[0].dmac_address
;
2882 cmd
->sense
= (uint8_t *)(((unsigned long)
2883 cmd
->frame_dma_obj
.buffer
) +
2884 tot_frame_size
- SENSE_LENGTH
);
2885 cmd
->sense_phys_addr
=
2886 cmd
->frame_dma_obj
.dma_cookie
[0].dmac_address
+
2887 tot_frame_size
- SENSE_LENGTH
;
2889 if (!cmd
->frame
|| !cmd
->sense
) {
2890 dev_err(instance
->dip
, CE_WARN
,
2891 "pci_pool_alloc failed");
2893 goto mrsas_undo_frame_pool
;
2896 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
2897 &cmd
->frame
->io
.context
, cmd
->index
);
2900 con_log(CL_DLEVEL3
, (CE_NOTE
, "[%x]-%x",
2901 cmd
->index
, cmd
->frame_phys_addr
));
2904 return (DDI_SUCCESS
);
2906 mrsas_undo_frame_pool
:
2908 destroy_mfi_frame_pool(instance
);
2914 * free_additional_dma_buffer
2917 free_additional_dma_buffer(struct mrsas_instance
*instance
)
2919 if (instance
->mfi_internal_dma_obj
.status
== DMA_OBJ_ALLOCATED
) {
2920 (void) mrsas_free_dma_obj(instance
,
2921 instance
->mfi_internal_dma_obj
);
2922 instance
->mfi_internal_dma_obj
.status
= DMA_OBJ_FREED
;
2925 if (instance
->mfi_evt_detail_obj
.status
== DMA_OBJ_ALLOCATED
) {
2926 (void) mrsas_free_dma_obj(instance
,
2927 instance
->mfi_evt_detail_obj
);
2928 instance
->mfi_evt_detail_obj
.status
= DMA_OBJ_FREED
;
2933 * alloc_additional_dma_buffer
2936 alloc_additional_dma_buffer(struct mrsas_instance
*instance
)
2938 uint32_t reply_q_sz
;
2939 uint32_t internal_buf_size
= PAGESIZE
*2;
2941 /* max cmds plus 1 + producer & consumer */
2942 reply_q_sz
= sizeof (uint32_t) * (instance
->max_fw_cmds
+ 1 + 2);
2944 instance
->mfi_internal_dma_obj
.size
= internal_buf_size
;
2945 instance
->mfi_internal_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
2946 instance
->mfi_internal_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
2947 instance
->mfi_internal_dma_obj
.dma_attr
.dma_attr_count_max
=
2949 instance
->mfi_internal_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
2951 if (mrsas_alloc_dma_obj(instance
, &instance
->mfi_internal_dma_obj
,
2952 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
2953 dev_err(instance
->dip
, CE_WARN
,
2954 "could not alloc reply queue");
2955 return (DDI_FAILURE
);
2958 bzero(instance
->mfi_internal_dma_obj
.buffer
, internal_buf_size
);
2960 instance
->mfi_internal_dma_obj
.status
|= DMA_OBJ_ALLOCATED
;
2962 instance
->producer
= (uint32_t *)((unsigned long)
2963 instance
->mfi_internal_dma_obj
.buffer
);
2964 instance
->consumer
= (uint32_t *)((unsigned long)
2965 instance
->mfi_internal_dma_obj
.buffer
+ 4);
2966 instance
->reply_queue
= (uint32_t *)((unsigned long)
2967 instance
->mfi_internal_dma_obj
.buffer
+ 8);
2968 instance
->internal_buf
= (caddr_t
)(((unsigned long)
2969 instance
->mfi_internal_dma_obj
.buffer
) + reply_q_sz
+ 8);
2970 instance
->internal_buf_dmac_add
=
2971 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
+
2973 instance
->internal_buf_size
= internal_buf_size
-
2976 /* allocate evt_detail */
2977 instance
->mfi_evt_detail_obj
.size
= sizeof (struct mrsas_evt_detail
);
2978 instance
->mfi_evt_detail_obj
.dma_attr
= mrsas_generic_dma_attr
;
2979 instance
->mfi_evt_detail_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
2980 instance
->mfi_evt_detail_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
2981 instance
->mfi_evt_detail_obj
.dma_attr
.dma_attr_sgllen
= 1;
2982 instance
->mfi_evt_detail_obj
.dma_attr
.dma_attr_align
= 1;
2984 if (mrsas_alloc_dma_obj(instance
, &instance
->mfi_evt_detail_obj
,
2985 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
2986 dev_err(instance
->dip
, CE_WARN
, "alloc_additional_dma_buffer: "
2987 "could not allocate data transfer buffer.");
2988 goto mrsas_undo_internal_buff
;
2991 bzero(instance
->mfi_evt_detail_obj
.buffer
,
2992 sizeof (struct mrsas_evt_detail
));
2994 instance
->mfi_evt_detail_obj
.status
|= DMA_OBJ_ALLOCATED
;
2996 return (DDI_SUCCESS
);
2998 mrsas_undo_internal_buff
:
2999 if (instance
->mfi_internal_dma_obj
.status
== DMA_OBJ_ALLOCATED
) {
3000 (void) mrsas_free_dma_obj(instance
,
3001 instance
->mfi_internal_dma_obj
);
3002 instance
->mfi_internal_dma_obj
.status
= DMA_OBJ_FREED
;
3005 return (DDI_FAILURE
);
3010 mrsas_free_cmd_pool(struct mrsas_instance
*instance
)
3017 if (instance
->cmd_list
== NULL
) {
3021 max_cmd
= instance
->max_fw_cmds
;
3023 /* size of cmd_list array */
3024 sz
= sizeof (struct mrsas_cmd
*) * max_cmd
;
3026 /* First free each cmd */
3027 for (i
= 0; i
< max_cmd
; i
++) {
3028 if (instance
->cmd_list
[i
] != NULL
) {
3029 kmem_free(instance
->cmd_list
[i
],
3030 sizeof (struct mrsas_cmd
));
3033 instance
->cmd_list
[i
] = NULL
;
3036 /* Now, free cmd_list array */
3037 if (instance
->cmd_list
!= NULL
)
3038 kmem_free(instance
->cmd_list
, sz
);
3040 instance
->cmd_list
= NULL
;
3042 INIT_LIST_HEAD(&instance
->cmd_pool_list
);
3043 INIT_LIST_HEAD(&instance
->cmd_pend_list
);
3044 if (instance
->tbolt
) {
3045 INIT_LIST_HEAD(&instance
->cmd_app_pool_list
);
3047 INIT_LIST_HEAD(&instance
->app_cmd_pool_list
);
3054 * mrsas_alloc_cmd_pool
3057 mrsas_alloc_cmd_pool(struct mrsas_instance
*instance
)
3062 uint32_t reserve_cmd
;
3065 struct mrsas_cmd
*cmd
;
3067 max_cmd
= instance
->max_fw_cmds
;
3068 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_alloc_cmd_pool: "
3069 "max_cmd %x", max_cmd
));
3072 sz
= sizeof (struct mrsas_cmd
*) * max_cmd
;
3075 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3076 * Allocate the dynamic array first and then allocate individual
3079 instance
->cmd_list
= kmem_zalloc(sz
, KM_SLEEP
);
3080 ASSERT(instance
->cmd_list
);
3082 /* create a frame pool and assign one frame to each cmd */
3083 for (count
= 0; count
< max_cmd
; count
++) {
3084 instance
->cmd_list
[count
] =
3085 kmem_zalloc(sizeof (struct mrsas_cmd
), KM_SLEEP
);
3086 ASSERT(instance
->cmd_list
[count
]);
3089 /* add all the commands to command pool */
3091 INIT_LIST_HEAD(&instance
->cmd_pool_list
);
3092 INIT_LIST_HEAD(&instance
->cmd_pend_list
);
3093 INIT_LIST_HEAD(&instance
->app_cmd_pool_list
);
3096 * When max_cmd is lower than MRSAS_APP_RESERVED_CMDS, how do I split
3097 * into app_cmd and regular cmd? For now, just take
3098 * max(1/8th of max, 4);
3100 reserve_cmd
= min(MRSAS_APP_RESERVED_CMDS
,
3101 max(max_cmd
>> 3, MRSAS_APP_MIN_RESERVED_CMDS
));
3103 for (i
= 0; i
< reserve_cmd
; i
++) {
3104 cmd
= instance
->cmd_list
[i
];
3106 mlist_add_tail(&cmd
->list
, &instance
->app_cmd_pool_list
);
3110 for (i
= reserve_cmd
; i
< max_cmd
; i
++) {
3111 cmd
= instance
->cmd_list
[i
];
3113 mlist_add_tail(&cmd
->list
, &instance
->cmd_pool_list
);
3116 return (DDI_SUCCESS
);
3121 for (i
= 0; i
< count
; i
++) {
3122 if (instance
->cmd_list
[i
] != NULL
) {
3123 kmem_free(instance
->cmd_list
[i
],
3124 sizeof (struct mrsas_cmd
));
3126 instance
->cmd_list
[i
] = NULL
;
3130 mrsas_undo_cmd_list
:
3131 if (instance
->cmd_list
!= NULL
)
3132 kmem_free(instance
->cmd_list
, sz
);
3133 instance
->cmd_list
= NULL
;
3135 return (DDI_FAILURE
);
3140 * free_space_for_mfi
3143 free_space_for_mfi(struct mrsas_instance
*instance
)
3147 if (instance
->cmd_list
== NULL
) {
3151 /* Free additional dma buffer */
3152 free_additional_dma_buffer(instance
);
3154 /* Free the MFI frame pool */
3155 destroy_mfi_frame_pool(instance
);
3157 /* Free all the commands in the cmd_list */
3158 /* Free the cmd_list buffer itself */
3159 mrsas_free_cmd_pool(instance
);
3163 * alloc_space_for_mfi
3166 alloc_space_for_mfi(struct mrsas_instance
*instance
)
3168 /* Allocate command pool (memory for cmd_list & individual commands) */
3169 if (mrsas_alloc_cmd_pool(instance
)) {
3170 dev_err(instance
->dip
, CE_WARN
, "error creating cmd pool");
3171 return (DDI_FAILURE
);
3174 /* Allocate MFI Frame pool */
3175 if (create_mfi_frame_pool(instance
)) {
3176 dev_err(instance
->dip
, CE_WARN
,
3177 "error creating frame DMA pool");
3178 goto mfi_undo_cmd_pool
;
3181 /* Allocate additional DMA buffer */
3182 if (alloc_additional_dma_buffer(instance
)) {
3183 dev_err(instance
->dip
, CE_WARN
,
3184 "error creating frame DMA pool");
3185 goto mfi_undo_frame_pool
;
3188 return (DDI_SUCCESS
);
3190 mfi_undo_frame_pool
:
3191 destroy_mfi_frame_pool(instance
);
3194 mrsas_free_cmd_pool(instance
);
3196 return (DDI_FAILURE
);
3205 get_ctrl_info(struct mrsas_instance
*instance
,
3206 struct mrsas_ctrl_info
*ctrl_info
)
3210 struct mrsas_cmd
*cmd
;
3211 struct mrsas_dcmd_frame
*dcmd
;
3212 struct mrsas_ctrl_info
*ci
;
3214 if (instance
->tbolt
) {
3215 cmd
= get_raid_msg_mfi_pkt(instance
);
3217 cmd
= mrsas_get_mfi_pkt(instance
);
3221 con_log(CL_ANN
, (CE_WARN
,
3222 "Failed to get a cmd for ctrl info"));
3223 DTRACE_PROBE2(info_mfi_err
, uint16_t, instance
->fw_outstanding
,
3224 uint16_t, instance
->max_fw_cmds
);
3225 return (DDI_FAILURE
);
3228 /* Clear the frame buffer and assign back the context id */
3229 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
3230 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &cmd
->frame
->hdr
.context
,
3233 dcmd
= &cmd
->frame
->dcmd
;
3235 ci
= (struct mrsas_ctrl_info
*)instance
->internal_buf
;
3238 dev_err(instance
->dip
, CE_WARN
,
3239 "Failed to alloc mem for ctrl info");
3240 mrsas_return_mfi_pkt(instance
, cmd
);
3241 return (DDI_FAILURE
);
3244 (void) memset(ci
, 0, sizeof (struct mrsas_ctrl_info
));
3246 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3247 (void) memset(dcmd
->mbox
.b
, 0, DCMD_MBOX_SZ
);
3249 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->cmd
, MFI_CMD_OP_DCMD
);
3250 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->cmd_status
,
3251 MFI_CMD_STATUS_POLL_MODE
);
3252 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sge_count
, 1);
3253 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->flags
,
3254 MFI_FRAME_DIR_READ
);
3255 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->timeout
, 0);
3256 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->data_xfer_len
,
3257 sizeof (struct mrsas_ctrl_info
));
3258 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->opcode
,
3259 MR_DCMD_CTRL_GET_INFO
);
3260 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sgl
.sge32
[0].phys_addr
,
3261 instance
->internal_buf_dmac_add
);
3262 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sgl
.sge32
[0].length
,
3263 sizeof (struct mrsas_ctrl_info
));
3265 cmd
->frame_count
= 1;
3267 if (instance
->tbolt
) {
3268 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
3271 if (!instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
)) {
3274 ctrl_info
->max_request_size
= ddi_get32(
3275 cmd
->frame_dma_obj
.acc_handle
, &ci
->max_request_size
);
3277 ctrl_info
->ld_present_count
= ddi_get16(
3278 cmd
->frame_dma_obj
.acc_handle
, &ci
->ld_present_count
);
3280 ctrl_info
->properties
.on_off_properties
= ddi_get32(
3281 cmd
->frame_dma_obj
.acc_handle
,
3282 &ci
->properties
.on_off_properties
);
3283 ddi_rep_get8(cmd
->frame_dma_obj
.acc_handle
,
3284 (uint8_t *)(ctrl_info
->product_name
),
3285 (uint8_t *)(ci
->product_name
), 80 * sizeof (char),
3287 /* should get more members of ci with ddi_get when needed */
3289 dev_err(instance
->dip
, CE_WARN
,
3290 "get_ctrl_info: Ctrl info failed");
3294 if (mrsas_common_check(instance
, cmd
) != DDI_SUCCESS
) {
3297 if (instance
->tbolt
) {
3298 return_raid_msg_mfi_pkt(instance
, cmd
);
3300 mrsas_return_mfi_pkt(instance
, cmd
);
3310 abort_aen_cmd(struct mrsas_instance
*instance
,
3311 struct mrsas_cmd
*cmd_to_abort
)
3315 struct mrsas_cmd
*cmd
;
3316 struct mrsas_abort_frame
*abort_fr
;
3318 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt: abort_aen:%d", __LINE__
));
3320 if (instance
->tbolt
) {
3321 cmd
= get_raid_msg_mfi_pkt(instance
);
3323 cmd
= mrsas_get_mfi_pkt(instance
);
3327 con_log(CL_ANN1
, (CE_WARN
,
3328 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3329 DTRACE_PROBE2(abort_mfi_err
, uint16_t, instance
->fw_outstanding
,
3330 uint16_t, instance
->max_fw_cmds
);
3331 return (DDI_FAILURE
);
3334 /* Clear the frame buffer and assign back the context id */
3335 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
3336 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &cmd
->frame
->hdr
.context
,
3339 abort_fr
= &cmd
->frame
->abort
;
3341 /* prepare and issue the abort frame */
3342 ddi_put8(cmd
->frame_dma_obj
.acc_handle
,
3343 &abort_fr
->cmd
, MFI_CMD_OP_ABORT
);
3344 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &abort_fr
->cmd_status
,
3345 MFI_CMD_STATUS_SYNC_MODE
);
3346 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &abort_fr
->flags
, 0);
3347 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &abort_fr
->abort_context
,
3348 cmd_to_abort
->index
);
3349 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3350 &abort_fr
->abort_mfi_phys_addr_lo
, cmd_to_abort
->frame_phys_addr
);
3351 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3352 &abort_fr
->abort_mfi_phys_addr_hi
, 0);
3354 instance
->aen_cmd
->abort_aen
= 1;
3356 cmd
->frame_count
= 1;
3358 if (instance
->tbolt
) {
3359 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
3362 if (instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
)) {
3363 con_log(CL_ANN1
, (CE_WARN
,
3364 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3370 instance
->aen_cmd
->abort_aen
= 1;
3371 instance
->aen_cmd
= 0;
3373 if (instance
->tbolt
) {
3374 return_raid_msg_mfi_pkt(instance
, cmd
);
3376 mrsas_return_mfi_pkt(instance
, cmd
);
3379 atomic_add_16(&instance
->fw_outstanding
, (-1));
3386 mrsas_build_init_cmd(struct mrsas_instance
*instance
,
3387 struct mrsas_cmd
**cmd_ptr
)
3389 struct mrsas_cmd
*cmd
;
3390 struct mrsas_init_frame
*init_frame
;
3391 struct mrsas_init_queue_info
*initq_info
;
3392 struct mrsas_drv_ver drv_ver_info
;
3396 * Prepare a init frame. Note the init frame points to queue info
3397 * structure. Each frame has SGL allocated after first 64 bytes. For
3398 * this frame - since we don't need any SGL - we use SGL's space as
3399 * queue info structure
3404 /* Clear the frame buffer and assign back the context id */
3405 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
3406 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &cmd
->frame
->hdr
.context
,
3409 init_frame
= (struct mrsas_init_frame
*)cmd
->frame
;
3410 initq_info
= (struct mrsas_init_queue_info
*)
3411 ((unsigned long)init_frame
+ 64);
3413 (void) memset(init_frame
, 0, MRMFI_FRAME_SIZE
);
3414 (void) memset(initq_info
, 0, sizeof (struct mrsas_init_queue_info
));
3416 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &initq_info
->init_flags
, 0);
3418 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3419 &initq_info
->reply_queue_entries
, instance
->max_fw_cmds
+ 1);
3421 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3422 &initq_info
->producer_index_phys_addr_hi
, 0);
3423 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3424 &initq_info
->producer_index_phys_addr_lo
,
3425 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
);
3427 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3428 &initq_info
->consumer_index_phys_addr_hi
, 0);
3429 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3430 &initq_info
->consumer_index_phys_addr_lo
,
3431 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
+ 4);
3433 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3434 &initq_info
->reply_queue_start_phys_addr_hi
, 0);
3435 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3436 &initq_info
->reply_queue_start_phys_addr_lo
,
3437 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
+ 8);
3439 ddi_put8(cmd
->frame_dma_obj
.acc_handle
,
3440 &init_frame
->cmd
, MFI_CMD_OP_INIT
);
3441 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &init_frame
->cmd_status
,
3442 MFI_CMD_STATUS_POLL_MODE
);
3443 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &init_frame
->flags
, 0);
3444 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3445 &init_frame
->queue_info_new_phys_addr_lo
,
3446 cmd
->frame_phys_addr
+ 64);
3447 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3448 &init_frame
->queue_info_new_phys_addr_hi
, 0);
3451 /* fill driver version information */
3452 fill_up_drv_ver(&drv_ver_info
);
3454 /* allocate the driver version data transfer buffer */
3455 instance
->drv_ver_dma_obj
.size
= sizeof (drv_ver_info
.drv_ver
);
3456 instance
->drv_ver_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
3457 instance
->drv_ver_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
3458 instance
->drv_ver_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
3459 instance
->drv_ver_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
3460 instance
->drv_ver_dma_obj
.dma_attr
.dma_attr_align
= 1;
3462 if (mrsas_alloc_dma_obj(instance
, &instance
->drv_ver_dma_obj
,
3463 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
3464 con_log(CL_ANN
, (CE_WARN
,
3465 "init_mfi : Could not allocate driver version buffer."));
3466 return (DDI_FAILURE
);
3468 /* copy driver version to dma buffer */
3469 (void) memset(instance
->drv_ver_dma_obj
.buffer
, 0,
3470 sizeof (drv_ver_info
.drv_ver
));
3471 ddi_rep_put8(cmd
->frame_dma_obj
.acc_handle
,
3472 (uint8_t *)drv_ver_info
.drv_ver
,
3473 (uint8_t *)instance
->drv_ver_dma_obj
.buffer
,
3474 sizeof (drv_ver_info
.drv_ver
), DDI_DEV_AUTOINCR
);
3477 /* copy driver version physical address to init frame */
3478 ddi_put64(cmd
->frame_dma_obj
.acc_handle
, &init_frame
->driverversion
,
3479 instance
->drv_ver_dma_obj
.dma_cookie
[0].dmac_address
);
3481 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &init_frame
->data_xfer_len
,
3482 sizeof (struct mrsas_init_queue_info
));
3484 cmd
->frame_count
= 1;
3488 return (DDI_SUCCESS
);
3493 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3496 mrsas_init_adapter_ppc(struct mrsas_instance
*instance
)
3498 struct mrsas_cmd
*cmd
;
3501 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
3504 if (alloc_space_for_mfi(instance
) != DDI_SUCCESS
) {
3505 con_log(CL_ANN
, (CE_NOTE
,
3506 "Error, failed to allocate memory for MFI adapter"));
3507 return (DDI_FAILURE
);
3510 /* Build INIT command */
3511 cmd
= mrsas_get_mfi_pkt(instance
);
3513 DTRACE_PROBE2(init_adapter_mfi_err
, uint16_t,
3514 instance
->fw_outstanding
, uint16_t, instance
->max_fw_cmds
);
3515 return (DDI_FAILURE
);
3518 if (mrsas_build_init_cmd(instance
, &cmd
) != DDI_SUCCESS
) {
3520 (CE_NOTE
, "Error, failed to build INIT command"));
3522 goto fail_undo_alloc_mfi_space
;
3526 * Disable interrupt before sending init frame ( see linux driver code)
3527 * send INIT MFI frame in polled mode
3529 if (instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
)) {
3530 con_log(CL_ANN
, (CE_WARN
, "failed to init firmware"));
3534 if (mrsas_common_check(instance
, cmd
) != DDI_SUCCESS
)
3536 mrsas_return_mfi_pkt(instance
, cmd
);
3539 (instance
->func_ptr
->read_fw_status_reg(instance
) & 0x04000000)) {
3540 con_log(CL_ANN
, (CE_NOTE
, "mr_sas: IEEE SGL's supported"));
3541 instance
->flag_ieee
= 1;
3543 instance
->flag_ieee
= 0;
3546 ASSERT(!instance
->skinny
|| instance
->flag_ieee
);
3548 instance
->unroll
.alloc_space_mfi
= 1;
3549 instance
->unroll
.verBuff
= 1;
3551 return (DDI_SUCCESS
);
3555 (void) mrsas_free_dma_obj(instance
, instance
->drv_ver_dma_obj
);
3557 fail_undo_alloc_mfi_space
:
3558 mrsas_return_mfi_pkt(instance
, cmd
);
3559 free_space_for_mfi(instance
);
3561 return (DDI_FAILURE
);
3566 * mrsas_init_adapter - Initialize adapter.
3569 mrsas_init_adapter(struct mrsas_instance
*instance
)
3571 struct mrsas_ctrl_info ctrl_info
;
3574 /* we expect the FW state to be READY */
3575 if (mfi_state_transition_to_ready(instance
)) {
3576 con_log(CL_ANN
, (CE_WARN
, "mr_sas: F/W is not ready"));
3577 return (DDI_FAILURE
);
3580 /* get various operational parameters from status register */
3581 instance
->max_num_sge
=
3582 (instance
->func_ptr
->read_fw_status_reg(instance
) &
3584 instance
->max_num_sge
=
3585 (instance
->max_num_sge
> MRSAS_MAX_SGE_CNT
) ?
3586 MRSAS_MAX_SGE_CNT
: instance
->max_num_sge
;
3589 * Reduce the max supported cmds by 1. This is to ensure that the
3590 * reply_q_sz (1 more than the max cmd that driver may send)
3591 * does not exceed max cmds that the FW can support
3593 instance
->max_fw_cmds
=
3594 instance
->func_ptr
->read_fw_status_reg(instance
) & 0xFFFF;
3595 instance
->max_fw_cmds
= instance
->max_fw_cmds
- 1;
3599 /* Initialize adapter */
3600 if (instance
->func_ptr
->init_adapter(instance
) != DDI_SUCCESS
) {
3602 (CE_WARN
, "mr_sas: could not initialize adapter"));
3603 return (DDI_FAILURE
);
3606 /* gather misc FW related information */
3607 instance
->disable_online_ctrl_reset
= 0;
3609 if (!get_ctrl_info(instance
, &ctrl_info
)) {
3610 instance
->max_sectors_per_req
= ctrl_info
.max_request_size
;
3611 con_log(CL_ANN1
, (CE_NOTE
,
3612 "product name %s ld present %d",
3613 ctrl_info
.product_name
, ctrl_info
.ld_present_count
));
3615 instance
->max_sectors_per_req
= instance
->max_num_sge
*
3619 if (ctrl_info
.properties
.on_off_properties
& DISABLE_OCR_PROP_FLAG
)
3620 instance
->disable_online_ctrl_reset
= 1;
3622 return (DDI_SUCCESS
);
3629 mrsas_issue_init_mfi(struct mrsas_instance
*instance
)
3631 struct mrsas_cmd
*cmd
;
3632 struct mrsas_init_frame
*init_frame
;
3633 struct mrsas_init_queue_info
*initq_info
;
3636 * Prepare a init frame. Note the init frame points to queue info
3637 * structure. Each frame has SGL allocated after first 64 bytes. For
3638 * this frame - since we don't need any SGL - we use SGL's space as
3639 * queue info structure
3641 con_log(CL_ANN1
, (CE_NOTE
,
3642 "mrsas_issue_init_mfi: entry\n"));
3643 cmd
= get_mfi_app_pkt(instance
);
3646 con_log(CL_ANN1
, (CE_WARN
,
3647 "mrsas_issue_init_mfi: get_pkt failed\n"));
3648 return (DDI_FAILURE
);
3651 /* Clear the frame buffer and assign back the context id */
3652 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
3653 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &cmd
->frame
->hdr
.context
,
3656 init_frame
= (struct mrsas_init_frame
*)cmd
->frame
;
3657 initq_info
= (struct mrsas_init_queue_info
*)
3658 ((unsigned long)init_frame
+ 64);
3660 (void) memset(init_frame
, 0, MRMFI_FRAME_SIZE
);
3661 (void) memset(initq_info
, 0, sizeof (struct mrsas_init_queue_info
));
3663 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &initq_info
->init_flags
, 0);
3665 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3666 &initq_info
->reply_queue_entries
, instance
->max_fw_cmds
+ 1);
3667 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3668 &initq_info
->producer_index_phys_addr_hi
, 0);
3669 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3670 &initq_info
->producer_index_phys_addr_lo
,
3671 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
);
3672 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3673 &initq_info
->consumer_index_phys_addr_hi
, 0);
3674 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3675 &initq_info
->consumer_index_phys_addr_lo
,
3676 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
+ 4);
3678 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3679 &initq_info
->reply_queue_start_phys_addr_hi
, 0);
3680 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3681 &initq_info
->reply_queue_start_phys_addr_lo
,
3682 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
+ 8);
3684 ddi_put8(cmd
->frame_dma_obj
.acc_handle
,
3685 &init_frame
->cmd
, MFI_CMD_OP_INIT
);
3686 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &init_frame
->cmd_status
,
3687 MFI_CMD_STATUS_POLL_MODE
);
3688 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &init_frame
->flags
, 0);
3689 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3690 &init_frame
->queue_info_new_phys_addr_lo
,
3691 cmd
->frame_phys_addr
+ 64);
3692 ddi_put32(cmd
->frame_dma_obj
.acc_handle
,
3693 &init_frame
->queue_info_new_phys_addr_hi
, 0);
3695 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &init_frame
->data_xfer_len
,
3696 sizeof (struct mrsas_init_queue_info
));
3698 cmd
->frame_count
= 1;
3700 /* issue the init frame in polled mode */
3701 if (instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
)) {
3702 con_log(CL_ANN1
, (CE_WARN
,
3703 "mrsas_issue_init_mfi():failed to "
3705 return_mfi_app_pkt(instance
, cmd
);
3706 return (DDI_FAILURE
);
3709 if (mrsas_common_check(instance
, cmd
) != DDI_SUCCESS
) {
3710 return_mfi_app_pkt(instance
, cmd
);
3711 return (DDI_FAILURE
);
3714 return_mfi_app_pkt(instance
, cmd
);
3715 con_log(CL_ANN1
, (CE_CONT
, "mrsas_issue_init_mfi: Done"));
3717 return (DDI_SUCCESS
);
3720 * mfi_state_transition_to_ready : Move the FW to READY state
3722 * @reg_set : MFI register set
3725 mfi_state_transition_to_ready(struct mrsas_instance
*instance
)
3729 uint32_t fw_ctrl
= 0;
3732 uint32_t cur_abs_reg_val
;
3733 uint32_t prev_abs_reg_val
;
3737 instance
->func_ptr
->read_fw_status_reg(instance
);
3739 cur_abs_reg_val
& MFI_STATE_MASK
;
3740 con_log(CL_ANN1
, (CE_CONT
,
3741 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state
));
3743 while (fw_state
!= MFI_STATE_READY
) {
3744 con_log(CL_ANN
, (CE_CONT
,
3745 "mfi_state_transition_to_ready:FW state%x", fw_state
));
3748 case MFI_STATE_FAULT
:
3749 con_log(CL_ANN
, (CE_NOTE
,
3750 "mr_sas: FW in FAULT state!!"));
3753 case MFI_STATE_WAIT_HANDSHAKE
:
3754 /* set the CLR bit in IMR0 */
3755 con_log(CL_ANN1
, (CE_NOTE
,
3756 "mr_sas: FW waiting for HANDSHAKE"));
3758 * PCI_Hot Plug: MFI F/W requires
3759 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3762 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3763 if (!instance
->tbolt
&& !instance
->skinny
) {
3764 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE
|
3765 MFI_INIT_HOTPLUG
, instance
);
3767 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE
|
3768 MFI_INIT_HOTPLUG
, instance
);
3770 max_wait
= (instance
->tbolt
== 1) ? 180 : 2;
3771 cur_state
= MFI_STATE_WAIT_HANDSHAKE
;
3773 case MFI_STATE_BOOT_MESSAGE_PENDING
:
3774 /* set the CLR bit in IMR0 */
3775 con_log(CL_ANN1
, (CE_NOTE
,
3776 "mr_sas: FW state boot message pending"));
3778 * PCI_Hot Plug: MFI F/W requires
3779 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3782 if (!instance
->tbolt
&& !instance
->skinny
) {
3783 WR_IB_DOORBELL(MFI_INIT_HOTPLUG
, instance
);
3785 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG
,
3788 max_wait
= (instance
->tbolt
== 1) ? 180 : 10;
3789 cur_state
= MFI_STATE_BOOT_MESSAGE_PENDING
;
3791 case MFI_STATE_OPERATIONAL
:
3792 /* bring it to READY state; assuming max wait 2 secs */
3793 instance
->func_ptr
->disable_intr(instance
);
3794 con_log(CL_ANN1
, (CE_NOTE
,
3795 "mr_sas: FW in OPERATIONAL state"));
3797 * PCI_Hot Plug: MFI F/W requires
3798 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3801 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3802 if (!instance
->tbolt
&& !instance
->skinny
) {
3803 WR_IB_DOORBELL(MFI_RESET_FLAGS
, instance
);
3805 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS
,
3808 for (i
= 0; i
< (10 * 1000); i
++) {
3810 RD_RESERVED0_REGISTER(instance
);
3819 max_wait
= (instance
->tbolt
== 1) ? 180 : 10;
3820 cur_state
= MFI_STATE_OPERATIONAL
;
3822 case MFI_STATE_UNDEFINED
:
3823 /* this state should not last for more than 2 seconds */
3824 con_log(CL_ANN1
, (CE_NOTE
, "FW state undefined"));
3826 max_wait
= (instance
->tbolt
== 1) ? 180 : 2;
3827 cur_state
= MFI_STATE_UNDEFINED
;
3829 case MFI_STATE_BB_INIT
:
3830 max_wait
= (instance
->tbolt
== 1) ? 180 : 2;
3831 cur_state
= MFI_STATE_BB_INIT
;
3833 case MFI_STATE_FW_INIT
:
3834 max_wait
= (instance
->tbolt
== 1) ? 180 : 2;
3835 cur_state
= MFI_STATE_FW_INIT
;
3837 case MFI_STATE_FW_INIT_2
:
3839 cur_state
= MFI_STATE_FW_INIT_2
;
3841 case MFI_STATE_DEVICE_SCAN
:
3843 cur_state
= MFI_STATE_DEVICE_SCAN
;
3844 prev_abs_reg_val
= cur_abs_reg_val
;
3845 con_log(CL_NONE
, (CE_NOTE
,
3846 "Device scan in progress ...\n"));
3848 case MFI_STATE_FLUSH_CACHE
:
3850 cur_state
= MFI_STATE_FLUSH_CACHE
;
3853 con_log(CL_ANN1
, (CE_NOTE
,
3854 "mr_sas: Unknown state 0x%x", fw_state
));
3858 /* the cur_state should not last for more than max_wait secs */
3859 for (i
= 0; i
< (max_wait
* MILLISEC
); i
++) {
3860 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3862 instance
->func_ptr
->read_fw_status_reg(instance
);
3863 fw_state
= cur_abs_reg_val
& MFI_STATE_MASK
;
3865 if (fw_state
== cur_state
) {
3871 if (fw_state
== MFI_STATE_DEVICE_SCAN
) {
3872 if (prev_abs_reg_val
!= cur_abs_reg_val
) {
3877 /* return error if fw_state hasn't changed after max_wait */
3878 if (fw_state
== cur_state
) {
3879 con_log(CL_ANN1
, (CE_WARN
,
3880 "FW state hasn't changed in %d secs", max_wait
));
3885 /* This may also need to apply to Skinny, but for now, don't worry. */
3886 if (!instance
->tbolt
&& !instance
->skinny
) {
3887 fw_ctrl
= RD_IB_DOORBELL(instance
);
3888 con_log(CL_ANN1
, (CE_CONT
,
3889 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl
));
3892 * Write 0xF to the doorbell register to do the following.
3893 * - Abort all outstanding commands (bit 0).
3894 * - Transition from OPERATIONAL to READY state (bit 1).
3895 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3896 * - Set to release FW to continue running (i.e. BIOS handshake
3899 WR_IB_DOORBELL(0xF, instance
);
3902 if (mrsas_check_acc_handle(instance
->regmap_handle
) != DDI_SUCCESS
) {
3906 return (DDI_SUCCESS
);
3913 get_seq_num(struct mrsas_instance
*instance
,
3914 struct mrsas_evt_log_info
*eli
)
3916 int ret
= DDI_SUCCESS
;
3918 dma_obj_t dcmd_dma_obj
;
3919 struct mrsas_cmd
*cmd
;
3920 struct mrsas_dcmd_frame
*dcmd
;
3921 struct mrsas_evt_log_info
*eli_tmp
;
3922 if (instance
->tbolt
) {
3923 cmd
= get_raid_msg_mfi_pkt(instance
);
3925 cmd
= mrsas_get_mfi_pkt(instance
);
3929 dev_err(instance
->dip
, CE_WARN
, "failed to get a cmd");
3930 DTRACE_PROBE2(seq_num_mfi_err
, uint16_t,
3931 instance
->fw_outstanding
, uint16_t, instance
->max_fw_cmds
);
3935 /* Clear the frame buffer and assign back the context id */
3936 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
3937 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &cmd
->frame
->hdr
.context
,
3940 dcmd
= &cmd
->frame
->dcmd
;
3942 /* allocate the data transfer buffer */
3943 dcmd_dma_obj
.size
= sizeof (struct mrsas_evt_log_info
);
3944 dcmd_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
3945 dcmd_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
3946 dcmd_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
3947 dcmd_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
3948 dcmd_dma_obj
.dma_attr
.dma_attr_align
= 1;
3950 if (mrsas_alloc_dma_obj(instance
, &dcmd_dma_obj
,
3951 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
3952 dev_err(instance
->dip
, CE_WARN
,
3953 "get_seq_num: could not allocate data transfer buffer.");
3954 return (DDI_FAILURE
);
3957 (void) memset(dcmd_dma_obj
.buffer
, 0,
3958 sizeof (struct mrsas_evt_log_info
));
3960 (void) memset(dcmd
->mbox
.b
, 0, DCMD_MBOX_SZ
);
3962 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->cmd
, MFI_CMD_OP_DCMD
);
3963 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->cmd_status
, 0);
3964 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sge_count
, 1);
3965 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->flags
,
3966 MFI_FRAME_DIR_READ
);
3967 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->timeout
, 0);
3968 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->data_xfer_len
,
3969 sizeof (struct mrsas_evt_log_info
));
3970 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->opcode
,
3971 MR_DCMD_CTRL_EVENT_GET_INFO
);
3972 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sgl
.sge32
[0].length
,
3973 sizeof (struct mrsas_evt_log_info
));
3974 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sgl
.sge32
[0].phys_addr
,
3975 dcmd_dma_obj
.dma_cookie
[0].dmac_address
);
3977 cmd
->sync_cmd
= MRSAS_TRUE
;
3978 cmd
->frame_count
= 1;
3980 if (instance
->tbolt
) {
3981 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
3984 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
3985 dev_err(instance
->dip
, CE_WARN
, "get_seq_num: "
3986 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
3989 eli_tmp
= (struct mrsas_evt_log_info
*)dcmd_dma_obj
.buffer
;
3990 eli
->newest_seq_num
= ddi_get32(cmd
->frame_dma_obj
.acc_handle
,
3991 &eli_tmp
->newest_seq_num
);
3995 if (mrsas_free_dma_obj(instance
, dcmd_dma_obj
) != DDI_SUCCESS
)
3998 if (instance
->tbolt
) {
3999 return_raid_msg_mfi_pkt(instance
, cmd
);
4001 mrsas_return_mfi_pkt(instance
, cmd
);
4011 start_mfi_aen(struct mrsas_instance
*instance
)
4015 struct mrsas_evt_log_info eli
;
4016 union mrsas_evt_class_locale class_locale
;
4018 /* get the latest sequence number from FW */
4019 (void) memset(&eli
, 0, sizeof (struct mrsas_evt_log_info
));
4021 if (get_seq_num(instance
, &eli
)) {
4022 dev_err(instance
->dip
, CE_WARN
,
4023 "start_mfi_aen: failed to get seq num");
4027 /* register AEN with FW for latest sequence number plus 1 */
4028 class_locale
.members
.reserved
= 0;
4029 class_locale
.members
.locale
= LE_16(MR_EVT_LOCALE_ALL
);
4030 class_locale
.members
.class = MR_EVT_CLASS_INFO
;
4031 class_locale
.word
= LE_32(class_locale
.word
);
4032 ret
= register_mfi_aen(instance
, eli
.newest_seq_num
+ 1,
4036 dev_err(instance
->dip
, CE_WARN
,
4037 "start_mfi_aen: aen registration failed");
4049 flush_cache(struct mrsas_instance
*instance
)
4051 struct mrsas_cmd
*cmd
= NULL
;
4052 struct mrsas_dcmd_frame
*dcmd
;
4053 if (instance
->tbolt
) {
4054 cmd
= get_raid_msg_mfi_pkt(instance
);
4056 cmd
= mrsas_get_mfi_pkt(instance
);
4060 con_log(CL_ANN1
, (CE_WARN
,
4061 "flush_cache():Failed to get a cmd for flush_cache"));
4062 DTRACE_PROBE2(flush_cache_err
, uint16_t,
4063 instance
->fw_outstanding
, uint16_t, instance
->max_fw_cmds
);
4067 /* Clear the frame buffer and assign back the context id */
4068 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
4069 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &cmd
->frame
->hdr
.context
,
4072 dcmd
= &cmd
->frame
->dcmd
;
4074 (void) memset(dcmd
->mbox
.b
, 0, DCMD_MBOX_SZ
);
4076 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->cmd
, MFI_CMD_OP_DCMD
);
4077 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->cmd_status
, 0x0);
4078 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sge_count
, 0);
4079 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->flags
,
4080 MFI_FRAME_DIR_NONE
);
4081 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->timeout
, 0);
4082 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->data_xfer_len
, 0);
4083 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->opcode
,
4084 MR_DCMD_CTRL_CACHE_FLUSH
);
4085 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->mbox
.b
[0],
4086 MR_FLUSH_CTRL_CACHE
| MR_FLUSH_DISK_CACHE
);
4088 cmd
->frame_count
= 1;
4090 if (instance
->tbolt
) {
4091 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
4094 if (instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
)) {
4095 con_log(CL_ANN1
, (CE_WARN
,
4096 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4098 con_log(CL_ANN1
, (CE_CONT
, "flush_cache done"));
4099 if (instance
->tbolt
) {
4100 return_raid_msg_mfi_pkt(instance
, cmd
);
4102 mrsas_return_mfi_pkt(instance
, cmd
);
4108 * service_mfi_aen- Completes an AEN command
4109 * @instance: Adapter soft state
4110 * @cmd: Command to be completed
4114 service_mfi_aen(struct mrsas_instance
*instance
, struct mrsas_cmd
*cmd
)
4117 struct mrsas_evt_detail
*evt_detail
=
4118 (struct mrsas_evt_detail
*)instance
->mfi_evt_detail_obj
.buffer
;
4122 mrsas_pd_address_t
*pd_addr
;
4123 ddi_acc_handle_t acc_handle
;
4125 con_log(CL_ANN
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
4127 acc_handle
= cmd
->frame_dma_obj
.acc_handle
;
4128 cmd
->cmd_status
= ddi_get8(acc_handle
, &cmd
->frame
->io
.cmd_status
);
4129 if (cmd
->cmd_status
== ENODATA
) {
4130 cmd
->cmd_status
= 0;
4134 * log the MFI AEN event to the sysevent queue so that
4135 * application will get noticed
4137 if (ddi_log_sysevent(instance
->dip
, DDI_VENDOR_LSI
, "LSIMEGA", "SAS",
4138 NULL
, NULL
, DDI_NOSLEEP
) != DDI_SUCCESS
) {
4139 int instance_no
= ddi_get_instance(instance
->dip
);
4140 con_log(CL_ANN
, (CE_WARN
,
4141 "mr_sas%d: Failed to log AEN event", instance_no
));
4144 * Check for any ld devices that has changed state. i.e. online
4147 con_log(CL_ANN1
, (CE_CONT
,
4148 "AEN: code = %x class = %x locale = %x args = %x",
4149 ddi_get32(acc_handle
, &evt_detail
->code
),
4150 evt_detail
->cl
.members
.class,
4151 ddi_get16(acc_handle
, &evt_detail
->cl
.members
.locale
),
4152 ddi_get8(acc_handle
, &evt_detail
->arg_type
)));
4154 switch (ddi_get32(acc_handle
, &evt_detail
->code
)) {
4155 case MR_EVT_CFG_CLEARED
: {
4156 for (tgt
= 0; tgt
< MRDRV_MAX_LD
; tgt
++) {
4157 if (instance
->mr_ld_list
[tgt
].dip
!= NULL
) {
4158 mutex_enter(&instance
->config_dev_mtx
);
4159 instance
->mr_ld_list
[tgt
].flag
=
4160 (uint8_t)~MRDRV_TGT_VALID
;
4161 mutex_exit(&instance
->config_dev_mtx
);
4162 rval
= mrsas_service_evt(instance
, tgt
, 0,
4163 MRSAS_EVT_UNCONFIG_TGT
, (uintptr_t)NULL
);
4164 con_log(CL_ANN1
, (CE_WARN
,
4165 "mr_sas: CFG CLEARED AEN rval = %d "
4166 "tgt id = %d", rval
, tgt
));
4172 case MR_EVT_LD_DELETED
: {
4173 tgt
= ddi_get16(acc_handle
, &evt_detail
->args
.ld
.target_id
);
4174 mutex_enter(&instance
->config_dev_mtx
);
4175 instance
->mr_ld_list
[tgt
].flag
= (uint8_t)~MRDRV_TGT_VALID
;
4176 mutex_exit(&instance
->config_dev_mtx
);
4177 rval
= mrsas_service_evt(instance
,
4178 ddi_get16(acc_handle
, &evt_detail
->args
.ld
.target_id
), 0,
4179 MRSAS_EVT_UNCONFIG_TGT
, (uintptr_t)NULL
);
4180 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: LD DELETED AEN rval = %d "
4181 "tgt id = %d index = %d", rval
,
4182 ddi_get16(acc_handle
, &evt_detail
->args
.ld
.target_id
),
4183 ddi_get8(acc_handle
, &evt_detail
->args
.ld
.ld_index
)));
4185 } /* End of MR_EVT_LD_DELETED */
4187 case MR_EVT_LD_CREATED
: {
4188 rval
= mrsas_service_evt(instance
,
4189 ddi_get16(acc_handle
, &evt_detail
->args
.ld
.target_id
), 0,
4190 MRSAS_EVT_CONFIG_TGT
, (uintptr_t)NULL
);
4191 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: LD CREATED AEN rval = %d "
4192 "tgt id = %d index = %d", rval
,
4193 ddi_get16(acc_handle
, &evt_detail
->args
.ld
.target_id
),
4194 ddi_get8(acc_handle
, &evt_detail
->args
.ld
.ld_index
)));
4196 } /* End of MR_EVT_LD_CREATED */
4198 case MR_EVT_PD_REMOVED_EXT
: {
4199 if (instance
->tbolt
|| instance
->skinny
) {
4200 pd_addr
= &evt_detail
->args
.pd_addr
;
4201 dtype
= pd_addr
->scsi_dev_type
;
4202 con_log(CL_DLEVEL1
, (CE_NOTE
,
4203 " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
4204 " arg_type = %d ", dtype
, evt_detail
->arg_type
));
4205 tgt
= ddi_get16(acc_handle
,
4206 &evt_detail
->args
.pd
.device_id
);
4207 mutex_enter(&instance
->config_dev_mtx
);
4208 instance
->mr_tbolt_pd_list
[tgt
].flag
=
4209 (uint8_t)~MRDRV_TGT_VALID
;
4210 mutex_exit(&instance
->config_dev_mtx
);
4211 rval
= mrsas_service_evt(instance
, ddi_get16(
4212 acc_handle
, &evt_detail
->args
.pd
.device_id
),
4213 1, MRSAS_EVT_UNCONFIG_TGT
, (uintptr_t)NULL
);
4214 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: PD_REMOVED:"
4215 "rval = %d tgt id = %d ", rval
,
4216 ddi_get16(acc_handle
,
4217 &evt_detail
->args
.pd
.device_id
)));
4220 } /* End of MR_EVT_PD_REMOVED_EXT */
4222 case MR_EVT_PD_INSERTED_EXT
: {
4223 if (instance
->tbolt
|| instance
->skinny
) {
4224 rval
= mrsas_service_evt(instance
,
4225 ddi_get16(acc_handle
,
4226 &evt_detail
->args
.pd
.device_id
),
4227 1, MRSAS_EVT_CONFIG_TGT
, (uintptr_t)NULL
);
4228 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: PD_INSERTEDi_EXT:"
4229 "rval = %d tgt id = %d ", rval
,
4230 ddi_get16(acc_handle
,
4231 &evt_detail
->args
.pd
.device_id
)));
4234 } /* End of MR_EVT_PD_INSERTED_EXT */
4236 case MR_EVT_PD_STATE_CHANGE
: {
4237 if (instance
->tbolt
|| instance
->skinny
) {
4238 tgt
= ddi_get16(acc_handle
,
4239 &evt_detail
->args
.pd
.device_id
);
4240 if ((evt_detail
->args
.pd_state
.prevState
==
4242 (evt_detail
->args
.pd_state
.newState
!= PD_SYSTEM
)) {
4243 mutex_enter(&instance
->config_dev_mtx
);
4244 instance
->mr_tbolt_pd_list
[tgt
].flag
=
4245 (uint8_t)~MRDRV_TGT_VALID
;
4246 mutex_exit(&instance
->config_dev_mtx
);
4247 rval
= mrsas_service_evt(instance
,
4248 ddi_get16(acc_handle
,
4249 &evt_detail
->args
.pd
.device_id
),
4250 1, MRSAS_EVT_UNCONFIG_TGT
, (uintptr_t)NULL
);
4251 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: PD_REMOVED:"
4252 "rval = %d tgt id = %d ", rval
,
4253 ddi_get16(acc_handle
,
4254 &evt_detail
->args
.pd
.device_id
)));
4257 if ((evt_detail
->args
.pd_state
.prevState
4258 == UNCONFIGURED_GOOD
) &&
4259 (evt_detail
->args
.pd_state
.newState
== PD_SYSTEM
)) {
4260 rval
= mrsas_service_evt(instance
,
4261 ddi_get16(acc_handle
,
4262 &evt_detail
->args
.pd
.device_id
),
4263 1, MRSAS_EVT_CONFIG_TGT
, (uintptr_t)NULL
);
4264 con_log(CL_ANN1
, (CE_WARN
,
4265 "mr_sas: PD_INSERTED: rval = %d "
4266 " tgt id = %d ", rval
,
4267 ddi_get16(acc_handle
,
4268 &evt_detail
->args
.pd
.device_id
)));
4275 } /* End of Main Switch */
4277 /* get copy of seq_num and class/locale for re-registration */
4278 seq_num
= ddi_get32(acc_handle
, &evt_detail
->seq_num
);
4280 (void) memset(instance
->mfi_evt_detail_obj
.buffer
, 0,
4281 sizeof (struct mrsas_evt_detail
));
4283 ddi_put8(acc_handle
, &cmd
->frame
->dcmd
.cmd_status
, 0x0);
4284 ddi_put32(acc_handle
, &cmd
->frame
->dcmd
.mbox
.w
[0], seq_num
);
4286 instance
->aen_seq_num
= seq_num
;
4288 cmd
->frame_count
= 1;
4290 cmd
->retry_count_for_ocr
= 0;
4291 cmd
->drv_pkt_time
= 0;
4293 /* Issue the aen registration frame */
4294 instance
->func_ptr
->issue_cmd(cmd
, instance
);
4298 * complete_cmd_in_sync_mode - Completes an internal command
4299 * @instance: Adapter soft state
4300 * @cmd: Command to be completed
4302 * The issue_cmd_in_sync_mode() function waits for a command to complete
4303 * after it issues a command. This function wakes up that waiting routine by
4304 * calling wake_up() on the wait queue.
4307 complete_cmd_in_sync_mode(struct mrsas_instance
*instance
,
4308 struct mrsas_cmd
*cmd
)
4310 cmd
->cmd_status
= ddi_get8(cmd
->frame_dma_obj
.acc_handle
,
4311 &cmd
->frame
->io
.cmd_status
);
4313 cmd
->sync_cmd
= MRSAS_FALSE
;
4315 con_log(CL_ANN1
, (CE_NOTE
, "complete_cmd_in_sync_mode called %p \n",
4318 mutex_enter(&instance
->int_cmd_mtx
);
4319 if (cmd
->cmd_status
== ENODATA
) {
4320 cmd
->cmd_status
= 0;
4322 cv_broadcast(&instance
->int_cmd_cv
);
4323 mutex_exit(&instance
->int_cmd_mtx
);
4328 * Call this function inside mrsas_softintr.
4329 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4330 * @instance: Adapter soft state
4334 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance
*instance
)
4336 uint32_t cur_abs_reg_val
;
4339 cur_abs_reg_val
= instance
->func_ptr
->read_fw_status_reg(instance
);
4340 fw_state
= cur_abs_reg_val
& MFI_STATE_MASK
;
4341 if (fw_state
== MFI_STATE_FAULT
) {
4342 if (instance
->disable_online_ctrl_reset
== 1) {
4343 dev_err(instance
->dip
, CE_WARN
,
4344 "mrsas_initiate_ocr_if_fw_is_faulty: "
4345 "FW in Fault state, detected in ISR: "
4346 "FW doesn't support ocr ");
4348 return (ADAPTER_RESET_NOT_REQUIRED
);
4350 con_log(CL_ANN
, (CE_NOTE
,
4351 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4352 "state, detected in ISR: FW supports ocr "));
4354 return (ADAPTER_RESET_REQUIRED
);
4358 return (ADAPTER_RESET_NOT_REQUIRED
);
4362 * mrsas_softintr - The Software ISR
4363 * @param arg : HBA soft state
4365 * called from high-level interrupt if hi-level interrupt are not there,
4366 * otherwise triggered as a soft interrupt
4369 mrsas_softintr(struct mrsas_instance
*instance
)
4371 struct scsi_pkt
*pkt
;
4372 struct scsa_cmd
*acmd
;
4373 struct mrsas_cmd
*cmd
;
4374 struct mlist_head
*pos
, *next
;
4375 mlist_t process_list
;
4376 struct mrsas_header
*hdr
;
4377 struct scsi_arq_status
*arqstat
;
4379 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_softintr() called."));
4383 mutex_enter(&instance
->completed_pool_mtx
);
4385 if (mlist_empty(&instance
->completed_pool_list
)) {
4386 mutex_exit(&instance
->completed_pool_mtx
);
4387 return (DDI_INTR_CLAIMED
);
4390 instance
->softint_running
= 1;
4392 INIT_LIST_HEAD(&process_list
);
4393 mlist_splice(&instance
->completed_pool_list
, &process_list
);
4394 INIT_LIST_HEAD(&instance
->completed_pool_list
);
4396 mutex_exit(&instance
->completed_pool_mtx
);
4398 /* perform all callbacks first, before releasing the SCBs */
4399 mlist_for_each_safe(pos
, next
, &process_list
) {
4400 cmd
= mlist_entry(pos
, struct mrsas_cmd
, list
);
4402 /* syncronize the Cmd frame for the controller */
4403 (void) ddi_dma_sync(cmd
->frame_dma_obj
.dma_handle
,
4404 0, 0, DDI_DMA_SYNC_FORCPU
);
4406 if (mrsas_check_dma_handle(cmd
->frame_dma_obj
.dma_handle
) !=
4408 mrsas_fm_ereport(instance
, DDI_FM_DEVICE_NO_RESPONSE
);
4409 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
4410 con_log(CL_ANN1
, (CE_WARN
,
4412 "FMA check reports DMA handle failure"));
4413 return (DDI_INTR_CLAIMED
);
4416 hdr
= &cmd
->frame
->hdr
;
4418 /* remove the internal command from the process list */
4419 mlist_del_init(&cmd
->list
);
4421 switch (ddi_get8(cmd
->frame_dma_obj
.acc_handle
, &hdr
->cmd
)) {
4422 case MFI_CMD_OP_PD_SCSI
:
4423 case MFI_CMD_OP_LD_SCSI
:
4424 case MFI_CMD_OP_LD_READ
:
4425 case MFI_CMD_OP_LD_WRITE
:
4427 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
4428 * could have been issued either through an
4429 * IO path or an IOCTL path. If it was via IOCTL,
4430 * we will send it to internal completion.
4432 if (cmd
->sync_cmd
== MRSAS_TRUE
) {
4433 complete_cmd_in_sync_mode(instance
, cmd
);
4437 /* regular commands */
4439 pkt
= CMD2PKT(acmd
);
4441 if (acmd
->cmd_flags
& CFLAG_DMAVALID
) {
4442 if (acmd
->cmd_flags
& CFLAG_CONSISTENT
) {
4443 (void) ddi_dma_sync(acmd
->cmd_dmahandle
,
4444 acmd
->cmd_dma_offset
,
4446 DDI_DMA_SYNC_FORCPU
);
4450 pkt
->pkt_reason
= CMD_CMPLT
;
4451 pkt
->pkt_statistics
= 0;
4452 pkt
->pkt_state
= STATE_GOT_BUS
4453 | STATE_GOT_TARGET
| STATE_SENT_CMD
4454 | STATE_XFERRED_DATA
| STATE_GOT_STATUS
;
4456 con_log(CL_ANN
, (CE_CONT
,
4457 "CDB[0] = %x completed for %s: size %lx context %x",
4458 pkt
->pkt_cdbp
[0], ((acmd
->islogical
) ? "LD" : "PD"),
4459 acmd
->cmd_dmacount
, hdr
->context
));
4460 DTRACE_PROBE3(softintr_cdb
, uint8_t, pkt
->pkt_cdbp
[0],
4461 uint_t
, acmd
->cmd_cdblen
, ulong_t
,
4462 acmd
->cmd_dmacount
);
4464 if (pkt
->pkt_cdbp
[0] == SCMD_INQUIRY
) {
4465 struct scsi_inquiry
*inq
;
4467 if (acmd
->cmd_dmacount
!= 0) {
4468 bp_mapin(acmd
->cmd_buf
);
4469 inq
= (struct scsi_inquiry
*)
4470 acmd
->cmd_buf
->b_un
.b_addr
;
4472 if (hdr
->cmd_status
== MFI_STAT_OK
) {
4473 display_scsi_inquiry(
4479 DTRACE_PROBE2(softintr_done
, uint8_t, hdr
->cmd
,
4480 uint8_t, hdr
->cmd_status
);
4482 switch (hdr
->cmd_status
) {
4484 pkt
->pkt_scbp
[0] = STATUS_GOOD
;
4486 case MFI_STAT_LD_CC_IN_PROGRESS
:
4487 case MFI_STAT_LD_RECON_IN_PROGRESS
:
4488 pkt
->pkt_scbp
[0] = STATUS_GOOD
;
4490 case MFI_STAT_LD_INIT_IN_PROGRESS
:
4492 (CE_WARN
, "Initialization in Progress"));
4493 pkt
->pkt_reason
= CMD_TRAN_ERR
;
4496 case MFI_STAT_SCSI_DONE_WITH_ERROR
:
4497 con_log(CL_ANN
, (CE_CONT
, "scsi_done error"));
4499 pkt
->pkt_reason
= CMD_CMPLT
;
4500 ((struct scsi_status
*)
4501 pkt
->pkt_scbp
)->sts_chk
= 1;
4503 if (pkt
->pkt_cdbp
[0] == SCMD_TEST_UNIT_READY
) {
4505 (CE_WARN
, "TEST_UNIT_READY fail"));
4507 pkt
->pkt_state
|= STATE_ARQ_DONE
;
4508 arqstat
= (void *)(pkt
->pkt_scbp
);
4509 arqstat
->sts_rqpkt_reason
= CMD_CMPLT
;
4510 arqstat
->sts_rqpkt_resid
= 0;
4511 arqstat
->sts_rqpkt_state
|=
4512 STATE_GOT_BUS
| STATE_GOT_TARGET
4514 | STATE_XFERRED_DATA
;
4515 *(uint8_t *)&arqstat
->sts_rqpkt_status
=
4518 cmd
->frame_dma_obj
.acc_handle
,
4520 &(arqstat
->sts_sensedata
),
4522 sizeof (struct scsi_extended_sense
),
4526 case MFI_STAT_LD_OFFLINE
:
4527 case MFI_STAT_DEVICE_NOT_FOUND
:
4528 con_log(CL_ANN
, (CE_CONT
,
4529 "mrsas_softintr:device not found error"));
4530 pkt
->pkt_reason
= CMD_DEV_GONE
;
4531 pkt
->pkt_statistics
= STAT_DISCON
;
4533 case MFI_STAT_LD_LBA_OUT_OF_RANGE
:
4534 pkt
->pkt_state
|= STATE_ARQ_DONE
;
4535 pkt
->pkt_reason
= CMD_CMPLT
;
4536 ((struct scsi_status
*)
4537 pkt
->pkt_scbp
)->sts_chk
= 1;
4539 arqstat
= (void *)(pkt
->pkt_scbp
);
4540 arqstat
->sts_rqpkt_reason
= CMD_CMPLT
;
4541 arqstat
->sts_rqpkt_resid
= 0;
4542 arqstat
->sts_rqpkt_state
|= STATE_GOT_BUS
4543 | STATE_GOT_TARGET
| STATE_SENT_CMD
4544 | STATE_XFERRED_DATA
;
4545 *(uint8_t *)&arqstat
->sts_rqpkt_status
=
4548 arqstat
->sts_sensedata
.es_valid
= 1;
4549 arqstat
->sts_sensedata
.es_key
=
4550 KEY_ILLEGAL_REQUEST
;
4551 arqstat
->sts_sensedata
.es_class
=
4552 CLASS_EXTENDED_SENSE
;
4555 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
4556 * ASC: 0x21h; ASCQ: 0x00h;
4558 arqstat
->sts_sensedata
.es_add_code
= 0x21;
4559 arqstat
->sts_sensedata
.es_qual_code
= 0x00;
4564 con_log(CL_ANN
, (CE_CONT
, "Unknown status!"));
4565 pkt
->pkt_reason
= CMD_TRAN_ERR
;
4570 atomic_add_16(&instance
->fw_outstanding
, (-1));
4572 (void) mrsas_common_check(instance
, cmd
);
4574 if (acmd
->cmd_dmahandle
) {
4575 if (mrsas_check_dma_handle(
4576 acmd
->cmd_dmahandle
) != DDI_SUCCESS
) {
4577 ddi_fm_service_impact(instance
->dip
,
4578 DDI_SERVICE_UNAFFECTED
);
4579 pkt
->pkt_reason
= CMD_TRAN_ERR
;
4580 pkt
->pkt_statistics
= 0;
4584 mrsas_return_mfi_pkt(instance
, cmd
);
4586 /* Call the callback routine */
4587 if (((pkt
->pkt_flags
& FLAG_NOINTR
) == 0) &&
4589 (*pkt
->pkt_comp
)(pkt
);
4594 case MFI_CMD_OP_SMP
:
4595 case MFI_CMD_OP_STP
:
4596 complete_cmd_in_sync_mode(instance
, cmd
);
4599 case MFI_CMD_OP_DCMD
:
4600 /* see if got an event notification */
4601 if (ddi_get32(cmd
->frame_dma_obj
.acc_handle
,
4602 &cmd
->frame
->dcmd
.opcode
) ==
4603 MR_DCMD_CTRL_EVENT_WAIT
) {
4604 if ((instance
->aen_cmd
== cmd
) &&
4605 (instance
->aen_cmd
->abort_aen
)) {
4606 con_log(CL_ANN
, (CE_WARN
,
4608 "aborted_aen returned"));
4610 atomic_add_16(&instance
->fw_outstanding
,
4612 service_mfi_aen(instance
, cmd
);
4615 complete_cmd_in_sync_mode(instance
, cmd
);
4620 case MFI_CMD_OP_ABORT
:
4621 con_log(CL_ANN
, (CE_NOTE
, "MFI_CMD_OP_ABORT complete"));
4623 * MFI_CMD_OP_ABORT successfully completed
4624 * in the synchronous mode
4626 complete_cmd_in_sync_mode(instance
, cmd
);
4630 mrsas_fm_ereport(instance
, DDI_FM_DEVICE_NO_RESPONSE
);
4631 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
4633 if (cmd
->pkt
!= NULL
) {
4635 if (((pkt
->pkt_flags
& FLAG_NOINTR
) == 0) &&
4638 con_log(CL_ANN1
, (CE_CONT
, "posting to "
4639 "scsa cmd %p index %x pkt %p"
4640 "time %llx, default ", (void *)cmd
,
4641 cmd
->index
, (void *)pkt
,
4644 (*pkt
->pkt_comp
)(pkt
);
4648 con_log(CL_ANN
, (CE_WARN
, "Cmd type unknown !"));
4653 instance
->softint_running
= 0;
4655 return (DDI_INTR_CLAIMED
);
4659 * mrsas_alloc_dma_obj
4661 * Allocate the memory and other resources for an dma object.
4664 mrsas_alloc_dma_obj(struct mrsas_instance
*instance
, dma_obj_t
*obj
,
4665 uchar_t endian_flags
)
4670 struct ddi_device_acc_attr tmp_endian_attr
;
4672 tmp_endian_attr
= endian_attr
;
4673 tmp_endian_attr
.devacc_attr_endian_flags
= endian_flags
;
4674 tmp_endian_attr
.devacc_attr_access
= DDI_DEFAULT_ACC
;
4676 i
= ddi_dma_alloc_handle(instance
->dip
, &obj
->dma_attr
,
4677 DDI_DMA_SLEEP
, NULL
, &obj
->dma_handle
);
4678 if (i
!= DDI_SUCCESS
) {
4681 case DDI_DMA_BADATTR
:
4682 con_log(CL_ANN
, (CE_WARN
,
4683 "Failed ddi_dma_alloc_handle- Bad attribute"));
4685 case DDI_DMA_NORESOURCES
:
4686 con_log(CL_ANN
, (CE_WARN
,
4687 "Failed ddi_dma_alloc_handle- No Resources"));
4690 con_log(CL_ANN
, (CE_WARN
,
4691 "Failed ddi_dma_alloc_handle: "
4692 "unknown status %d", i
));
4699 if ((ddi_dma_mem_alloc(obj
->dma_handle
, obj
->size
, &tmp_endian_attr
,
4700 DDI_DMA_RDWR
| DDI_DMA_STREAMING
, DDI_DMA_SLEEP
, NULL
,
4701 &obj
->buffer
, &alen
, &obj
->acc_handle
) != DDI_SUCCESS
) ||
4704 ddi_dma_free_handle(&obj
->dma_handle
);
4706 con_log(CL_ANN
, (CE_WARN
, "Failed : ddi_dma_mem_alloc"));
4711 if (ddi_dma_addr_bind_handle(obj
->dma_handle
, NULL
, obj
->buffer
,
4712 obj
->size
, DDI_DMA_RDWR
| DDI_DMA_STREAMING
, DDI_DMA_SLEEP
,
4713 NULL
, &obj
->dma_cookie
[0], &cookie_cnt
) != DDI_SUCCESS
) {
4715 ddi_dma_mem_free(&obj
->acc_handle
);
4716 ddi_dma_free_handle(&obj
->dma_handle
);
4718 con_log(CL_ANN
, (CE_WARN
, "Failed : ddi_dma_addr_bind_handle"));
4723 if (mrsas_check_dma_handle(obj
->dma_handle
) != DDI_SUCCESS
) {
4724 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
4728 if (mrsas_check_acc_handle(obj
->acc_handle
) != DDI_SUCCESS
) {
4729 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
4733 return (cookie_cnt
);
4737 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4739 * De-allocate the memory and other resources for an dma object, which must
4740 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4743 mrsas_free_dma_obj(struct mrsas_instance
*instance
, dma_obj_t obj
)
4746 if ((obj
.dma_handle
== NULL
) || (obj
.acc_handle
== NULL
)) {
4747 return (DDI_SUCCESS
);
4751 * NOTE: These check-handle functions fail if *_handle == NULL, but
4752 * this function succeeds because of the previous check.
4754 if (mrsas_check_dma_handle(obj
.dma_handle
) != DDI_SUCCESS
) {
4755 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
4756 return (DDI_FAILURE
);
4759 if (mrsas_check_acc_handle(obj
.acc_handle
) != DDI_SUCCESS
) {
4760 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
4761 return (DDI_FAILURE
);
4764 (void) ddi_dma_unbind_handle(obj
.dma_handle
);
4765 ddi_dma_mem_free(&obj
.acc_handle
);
4766 ddi_dma_free_handle(&obj
.dma_handle
);
4767 obj
.acc_handle
= NULL
;
4768 return (DDI_SUCCESS
);
4772 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4775 * Allocate dma resources for a new scsi command
4778 mrsas_dma_alloc(struct mrsas_instance
*instance
, struct scsi_pkt
*pkt
,
4779 struct buf
*bp
, int flags
, int (*callback
)())
4785 ddi_dma_attr_t tmp_dma_attr
= mrsas_generic_dma_attr
;
4786 struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
4790 if (bp
->b_flags
& B_READ
) {
4791 acmd
->cmd_flags
&= ~CFLAG_DMASEND
;
4792 dma_flags
= DDI_DMA_READ
;
4794 acmd
->cmd_flags
|= CFLAG_DMASEND
;
4795 dma_flags
= DDI_DMA_WRITE
;
4798 if (flags
& PKT_CONSISTENT
) {
4799 acmd
->cmd_flags
|= CFLAG_CONSISTENT
;
4800 dma_flags
|= DDI_DMA_CONSISTENT
;
4803 if (flags
& PKT_DMA_PARTIAL
) {
4804 dma_flags
|= DDI_DMA_PARTIAL
;
4807 dma_flags
|= DDI_DMA_REDZONE
;
4809 cb
= (callback
== NULL_FUNC
) ? DDI_DMA_DONTWAIT
: DDI_DMA_SLEEP
;
4811 tmp_dma_attr
.dma_attr_sgllen
= instance
->max_num_sge
;
4812 tmp_dma_attr
.dma_attr_addr_hi
= 0xffffffffffffffffull
;
4813 if (instance
->tbolt
) {
4815 tmp_dma_attr
.dma_attr_count_max
=
4816 (U64
)mrsas_tbolt_max_cap_maxxfer
; /* limit to 256K */
4817 tmp_dma_attr
.dma_attr_maxxfer
=
4818 (U64
)mrsas_tbolt_max_cap_maxxfer
; /* limit to 256K */
4821 if ((i
= ddi_dma_alloc_handle(instance
->dip
, &tmp_dma_attr
,
4822 cb
, 0, &acmd
->cmd_dmahandle
)) != DDI_SUCCESS
) {
4824 case DDI_DMA_BADATTR
:
4825 bioerror(bp
, EFAULT
);
4826 return (DDI_FAILURE
);
4828 case DDI_DMA_NORESOURCES
:
4830 return (DDI_FAILURE
);
4833 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_alloc_handle: "
4834 "impossible result (0x%x)", i
));
4835 bioerror(bp
, EFAULT
);
4836 return (DDI_FAILURE
);
4840 i
= ddi_dma_buf_bind_handle(acmd
->cmd_dmahandle
, bp
, dma_flags
,
4841 cb
, 0, &acmd
->cmd_dmacookies
[0], &acmd
->cmd_ncookies
);
4844 case DDI_DMA_PARTIAL_MAP
:
4845 if ((dma_flags
& DDI_DMA_PARTIAL
) == 0) {
4846 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_buf_bind_handle: "
4847 "DDI_DMA_PARTIAL_MAP impossible"));
4848 goto no_dma_cookies
;
4851 if (ddi_dma_numwin(acmd
->cmd_dmahandle
, &acmd
->cmd_nwin
) ==
4853 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_numwin failed"));
4854 goto no_dma_cookies
;
4857 if (ddi_dma_getwin(acmd
->cmd_dmahandle
, acmd
->cmd_curwin
,
4858 &acmd
->cmd_dma_offset
, &acmd
->cmd_dma_len
,
4859 &acmd
->cmd_dmacookies
[0], &acmd
->cmd_ncookies
) ==
4862 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_getwin failed"));
4863 goto no_dma_cookies
;
4866 goto get_dma_cookies
;
4867 case DDI_DMA_MAPPED
:
4869 acmd
->cmd_dma_len
= 0;
4870 acmd
->cmd_dma_offset
= 0;
4874 acmd
->cmd_dmacount
= 0;
4876 acmd
->cmd_dmacount
+=
4877 acmd
->cmd_dmacookies
[i
++].dmac_size
;
4879 if (i
== instance
->max_num_sge
||
4880 i
== acmd
->cmd_ncookies
)
4883 ddi_dma_nextcookie(acmd
->cmd_dmahandle
,
4884 &acmd
->cmd_dmacookies
[i
]);
4887 acmd
->cmd_cookie
= i
;
4888 acmd
->cmd_cookiecnt
= i
;
4890 acmd
->cmd_flags
|= CFLAG_DMAVALID
;
4892 if (bp
->b_bcount
>= acmd
->cmd_dmacount
) {
4893 pkt
->pkt_resid
= bp
->b_bcount
- acmd
->cmd_dmacount
;
4898 return (DDI_SUCCESS
);
4899 case DDI_DMA_NORESOURCES
:
4902 case DDI_DMA_NOMAPPING
:
4903 bioerror(bp
, EFAULT
);
4905 case DDI_DMA_TOOBIG
:
4906 bioerror(bp
, EINVAL
);
4909 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_buf_bind_handle:"
4910 " DDI_DMA_INUSE impossible"));
4913 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_buf_bind_handle: "
4914 "impossible result (0x%x)", i
));
4919 ddi_dma_free_handle(&acmd
->cmd_dmahandle
);
4920 acmd
->cmd_dmahandle
= NULL
;
4921 acmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
4922 return (DDI_FAILURE
);
4926 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
4928 * move dma resources to next dma window
4932 mrsas_dma_move(struct mrsas_instance
*instance
, struct scsi_pkt
*pkt
,
4937 struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
4940 * If there are no more cookies remaining in this window,
4941 * must move to the next window first.
4943 if (acmd
->cmd_cookie
== acmd
->cmd_ncookies
) {
4944 if (acmd
->cmd_curwin
== acmd
->cmd_nwin
&& acmd
->cmd_nwin
== 1) {
4945 return (DDI_SUCCESS
);
4948 /* at last window, cannot move */
4949 if (++acmd
->cmd_curwin
>= acmd
->cmd_nwin
) {
4950 return (DDI_FAILURE
);
4953 if (ddi_dma_getwin(acmd
->cmd_dmahandle
, acmd
->cmd_curwin
,
4954 &acmd
->cmd_dma_offset
, &acmd
->cmd_dma_len
,
4955 &acmd
->cmd_dmacookies
[0], &acmd
->cmd_ncookies
) ==
4957 return (DDI_FAILURE
);
4960 acmd
->cmd_cookie
= 0;
4962 /* still more cookies in this window - get the next one */
4963 ddi_dma_nextcookie(acmd
->cmd_dmahandle
,
4964 &acmd
->cmd_dmacookies
[0]);
4967 /* get remaining cookies in this window, up to our maximum */
4969 acmd
->cmd_dmacount
+= acmd
->cmd_dmacookies
[i
++].dmac_size
;
4972 if (i
== instance
->max_num_sge
||
4973 acmd
->cmd_cookie
== acmd
->cmd_ncookies
) {
4977 ddi_dma_nextcookie(acmd
->cmd_dmahandle
,
4978 &acmd
->cmd_dmacookies
[i
]);
4981 acmd
->cmd_cookiecnt
= i
;
4983 if (bp
->b_bcount
>= acmd
->cmd_dmacount
) {
4984 pkt
->pkt_resid
= bp
->b_bcount
- acmd
->cmd_dmacount
;
4989 return (DDI_SUCCESS
);
4995 static struct mrsas_cmd
*
4996 build_cmd(struct mrsas_instance
*instance
, struct scsi_address
*ap
,
4997 struct scsi_pkt
*pkt
, uchar_t
*cmd_done
)
5002 uint32_t tmp_data_xfer_len
;
5003 ddi_acc_handle_t acc_handle
;
5004 struct mrsas_cmd
*cmd
;
5005 struct mrsas_sge64
*mfi_sgl
;
5006 struct mrsas_sge_ieee
*mfi_sgl_ieee
;
5007 struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
5008 struct mrsas_pthru_frame
*pthru
;
5009 struct mrsas_io_frame
*ldio
;
5011 /* find out if this is logical or physical drive command. */
5012 acmd
->islogical
= MRDRV_IS_LOGICAL(ap
);
5013 acmd
->device_id
= MAP_DEVICE_ID(instance
, ap
);
5016 /* get the command packet */
5017 if (!(cmd
= mrsas_get_mfi_pkt(instance
))) {
5018 DTRACE_PROBE2(build_cmd_mfi_err
, uint16_t,
5019 instance
->fw_outstanding
, uint16_t, instance
->max_fw_cmds
);
5023 acc_handle
= cmd
->frame_dma_obj
.acc_handle
;
5025 /* Clear the frame buffer and assign back the context id */
5026 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
5027 ddi_put32(acc_handle
, &cmd
->frame
->hdr
.context
, cmd
->index
);
5031 DTRACE_PROBE3(build_cmds
, uint8_t, pkt
->pkt_cdbp
[0],
5032 ulong_t
, acmd
->cmd_dmacount
, ulong_t
, acmd
->cmd_dma_len
);
5034 /* lets get the command directions */
5035 if (acmd
->cmd_flags
& CFLAG_DMASEND
) {
5036 flags
= MFI_FRAME_DIR_WRITE
;
5038 if (acmd
->cmd_flags
& CFLAG_CONSISTENT
) {
5039 (void) ddi_dma_sync(acmd
->cmd_dmahandle
,
5040 acmd
->cmd_dma_offset
, acmd
->cmd_dma_len
,
5041 DDI_DMA_SYNC_FORDEV
);
5043 } else if (acmd
->cmd_flags
& ~CFLAG_DMASEND
) {
5044 flags
= MFI_FRAME_DIR_READ
;
5046 if (acmd
->cmd_flags
& CFLAG_CONSISTENT
) {
5047 (void) ddi_dma_sync(acmd
->cmd_dmahandle
,
5048 acmd
->cmd_dma_offset
, acmd
->cmd_dma_len
,
5049 DDI_DMA_SYNC_FORCPU
);
5052 flags
= MFI_FRAME_DIR_NONE
;
5055 if (instance
->flag_ieee
) {
5056 flags
|= MFI_FRAME_IEEE
;
5058 flags
|= MFI_FRAME_SGL64
;
5060 switch (pkt
->pkt_cdbp
[0]) {
5063 * case SCMD_SYNCHRONIZE_CACHE:
5064 * flush_cache(instance);
5065 * mrsas_return_mfi_pkt(instance, cmd);
5079 if (acmd
->islogical
) {
5080 ldio
= (struct mrsas_io_frame
*)cmd
->frame
;
5083 * preare the Logical IO frame:
5084 * 2nd bit is zero for all read cmds
5086 ddi_put8(acc_handle
, &ldio
->cmd
,
5087 (pkt
->pkt_cdbp
[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
5088 : MFI_CMD_OP_LD_READ
);
5089 ddi_put8(acc_handle
, &ldio
->cmd_status
, 0x0);
5090 ddi_put8(acc_handle
, &ldio
->scsi_status
, 0x0);
5091 ddi_put8(acc_handle
, &ldio
->target_id
, acmd
->device_id
);
5092 ddi_put16(acc_handle
, &ldio
->timeout
, 0);
5093 ddi_put8(acc_handle
, &ldio
->reserved_0
, 0);
5094 ddi_put16(acc_handle
, &ldio
->pad_0
, 0);
5095 ddi_put16(acc_handle
, &ldio
->flags
, flags
);
5097 /* Initialize sense Information */
5098 bzero(cmd
->sense
, SENSE_LENGTH
);
5099 ddi_put8(acc_handle
, &ldio
->sense_len
, SENSE_LENGTH
);
5100 ddi_put32(acc_handle
, &ldio
->sense_buf_phys_addr_hi
, 0);
5101 ddi_put32(acc_handle
, &ldio
->sense_buf_phys_addr_lo
,
5102 cmd
->sense_phys_addr
);
5103 ddi_put32(acc_handle
, &ldio
->start_lba_hi
, 0);
5104 ddi_put8(acc_handle
, &ldio
->access_byte
,
5105 (acmd
->cmd_cdblen
!= 6) ? pkt
->pkt_cdbp
[1] : 0);
5106 ddi_put8(acc_handle
, &ldio
->sge_count
,
5107 acmd
->cmd_cookiecnt
);
5108 if (instance
->flag_ieee
) {
5110 (struct mrsas_sge_ieee
*)&ldio
->sgl
;
5112 mfi_sgl
= (struct mrsas_sge64
*)&ldio
->sgl
;
5115 (void) ddi_get32(acc_handle
, &ldio
->context
);
5117 if (acmd
->cmd_cdblen
== CDB_GROUP0
) {
5119 ddi_put32(acc_handle
, &ldio
->lba_count
, (
5120 (uint16_t)(pkt
->pkt_cdbp
[4])));
5122 ddi_put32(acc_handle
, &ldio
->start_lba_lo
, (
5123 ((uint32_t)(pkt
->pkt_cdbp
[3])) |
5124 ((uint32_t)(pkt
->pkt_cdbp
[2]) << 8) |
5125 ((uint32_t)((pkt
->pkt_cdbp
[1]) & 0x1F)
5127 } else if (acmd
->cmd_cdblen
== CDB_GROUP1
) {
5129 ddi_put32(acc_handle
, &ldio
->lba_count
, (
5130 ((uint16_t)(pkt
->pkt_cdbp
[8])) |
5131 ((uint16_t)(pkt
->pkt_cdbp
[7]) << 8)));
5133 ddi_put32(acc_handle
, &ldio
->start_lba_lo
, (
5134 ((uint32_t)(pkt
->pkt_cdbp
[5])) |
5135 ((uint32_t)(pkt
->pkt_cdbp
[4]) << 8) |
5136 ((uint32_t)(pkt
->pkt_cdbp
[3]) << 16) |
5137 ((uint32_t)(pkt
->pkt_cdbp
[2]) << 24)));
5138 } else if (acmd
->cmd_cdblen
== CDB_GROUP5
) {
5140 ddi_put32(acc_handle
, &ldio
->lba_count
, (
5141 ((uint32_t)(pkt
->pkt_cdbp
[9])) |
5142 ((uint32_t)(pkt
->pkt_cdbp
[8]) << 8) |
5143 ((uint32_t)(pkt
->pkt_cdbp
[7]) << 16) |
5144 ((uint32_t)(pkt
->pkt_cdbp
[6]) << 24)));
5146 ddi_put32(acc_handle
, &ldio
->start_lba_lo
, (
5147 ((uint32_t)(pkt
->pkt_cdbp
[5])) |
5148 ((uint32_t)(pkt
->pkt_cdbp
[4]) << 8) |
5149 ((uint32_t)(pkt
->pkt_cdbp
[3]) << 16) |
5150 ((uint32_t)(pkt
->pkt_cdbp
[2]) << 24)));
5151 } else if (acmd
->cmd_cdblen
== CDB_GROUP4
) {
5153 ddi_put32(acc_handle
, &ldio
->lba_count
, (
5154 ((uint32_t)(pkt
->pkt_cdbp
[13])) |
5155 ((uint32_t)(pkt
->pkt_cdbp
[12]) << 8) |
5156 ((uint32_t)(pkt
->pkt_cdbp
[11]) << 16) |
5157 ((uint32_t)(pkt
->pkt_cdbp
[10]) << 24)));
5159 ddi_put32(acc_handle
, &ldio
->start_lba_lo
, (
5160 ((uint32_t)(pkt
->pkt_cdbp
[9])) |
5161 ((uint32_t)(pkt
->pkt_cdbp
[8]) << 8) |
5162 ((uint32_t)(pkt
->pkt_cdbp
[7]) << 16) |
5163 ((uint32_t)(pkt
->pkt_cdbp
[6]) << 24)));
5165 ddi_put32(acc_handle
, &ldio
->start_lba_hi
, (
5166 ((uint32_t)(pkt
->pkt_cdbp
[5])) |
5167 ((uint32_t)(pkt
->pkt_cdbp
[4]) << 8) |
5168 ((uint32_t)(pkt
->pkt_cdbp
[3]) << 16) |
5169 ((uint32_t)(pkt
->pkt_cdbp
[2]) << 24)));
5174 /* For all non-rd/wr and physical disk cmds */
5178 switch (pkt
->pkt_cdbp
[0]) {
5179 case SCMD_MODE_SENSE
:
5180 case SCMD_MODE_SENSE_G1
: {
5181 union scsi_cdb
*cdbp
;
5184 cdbp
= (void *)pkt
->pkt_cdbp
;
5185 page_code
= (uint16_t)cdbp
->cdb_un
.sg
.scsi
[0];
5186 switch (page_code
) {
5189 (void) mrsas_mode_sense_build(pkt
);
5190 mrsas_return_mfi_pkt(instance
, cmd
);
5200 pthru
= (struct mrsas_pthru_frame
*)cmd
->frame
;
5202 /* prepare the DCDB frame */
5203 ddi_put8(acc_handle
, &pthru
->cmd
, (acmd
->islogical
) ?
5204 MFI_CMD_OP_LD_SCSI
: MFI_CMD_OP_PD_SCSI
);
5205 ddi_put8(acc_handle
, &pthru
->cmd_status
, 0x0);
5206 ddi_put8(acc_handle
, &pthru
->scsi_status
, 0x0);
5207 ddi_put8(acc_handle
, &pthru
->target_id
, acmd
->device_id
);
5208 ddi_put8(acc_handle
, &pthru
->lun
, 0);
5209 ddi_put8(acc_handle
, &pthru
->cdb_len
, acmd
->cmd_cdblen
);
5210 ddi_put16(acc_handle
, &pthru
->timeout
, 0);
5211 ddi_put16(acc_handle
, &pthru
->flags
, flags
);
5212 tmp_data_xfer_len
= 0;
5213 for (i
= 0; i
< acmd
->cmd_cookiecnt
; i
++) {
5214 tmp_data_xfer_len
+= acmd
->cmd_dmacookies
[i
].dmac_size
;
5216 ddi_put32(acc_handle
, &pthru
->data_xfer_len
,
5218 ddi_put8(acc_handle
, &pthru
->sge_count
, acmd
->cmd_cookiecnt
);
5219 if (instance
->flag_ieee
) {
5220 mfi_sgl_ieee
= (struct mrsas_sge_ieee
*)&pthru
->sgl
;
5222 mfi_sgl
= (struct mrsas_sge64
*)&pthru
->sgl
;
5225 bzero(cmd
->sense
, SENSE_LENGTH
);
5226 ddi_put8(acc_handle
, &pthru
->sense_len
, SENSE_LENGTH
);
5227 ddi_put32(acc_handle
, &pthru
->sense_buf_phys_addr_hi
, 0);
5228 ddi_put32(acc_handle
, &pthru
->sense_buf_phys_addr_lo
,
5229 cmd
->sense_phys_addr
);
5231 (void) ddi_get32(acc_handle
, &pthru
->context
);
5232 ddi_rep_put8(acc_handle
, (uint8_t *)pkt
->pkt_cdbp
,
5233 (uint8_t *)pthru
->cdb
, acmd
->cmd_cdblen
, DDI_DEV_AUTOINCR
);
5238 /* prepare the scatter-gather list for the firmware */
5239 if (instance
->flag_ieee
) {
5240 for (i
= 0; i
< acmd
->cmd_cookiecnt
; i
++, mfi_sgl_ieee
++) {
5241 ddi_put64(acc_handle
, &mfi_sgl_ieee
->phys_addr
,
5242 acmd
->cmd_dmacookies
[i
].dmac_laddress
);
5243 ddi_put32(acc_handle
, &mfi_sgl_ieee
->length
,
5244 acmd
->cmd_dmacookies
[i
].dmac_size
);
5246 sge_bytes
= sizeof (struct mrsas_sge_ieee
)*acmd
->cmd_cookiecnt
;
5248 for (i
= 0; i
< acmd
->cmd_cookiecnt
; i
++, mfi_sgl
++) {
5249 ddi_put64(acc_handle
, &mfi_sgl
->phys_addr
,
5250 acmd
->cmd_dmacookies
[i
].dmac_laddress
);
5251 ddi_put32(acc_handle
, &mfi_sgl
->length
,
5252 acmd
->cmd_dmacookies
[i
].dmac_size
);
5254 sge_bytes
= sizeof (struct mrsas_sge64
)*acmd
->cmd_cookiecnt
;
5257 cmd
->frame_count
= (sge_bytes
/ MRMFI_FRAME_SIZE
) +
5258 ((sge_bytes
% MRMFI_FRAME_SIZE
) ? 1 : 0) + 1;
5260 if (cmd
->frame_count
>= 8) {
5261 cmd
->frame_count
= 8;
5268 * wait_for_outstanding - Wait for all outstanding cmds
5269 * @instance: Adapter soft state
5271 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5272 * complete all its outstanding commands. Returns error if one or more IOs
5273 * are pending after this time period.
5276 wait_for_outstanding(struct mrsas_instance
*instance
)
5279 uint32_t wait_time
= 90;
5281 for (i
= 0; i
< wait_time
; i
++) {
5282 if (!instance
->fw_outstanding
) {
5286 drv_usecwait(MILLISEC
); /* wait for 1000 usecs */;
5289 if (instance
->fw_outstanding
) {
5300 issue_mfi_pthru(struct mrsas_instance
*instance
, struct mrsas_ioctl
*ioctl
,
5301 struct mrsas_cmd
*cmd
, int mode
)
5304 uint32_t kphys_addr
= 0;
5305 uint32_t xferlen
= 0;
5306 uint32_t new_xfer_length
= 0;
5308 ddi_acc_handle_t acc_handle
= cmd
->frame_dma_obj
.acc_handle
;
5309 dma_obj_t pthru_dma_obj
;
5310 struct mrsas_pthru_frame
*kpthru
;
5311 struct mrsas_pthru_frame
*pthru
;
5313 pthru
= &cmd
->frame
->pthru
;
5314 kpthru
= (struct mrsas_pthru_frame
*)&ioctl
->frame
[0];
5316 if (instance
->adapterresetinprogress
) {
5317 con_log(CL_ANN1
, (CE_WARN
, "issue_mfi_pthru: Reset flag set, "
5318 "returning mfi_pkt and setting TRAN_BUSY\n"));
5319 return (DDI_FAILURE
);
5321 model
= ddi_model_convert_from(mode
& FMODELS
);
5322 if (model
== DDI_MODEL_ILP32
) {
5323 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_pthru: DDI_MODEL_LP32"));
5325 xferlen
= kpthru
->sgl
.sge32
[0].length
;
5327 ubuf
= (void *)(ulong_t
)kpthru
->sgl
.sge32
[0].phys_addr
;
5330 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_pthru: DDI_MODEL_LP32"));
5331 xferlen
= kpthru
->sgl
.sge32
[0].length
;
5332 ubuf
= (void *)(ulong_t
)kpthru
->sgl
.sge32
[0].phys_addr
;
5334 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_pthru: DDI_MODEL_LP64"));
5335 xferlen
= kpthru
->sgl
.sge64
[0].length
;
5336 ubuf
= (void *)(ulong_t
)kpthru
->sgl
.sge64
[0].phys_addr
;
5341 /* means IOCTL requires DMA */
5342 /* allocate the data transfer buffer */
5343 /* pthru_dma_obj.size = xferlen; */
5344 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen
, new_xfer_length
,
5346 pthru_dma_obj
.size
= new_xfer_length
;
5347 pthru_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
5348 pthru_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
5349 pthru_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
5350 pthru_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
5351 pthru_dma_obj
.dma_attr
.dma_attr_align
= 1;
5353 /* allocate kernel buffer for DMA */
5354 if (mrsas_alloc_dma_obj(instance
, &pthru_dma_obj
,
5355 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
5356 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_pthru: "
5357 "could not allocate data transfer buffer."));
5358 return (DDI_FAILURE
);
5360 (void) memset(pthru_dma_obj
.buffer
, 0, xferlen
);
5362 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5363 if (kpthru
->flags
& MFI_FRAME_DIR_WRITE
) {
5364 for (i
= 0; i
< xferlen
; i
++) {
5365 if (ddi_copyin((uint8_t *)ubuf
+i
,
5366 (uint8_t *)pthru_dma_obj
.buffer
+i
,
5368 con_log(CL_ANN
, (CE_WARN
,
5369 "issue_mfi_pthru : "
5370 "copy from user space failed"));
5371 return (DDI_FAILURE
);
5376 kphys_addr
= pthru_dma_obj
.dma_cookie
[0].dmac_address
;
5379 ddi_put8(acc_handle
, &pthru
->cmd
, kpthru
->cmd
);
5380 ddi_put8(acc_handle
, &pthru
->sense_len
, SENSE_LENGTH
);
5381 ddi_put8(acc_handle
, &pthru
->cmd_status
, 0);
5382 ddi_put8(acc_handle
, &pthru
->scsi_status
, 0);
5383 ddi_put8(acc_handle
, &pthru
->target_id
, kpthru
->target_id
);
5384 ddi_put8(acc_handle
, &pthru
->lun
, kpthru
->lun
);
5385 ddi_put8(acc_handle
, &pthru
->cdb_len
, kpthru
->cdb_len
);
5386 ddi_put8(acc_handle
, &pthru
->sge_count
, kpthru
->sge_count
);
5387 ddi_put16(acc_handle
, &pthru
->timeout
, kpthru
->timeout
);
5388 ddi_put32(acc_handle
, &pthru
->data_xfer_len
, kpthru
->data_xfer_len
);
5390 ddi_put32(acc_handle
, &pthru
->sense_buf_phys_addr_hi
, 0);
5391 pthru
->sense_buf_phys_addr_lo
= cmd
->sense_phys_addr
;
5392 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5394 ddi_rep_put8(acc_handle
, (uint8_t *)kpthru
->cdb
, (uint8_t *)pthru
->cdb
,
5395 pthru
->cdb_len
, DDI_DEV_AUTOINCR
);
5397 ddi_put16(acc_handle
, &pthru
->flags
, kpthru
->flags
& ~MFI_FRAME_SGL64
);
5398 ddi_put32(acc_handle
, &pthru
->sgl
.sge32
[0].length
, xferlen
);
5399 ddi_put32(acc_handle
, &pthru
->sgl
.sge32
[0].phys_addr
, kphys_addr
);
5401 cmd
->sync_cmd
= MRSAS_TRUE
;
5402 cmd
->frame_count
= 1;
5404 if (instance
->tbolt
) {
5405 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
5408 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
5409 con_log(CL_ANN
, (CE_WARN
,
5410 "issue_mfi_pthru: fw_ioctl failed"));
5412 if (xferlen
&& kpthru
->flags
& MFI_FRAME_DIR_READ
) {
5413 for (i
= 0; i
< xferlen
; i
++) {
5415 (uint8_t *)pthru_dma_obj
.buffer
+i
,
5416 (uint8_t *)ubuf
+i
, 1, mode
)) {
5417 con_log(CL_ANN
, (CE_WARN
,
5418 "issue_mfi_pthru : "
5419 "copy to user space failed"));
5420 return (DDI_FAILURE
);
5426 kpthru
->cmd_status
= ddi_get8(acc_handle
, &pthru
->cmd_status
);
5427 kpthru
->scsi_status
= ddi_get8(acc_handle
, &pthru
->scsi_status
);
5429 con_log(CL_ANN
, (CE_CONT
, "issue_mfi_pthru: cmd_status %x, "
5430 "scsi_status %x", kpthru
->cmd_status
, kpthru
->scsi_status
));
5431 DTRACE_PROBE3(issue_pthru
, uint8_t, kpthru
->cmd
, uint8_t,
5432 kpthru
->cmd_status
, uint8_t, kpthru
->scsi_status
);
5434 if (kpthru
->sense_len
) {
5435 uint_t sense_len
= SENSE_LENGTH
;
5437 (void *)(ulong_t
)kpthru
->sense_buf_phys_addr_lo
;
5438 if (kpthru
->sense_len
<= SENSE_LENGTH
) {
5439 sense_len
= kpthru
->sense_len
;
5442 for (i
= 0; i
< sense_len
; i
++) {
5444 (uint8_t *)cmd
->sense
+i
,
5445 (uint8_t *)sense_ubuf
+i
, 1, mode
)) {
5446 con_log(CL_ANN
, (CE_WARN
,
5447 "issue_mfi_pthru : "
5448 "copy to user space failed"));
5450 con_log(CL_DLEVEL1
, (CE_WARN
,
5451 "Copying Sense info sense_buff[%d] = 0x%X",
5452 i
, *((uint8_t *)cmd
->sense
+ i
)));
5455 (void) ddi_dma_sync(cmd
->frame_dma_obj
.dma_handle
, 0, 0,
5456 DDI_DMA_SYNC_FORDEV
);
5459 /* free kernel buffer */
5460 if (mrsas_free_dma_obj(instance
, pthru_dma_obj
) != DDI_SUCCESS
)
5461 return (DDI_FAILURE
);
5464 return (DDI_SUCCESS
);
5471 issue_mfi_dcmd(struct mrsas_instance
*instance
, struct mrsas_ioctl
*ioctl
,
5472 struct mrsas_cmd
*cmd
, int mode
)
5475 uint32_t kphys_addr
= 0;
5476 uint32_t xferlen
= 0;
5477 uint32_t new_xfer_length
= 0;
5479 dma_obj_t dcmd_dma_obj
;
5480 struct mrsas_dcmd_frame
*kdcmd
;
5481 struct mrsas_dcmd_frame
*dcmd
;
5482 ddi_acc_handle_t acc_handle
= cmd
->frame_dma_obj
.acc_handle
;
5484 dcmd
= &cmd
->frame
->dcmd
;
5485 kdcmd
= (struct mrsas_dcmd_frame
*)&ioctl
->frame
[0];
5487 if (instance
->adapterresetinprogress
) {
5488 con_log(CL_ANN1
, (CE_NOTE
, "Reset flag set, "
5489 "returning mfi_pkt and setting TRAN_BUSY"));
5490 return (DDI_FAILURE
);
5492 model
= ddi_model_convert_from(mode
& FMODELS
);
5493 if (model
== DDI_MODEL_ILP32
) {
5494 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5496 xferlen
= kdcmd
->sgl
.sge32
[0].length
;
5498 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge32
[0].phys_addr
;
5501 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5502 xferlen
= kdcmd
->sgl
.sge32
[0].length
;
5503 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge32
[0].phys_addr
;
5505 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5506 xferlen
= kdcmd
->sgl
.sge64
[0].length
;
5507 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge64
[0].phys_addr
;
5511 /* means IOCTL requires DMA */
5512 /* allocate the data transfer buffer */
5513 /* dcmd_dma_obj.size = xferlen; */
5514 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen
, new_xfer_length
,
5516 dcmd_dma_obj
.size
= new_xfer_length
;
5517 dcmd_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
5518 dcmd_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
5519 dcmd_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
5520 dcmd_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
5521 dcmd_dma_obj
.dma_attr
.dma_attr_align
= 1;
5523 /* allocate kernel buffer for DMA */
5524 if (mrsas_alloc_dma_obj(instance
, &dcmd_dma_obj
,
5525 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
5527 (CE_WARN
, "issue_mfi_dcmd: could not "
5528 "allocate data transfer buffer."));
5529 return (DDI_FAILURE
);
5531 (void) memset(dcmd_dma_obj
.buffer
, 0, xferlen
);
5533 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5534 if (kdcmd
->flags
& MFI_FRAME_DIR_WRITE
) {
5535 for (i
= 0; i
< xferlen
; i
++) {
5536 if (ddi_copyin((uint8_t *)ubuf
+ i
,
5537 (uint8_t *)dcmd_dma_obj
.buffer
+ i
,
5539 con_log(CL_ANN
, (CE_WARN
,
5541 "copy from user space failed"));
5542 return (DDI_FAILURE
);
5547 kphys_addr
= dcmd_dma_obj
.dma_cookie
[0].dmac_address
;
5550 ddi_put8(acc_handle
, &dcmd
->cmd
, kdcmd
->cmd
);
5551 ddi_put8(acc_handle
, &dcmd
->cmd_status
, 0);
5552 ddi_put8(acc_handle
, &dcmd
->sge_count
, kdcmd
->sge_count
);
5553 ddi_put16(acc_handle
, &dcmd
->timeout
, kdcmd
->timeout
);
5554 ddi_put32(acc_handle
, &dcmd
->data_xfer_len
, kdcmd
->data_xfer_len
);
5555 ddi_put32(acc_handle
, &dcmd
->opcode
, kdcmd
->opcode
);
5557 ddi_rep_put8(acc_handle
, (uint8_t *)kdcmd
->mbox
.b
,
5558 (uint8_t *)dcmd
->mbox
.b
, DCMD_MBOX_SZ
, DDI_DEV_AUTOINCR
);
5560 ddi_put16(acc_handle
, &dcmd
->flags
, kdcmd
->flags
& ~MFI_FRAME_SGL64
);
5561 ddi_put32(acc_handle
, &dcmd
->sgl
.sge32
[0].length
, xferlen
);
5562 ddi_put32(acc_handle
, &dcmd
->sgl
.sge32
[0].phys_addr
, kphys_addr
);
5564 cmd
->sync_cmd
= MRSAS_TRUE
;
5565 cmd
->frame_count
= 1;
5567 if (instance
->tbolt
) {
5568 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
5571 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
5572 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_dcmd: fw_ioctl failed"));
5574 if (xferlen
&& (kdcmd
->flags
& MFI_FRAME_DIR_READ
)) {
5575 for (i
= 0; i
< xferlen
; i
++) {
5577 (uint8_t *)dcmd_dma_obj
.buffer
+ i
,
5578 (uint8_t *)ubuf
+ i
,
5580 con_log(CL_ANN
, (CE_WARN
,
5582 "copy to user space failed"));
5583 return (DDI_FAILURE
);
5589 kdcmd
->cmd_status
= ddi_get8(acc_handle
, &dcmd
->cmd_status
);
5591 (CE_CONT
, "issue_mfi_dcmd: cmd_status %x", kdcmd
->cmd_status
));
5592 DTRACE_PROBE3(issue_dcmd
, uint32_t, kdcmd
->opcode
, uint8_t,
5593 kdcmd
->cmd
, uint8_t, kdcmd
->cmd_status
);
5596 /* free kernel buffer */
5597 if (mrsas_free_dma_obj(instance
, dcmd_dma_obj
) != DDI_SUCCESS
)
5598 return (DDI_FAILURE
);
5601 return (DDI_SUCCESS
);
5608 issue_mfi_smp(struct mrsas_instance
*instance
, struct mrsas_ioctl
*ioctl
,
5609 struct mrsas_cmd
*cmd
, int mode
)
5612 void *response_ubuf
;
5613 uint32_t request_xferlen
= 0;
5614 uint32_t response_xferlen
= 0;
5615 uint32_t new_xfer_length1
= 0;
5616 uint32_t new_xfer_length2
= 0;
5618 dma_obj_t request_dma_obj
;
5619 dma_obj_t response_dma_obj
;
5620 ddi_acc_handle_t acc_handle
= cmd
->frame_dma_obj
.acc_handle
;
5621 struct mrsas_smp_frame
*ksmp
;
5622 struct mrsas_smp_frame
*smp
;
5623 struct mrsas_sge32
*sge32
;
5625 struct mrsas_sge64
*sge64
;
5628 uint64_t tmp_sas_addr
;
5630 smp
= &cmd
->frame
->smp
;
5631 ksmp
= (struct mrsas_smp_frame
*)&ioctl
->frame
[0];
5633 if (instance
->adapterresetinprogress
) {
5634 con_log(CL_ANN1
, (CE_WARN
, "Reset flag set, "
5635 "returning mfi_pkt and setting TRAN_BUSY\n"));
5636 return (DDI_FAILURE
);
5638 model
= ddi_model_convert_from(mode
& FMODELS
);
5639 if (model
== DDI_MODEL_ILP32
) {
5640 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_smp: DDI_MODEL_ILP32"));
5642 sge32
= &ksmp
->sgl
[0].sge32
[0];
5643 response_xferlen
= sge32
[0].length
;
5644 request_xferlen
= sge32
[1].length
;
5645 con_log(CL_ANN
, (CE_CONT
, "issue_mfi_smp: "
5646 "response_xferlen = %x, request_xferlen = %x",
5647 response_xferlen
, request_xferlen
));
5649 response_ubuf
= (void *)(ulong_t
)sge32
[0].phys_addr
;
5650 request_ubuf
= (void *)(ulong_t
)sge32
[1].phys_addr
;
5651 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_smp: "
5652 "response_ubuf = %p, request_ubuf = %p",
5653 response_ubuf
, request_ubuf
));
5656 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_smp: DDI_MODEL_ILP32"));
5658 sge32
= &ksmp
->sgl
[0].sge32
[0];
5659 response_xferlen
= sge32
[0].length
;
5660 request_xferlen
= sge32
[1].length
;
5661 con_log(CL_ANN
, (CE_CONT
, "issue_mfi_smp: "
5662 "response_xferlen = %x, request_xferlen = %x",
5663 response_xferlen
, request_xferlen
));
5665 response_ubuf
= (void *)(ulong_t
)sge32
[0].phys_addr
;
5666 request_ubuf
= (void *)(ulong_t
)sge32
[1].phys_addr
;
5667 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_smp: "
5668 "response_ubuf = %p, request_ubuf = %p",
5669 response_ubuf
, request_ubuf
));
5671 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_smp: DDI_MODEL_LP64"));
5673 sge64
= &ksmp
->sgl
[0].sge64
[0];
5674 response_xferlen
= sge64
[0].length
;
5675 request_xferlen
= sge64
[1].length
;
5677 response_ubuf
= (void *)(ulong_t
)sge64
[0].phys_addr
;
5678 request_ubuf
= (void *)(ulong_t
)sge64
[1].phys_addr
;
5681 if (request_xferlen
) {
5682 /* means IOCTL requires DMA */
5683 /* allocate the data transfer buffer */
5684 /* request_dma_obj.size = request_xferlen; */
5685 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen
,
5686 new_xfer_length1
, PAGESIZE
);
5687 request_dma_obj
.size
= new_xfer_length1
;
5688 request_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
5689 request_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
5690 request_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
5691 request_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
5692 request_dma_obj
.dma_attr
.dma_attr_align
= 1;
5694 /* allocate kernel buffer for DMA */
5695 if (mrsas_alloc_dma_obj(instance
, &request_dma_obj
,
5696 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
5697 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
5698 "could not allocate data transfer buffer."));
5699 return (DDI_FAILURE
);
5701 (void) memset(request_dma_obj
.buffer
, 0, request_xferlen
);
5703 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5704 for (i
= 0; i
< request_xferlen
; i
++) {
5705 if (ddi_copyin((uint8_t *)request_ubuf
+ i
,
5706 (uint8_t *)request_dma_obj
.buffer
+ i
,
5708 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
5709 "copy from user space failed"));
5710 return (DDI_FAILURE
);
5715 if (response_xferlen
) {
5716 /* means IOCTL requires DMA */
5717 /* allocate the data transfer buffer */
5718 /* response_dma_obj.size = response_xferlen; */
5719 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen
,
5720 new_xfer_length2
, PAGESIZE
);
5721 response_dma_obj
.size
= new_xfer_length2
;
5722 response_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
5723 response_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
5724 response_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
5725 response_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
5726 response_dma_obj
.dma_attr
.dma_attr_align
= 1;
5728 /* allocate kernel buffer for DMA */
5729 if (mrsas_alloc_dma_obj(instance
, &response_dma_obj
,
5730 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
5731 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
5732 "could not allocate data transfer buffer."));
5733 return (DDI_FAILURE
);
5735 (void) memset(response_dma_obj
.buffer
, 0, response_xferlen
);
5737 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5738 for (i
= 0; i
< response_xferlen
; i
++) {
5739 if (ddi_copyin((uint8_t *)response_ubuf
+ i
,
5740 (uint8_t *)response_dma_obj
.buffer
+ i
,
5742 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
5743 "copy from user space failed"));
5744 return (DDI_FAILURE
);
5749 ddi_put8(acc_handle
, &smp
->cmd
, ksmp
->cmd
);
5750 ddi_put8(acc_handle
, &smp
->cmd_status
, 0);
5751 ddi_put8(acc_handle
, &smp
->connection_status
, 0);
5752 ddi_put8(acc_handle
, &smp
->sge_count
, ksmp
->sge_count
);
5753 /* smp->context = ksmp->context; */
5754 ddi_put16(acc_handle
, &smp
->timeout
, ksmp
->timeout
);
5755 ddi_put32(acc_handle
, &smp
->data_xfer_len
, ksmp
->data_xfer_len
);
5757 bcopy((void *)&ksmp
->sas_addr
, (void *)&tmp_sas_addr
,
5759 ddi_put64(acc_handle
, &smp
->sas_addr
, tmp_sas_addr
);
5761 ddi_put16(acc_handle
, &smp
->flags
, ksmp
->flags
& ~MFI_FRAME_SGL64
);
5763 model
= ddi_model_convert_from(mode
& FMODELS
);
5764 if (model
== DDI_MODEL_ILP32
) {
5765 con_log(CL_ANN1
, (CE_CONT
,
5766 "issue_mfi_smp: DDI_MODEL_ILP32"));
5768 sge32
= &smp
->sgl
[0].sge32
[0];
5769 ddi_put32(acc_handle
, &sge32
[0].length
, response_xferlen
);
5770 ddi_put32(acc_handle
, &sge32
[0].phys_addr
,
5771 response_dma_obj
.dma_cookie
[0].dmac_address
);
5772 ddi_put32(acc_handle
, &sge32
[1].length
, request_xferlen
);
5773 ddi_put32(acc_handle
, &sge32
[1].phys_addr
,
5774 request_dma_obj
.dma_cookie
[0].dmac_address
);
5777 con_log(CL_ANN1
, (CE_CONT
,
5778 "issue_mfi_smp: DDI_MODEL_ILP32"));
5779 sge32
= &smp
->sgl
[0].sge32
[0];
5780 ddi_put32(acc_handle
, &sge32
[0].length
, response_xferlen
);
5781 ddi_put32(acc_handle
, &sge32
[0].phys_addr
,
5782 response_dma_obj
.dma_cookie
[0].dmac_address
);
5783 ddi_put32(acc_handle
, &sge32
[1].length
, request_xferlen
);
5784 ddi_put32(acc_handle
, &sge32
[1].phys_addr
,
5785 request_dma_obj
.dma_cookie
[0].dmac_address
);
5787 con_log(CL_ANN1
, (CE_CONT
,
5788 "issue_mfi_smp: DDI_MODEL_LP64"));
5789 sge64
= &smp
->sgl
[0].sge64
[0];
5790 ddi_put32(acc_handle
, &sge64
[0].length
, response_xferlen
);
5791 ddi_put64(acc_handle
, &sge64
[0].phys_addr
,
5792 response_dma_obj
.dma_cookie
[0].dmac_address
);
5793 ddi_put32(acc_handle
, &sge64
[1].length
, request_xferlen
);
5794 ddi_put64(acc_handle
, &sge64
[1].phys_addr
,
5795 request_dma_obj
.dma_cookie
[0].dmac_address
);
5798 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_smp : "
5799 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5800 "smp->data_xfer_len = %d", ddi_get32(acc_handle
, &sge32
[0].length
),
5801 ddi_get32(acc_handle
, &sge32
[1].length
),
5802 ddi_get32(acc_handle
, &smp
->data_xfer_len
)));
5804 cmd
->sync_cmd
= MRSAS_TRUE
;
5805 cmd
->frame_count
= 1;
5807 if (instance
->tbolt
) {
5808 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
5811 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
5812 con_log(CL_ANN
, (CE_WARN
,
5813 "issue_mfi_smp: fw_ioctl failed"));
5815 con_log(CL_ANN1
, (CE_CONT
,
5816 "issue_mfi_smp: copy to user space"));
5818 if (request_xferlen
) {
5819 for (i
= 0; i
< request_xferlen
; i
++) {
5821 (uint8_t *)request_dma_obj
.buffer
+
5822 i
, (uint8_t *)request_ubuf
+ i
,
5824 con_log(CL_ANN
, (CE_WARN
,
5825 "issue_mfi_smp : copy to user space"
5827 return (DDI_FAILURE
);
5832 if (response_xferlen
) {
5833 for (i
= 0; i
< response_xferlen
; i
++) {
5835 (uint8_t *)response_dma_obj
.buffer
5836 + i
, (uint8_t *)response_ubuf
5838 con_log(CL_ANN
, (CE_WARN
,
5839 "issue_mfi_smp : copy to "
5840 "user space failed"));
5841 return (DDI_FAILURE
);
5847 ksmp
->cmd_status
= ddi_get8(acc_handle
, &smp
->cmd_status
);
5848 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_smp: smp->cmd_status = %d",
5850 DTRACE_PROBE2(issue_smp
, uint8_t, ksmp
->cmd
, uint8_t, ksmp
->cmd_status
);
5852 if (request_xferlen
) {
5853 /* free kernel buffer */
5854 if (mrsas_free_dma_obj(instance
, request_dma_obj
) !=
5856 return (DDI_FAILURE
);
5859 if (response_xferlen
) {
5860 /* free kernel buffer */
5861 if (mrsas_free_dma_obj(instance
, response_dma_obj
) !=
5863 return (DDI_FAILURE
);
5866 return (DDI_SUCCESS
);
5873 issue_mfi_stp(struct mrsas_instance
*instance
, struct mrsas_ioctl
*ioctl
,
5874 struct mrsas_cmd
*cmd
, int mode
)
5878 uint32_t fis_xferlen
= 0;
5879 uint32_t new_xfer_length1
= 0;
5880 uint32_t new_xfer_length2
= 0;
5881 uint32_t data_xferlen
= 0;
5883 dma_obj_t fis_dma_obj
;
5884 dma_obj_t data_dma_obj
;
5885 struct mrsas_stp_frame
*kstp
;
5886 struct mrsas_stp_frame
*stp
;
5887 ddi_acc_handle_t acc_handle
= cmd
->frame_dma_obj
.acc_handle
;
5890 stp
= &cmd
->frame
->stp
;
5891 kstp
= (struct mrsas_stp_frame
*)&ioctl
->frame
[0];
5893 if (instance
->adapterresetinprogress
) {
5894 con_log(CL_ANN1
, (CE_WARN
, "Reset flag set, "
5895 "returning mfi_pkt and setting TRAN_BUSY\n"));
5896 return (DDI_FAILURE
);
5898 model
= ddi_model_convert_from(mode
& FMODELS
);
5899 if (model
== DDI_MODEL_ILP32
) {
5900 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_stp: DDI_MODEL_ILP32"));
5902 fis_xferlen
= kstp
->sgl
.sge32
[0].length
;
5903 data_xferlen
= kstp
->sgl
.sge32
[1].length
;
5905 fis_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge32
[0].phys_addr
;
5906 data_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge32
[1].phys_addr
;
5909 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_stp: DDI_MODEL_ILP32"));
5911 fis_xferlen
= kstp
->sgl
.sge32
[0].length
;
5912 data_xferlen
= kstp
->sgl
.sge32
[1].length
;
5914 fis_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge32
[0].phys_addr
;
5915 data_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge32
[1].phys_addr
;
5917 con_log(CL_ANN1
, (CE_CONT
, "issue_mfi_stp: DDI_MODEL_LP64"));
5919 fis_xferlen
= kstp
->sgl
.sge64
[0].length
;
5920 data_xferlen
= kstp
->sgl
.sge64
[1].length
;
5922 fis_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge64
[0].phys_addr
;
5923 data_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge64
[1].phys_addr
;
5929 con_log(CL_ANN
, (CE_CONT
, "issue_mfi_stp: "
5930 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf
, fis_xferlen
));
5932 /* means IOCTL requires DMA */
5933 /* allocate the data transfer buffer */
5934 /* fis_dma_obj.size = fis_xferlen; */
5935 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen
,
5936 new_xfer_length1
, PAGESIZE
);
5937 fis_dma_obj
.size
= new_xfer_length1
;
5938 fis_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
5939 fis_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
5940 fis_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
5941 fis_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
5942 fis_dma_obj
.dma_attr
.dma_attr_align
= 1;
5944 /* allocate kernel buffer for DMA */
5945 if (mrsas_alloc_dma_obj(instance
, &fis_dma_obj
,
5946 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
5947 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp : "
5948 "could not allocate data transfer buffer."));
5949 return (DDI_FAILURE
);
5951 (void) memset(fis_dma_obj
.buffer
, 0, fis_xferlen
);
5953 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5954 for (i
= 0; i
< fis_xferlen
; i
++) {
5955 if (ddi_copyin((uint8_t *)fis_ubuf
+ i
,
5956 (uint8_t *)fis_dma_obj
.buffer
+ i
, 1, mode
)) {
5957 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
5958 "copy from user space failed"));
5959 return (DDI_FAILURE
);
5965 con_log(CL_ANN
, (CE_CONT
, "issue_mfi_stp: data_ubuf = %p "
5966 "data_xferlen = %x", data_ubuf
, data_xferlen
));
5968 /* means IOCTL requires DMA */
5969 /* allocate the data transfer buffer */
5970 /* data_dma_obj.size = data_xferlen; */
5971 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen
, new_xfer_length2
,
5973 data_dma_obj
.size
= new_xfer_length2
;
5974 data_dma_obj
.dma_attr
= mrsas_generic_dma_attr
;
5975 data_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
5976 data_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
5977 data_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
5978 data_dma_obj
.dma_attr
.dma_attr_align
= 1;
5980 /* allocate kernel buffer for DMA */
5981 if (mrsas_alloc_dma_obj(instance
, &data_dma_obj
,
5982 (uchar_t
)DDI_STRUCTURE_LE_ACC
) != 1) {
5983 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
5984 "could not allocate data transfer buffer."));
5985 return (DDI_FAILURE
);
5987 (void) memset(data_dma_obj
.buffer
, 0, data_xferlen
);
5989 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5990 for (i
= 0; i
< data_xferlen
; i
++) {
5991 if (ddi_copyin((uint8_t *)data_ubuf
+ i
,
5992 (uint8_t *)data_dma_obj
.buffer
+ i
, 1, mode
)) {
5993 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
5994 "copy from user space failed"));
5995 return (DDI_FAILURE
);
6000 ddi_put8(acc_handle
, &stp
->cmd
, kstp
->cmd
);
6001 ddi_put8(acc_handle
, &stp
->cmd_status
, 0);
6002 ddi_put8(acc_handle
, &stp
->connection_status
, 0);
6003 ddi_put8(acc_handle
, &stp
->target_id
, kstp
->target_id
);
6004 ddi_put8(acc_handle
, &stp
->sge_count
, kstp
->sge_count
);
6006 ddi_put16(acc_handle
, &stp
->timeout
, kstp
->timeout
);
6007 ddi_put32(acc_handle
, &stp
->data_xfer_len
, kstp
->data_xfer_len
);
6009 ddi_rep_put8(acc_handle
, (uint8_t *)kstp
->fis
, (uint8_t *)stp
->fis
, 10,
6012 ddi_put16(acc_handle
, &stp
->flags
, kstp
->flags
& ~MFI_FRAME_SGL64
);
6013 ddi_put32(acc_handle
, &stp
->stp_flags
, kstp
->stp_flags
);
6014 ddi_put32(acc_handle
, &stp
->sgl
.sge32
[0].length
, fis_xferlen
);
6015 ddi_put32(acc_handle
, &stp
->sgl
.sge32
[0].phys_addr
,
6016 fis_dma_obj
.dma_cookie
[0].dmac_address
);
6017 ddi_put32(acc_handle
, &stp
->sgl
.sge32
[1].length
, data_xferlen
);
6018 ddi_put32(acc_handle
, &stp
->sgl
.sge32
[1].phys_addr
,
6019 data_dma_obj
.dma_cookie
[0].dmac_address
);
6021 cmd
->sync_cmd
= MRSAS_TRUE
;
6022 cmd
->frame_count
= 1;
6024 if (instance
->tbolt
) {
6025 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
6028 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
6029 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: fw_ioctl failed"));
6033 for (i
= 0; i
< fis_xferlen
; i
++) {
6035 (uint8_t *)fis_dma_obj
.buffer
+ i
,
6036 (uint8_t *)fis_ubuf
+ i
, 1, mode
)) {
6037 con_log(CL_ANN
, (CE_WARN
,
6038 "issue_mfi_stp : copy to "
6039 "user space failed"));
6040 return (DDI_FAILURE
);
6046 for (i
= 0; i
< data_xferlen
; i
++) {
6048 (uint8_t *)data_dma_obj
.buffer
+ i
,
6049 (uint8_t *)data_ubuf
+ i
, 1, mode
)) {
6050 con_log(CL_ANN
, (CE_WARN
,
6051 "issue_mfi_stp : copy to"
6052 " user space failed"));
6053 return (DDI_FAILURE
);
6058 kstp
->cmd_status
= ddi_get8(acc_handle
, &stp
->cmd_status
);
6059 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_stp: stp->cmd_status = %d",
6061 DTRACE_PROBE2(issue_stp
, uint8_t, kstp
->cmd
, uint8_t, kstp
->cmd_status
);
6064 /* free kernel buffer */
6065 if (mrsas_free_dma_obj(instance
, fis_dma_obj
) != DDI_SUCCESS
)
6066 return (DDI_FAILURE
);
6070 /* free kernel buffer */
6071 if (mrsas_free_dma_obj(instance
, data_dma_obj
) != DDI_SUCCESS
)
6072 return (DDI_FAILURE
);
6075 return (DDI_SUCCESS
);
6082 fill_up_drv_ver(struct mrsas_drv_ver
*dv
)
6084 (void) memset(dv
, 0, sizeof (struct mrsas_drv_ver
));
6086 (void) memcpy(dv
->signature
, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6087 (void) memcpy(dv
->os_name
, "Solaris", strlen("Solaris"));
6088 (void) memcpy(dv
->drv_name
, "mr_sas", strlen("mr_sas"));
6089 (void) memcpy(dv
->drv_ver
, MRSAS_VERSION
, strlen(MRSAS_VERSION
));
6090 (void) memcpy(dv
->drv_rel_date
, MRSAS_RELDATE
,
6091 strlen(MRSAS_RELDATE
));
6099 handle_drv_ioctl(struct mrsas_instance
*instance
, struct mrsas_ioctl
*ioctl
,
6103 int rval
= DDI_SUCCESS
;
6107 uint8_t *pci_conf_buf
;
6111 struct mrsas_dcmd_frame
*kdcmd
;
6112 struct mrsas_drv_ver dv
;
6113 struct mrsas_pci_information pi
;
6115 kdcmd
= (struct mrsas_dcmd_frame
*)&ioctl
->frame
[0];
6117 model
= ddi_model_convert_from(mode
& FMODELS
);
6118 if (model
== DDI_MODEL_ILP32
) {
6119 con_log(CL_ANN1
, (CE_CONT
,
6120 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6122 xferlen
= kdcmd
->sgl
.sge32
[0].length
;
6124 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge32
[0].phys_addr
;
6127 con_log(CL_ANN1
, (CE_CONT
,
6128 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6129 xferlen
= kdcmd
->sgl
.sge32
[0].length
;
6130 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge32
[0].phys_addr
;
6132 con_log(CL_ANN1
, (CE_CONT
,
6133 "handle_drv_ioctl: DDI_MODEL_LP64"));
6134 xferlen
= kdcmd
->sgl
.sge64
[0].length
;
6135 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge64
[0].phys_addr
;
6138 con_log(CL_ANN1
, (CE_CONT
, "handle_drv_ioctl: "
6139 "dataBuf=%p size=%d bytes", ubuf
, xferlen
));
6141 switch (kdcmd
->opcode
) {
6142 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION
:
6143 con_log(CL_ANN1
, (CE_CONT
, "handle_drv_ioctl: "
6144 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6146 fill_up_drv_ver(&dv
);
6148 if (ddi_copyout(&dv
, ubuf
, xferlen
, mode
)) {
6149 con_log(CL_ANN
, (CE_WARN
, "handle_drv_ioctl: "
6150 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6151 "copy to user space failed"));
6152 kdcmd
->cmd_status
= 1;
6155 kdcmd
->cmd_status
= 0;
6158 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION
:
6159 con_log(CL_ANN1
, (CE_NOTE
, "handle_drv_ioctl: "
6160 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6162 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, instance
->dip
,
6163 0, "reg", &props
, &num_props
)) {
6164 con_log(CL_ANN
, (CE_WARN
, "handle_drv_ioctl: "
6165 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6166 "ddi_prop_look_int_array failed"));
6170 pi
.busNumber
= (props
[0] >> 16) & 0xFF;
6171 pi
.deviceNumber
= (props
[0] >> 11) & 0x1f;
6172 pi
.functionNumber
= (props
[0] >> 8) & 0x7;
6173 ddi_prop_free((void *)props
);
6176 pci_conf_buf
= (uint8_t *)&pi
.pciHeaderInfo
;
6178 for (i
= 0; i
< (sizeof (struct mrsas_pci_information
) -
6179 offsetof(struct mrsas_pci_information
, pciHeaderInfo
));
6182 pci_config_get8(instance
->pci_handle
, i
);
6185 if (ddi_copyout(&pi
, ubuf
, xferlen
, mode
)) {
6186 con_log(CL_ANN
, (CE_WARN
, "handle_drv_ioctl: "
6187 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6188 "copy to user space failed"));
6189 kdcmd
->cmd_status
= 1;
6192 kdcmd
->cmd_status
= 0;
6196 con_log(CL_ANN
, (CE_WARN
, "handle_drv_ioctl: "
6197 "invalid driver specific IOCTL opcode = 0x%x",
6199 kdcmd
->cmd_status
= 1;
6211 handle_mfi_ioctl(struct mrsas_instance
*instance
, struct mrsas_ioctl
*ioctl
,
6214 int rval
= DDI_SUCCESS
;
6216 struct mrsas_header
*hdr
;
6217 struct mrsas_cmd
*cmd
;
6219 if (instance
->tbolt
) {
6220 cmd
= get_raid_msg_mfi_pkt(instance
);
6222 cmd
= mrsas_get_mfi_pkt(instance
);
6225 con_log(CL_ANN
, (CE_WARN
, "mr_sas: "
6226 "failed to get a cmd packet"));
6227 DTRACE_PROBE2(mfi_ioctl_err
, uint16_t,
6228 instance
->fw_outstanding
, uint16_t, instance
->max_fw_cmds
);
6229 return (DDI_FAILURE
);
6232 /* Clear the frame buffer and assign back the context id */
6233 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
6234 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &cmd
->frame
->hdr
.context
,
6237 hdr
= (struct mrsas_header
*)&ioctl
->frame
[0];
6239 switch (ddi_get8(cmd
->frame_dma_obj
.acc_handle
, &hdr
->cmd
)) {
6240 case MFI_CMD_OP_DCMD
:
6241 rval
= issue_mfi_dcmd(instance
, ioctl
, cmd
, mode
);
6243 case MFI_CMD_OP_SMP
:
6244 rval
= issue_mfi_smp(instance
, ioctl
, cmd
, mode
);
6246 case MFI_CMD_OP_STP
:
6247 rval
= issue_mfi_stp(instance
, ioctl
, cmd
, mode
);
6249 case MFI_CMD_OP_LD_SCSI
:
6250 case MFI_CMD_OP_PD_SCSI
:
6251 rval
= issue_mfi_pthru(instance
, ioctl
, cmd
, mode
);
6254 con_log(CL_ANN
, (CE_WARN
, "handle_mfi_ioctl: "
6255 "invalid mfi ioctl hdr->cmd = %d", hdr
->cmd
));
6260 if (mrsas_common_check(instance
, cmd
) != DDI_SUCCESS
)
6263 if (instance
->tbolt
) {
6264 return_raid_msg_mfi_pkt(instance
, cmd
);
6266 mrsas_return_mfi_pkt(instance
, cmd
);
6276 handle_mfi_aen(struct mrsas_instance
*instance
, struct mrsas_aen
*aen
)
6280 rval
= register_mfi_aen(instance
, instance
->aen_seq_num
,
6281 aen
->class_locale_word
);
6283 aen
->cmd_status
= (uint8_t)rval
;
6289 register_mfi_aen(struct mrsas_instance
*instance
, uint32_t seq_num
,
6290 uint32_t class_locale_word
)
6294 struct mrsas_cmd
*cmd
, *aen_cmd
;
6295 struct mrsas_dcmd_frame
*dcmd
;
6296 union mrsas_evt_class_locale curr_aen
;
6297 union mrsas_evt_class_locale prev_aen
;
6299 con_log(CL_ANN
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
6301 * If there an AEN pending already (aen_cmd), check if the
6302 * class_locale of that pending AEN is inclusive of the new
6303 * AEN request we currently have. If it is, then we don't have
6304 * to do anything. In other words, whichever events the current
6305 * AEN request is subscribing to, have already been subscribed
6308 * If the old_cmd is _not_ inclusive, then we have to abort
6309 * that command, form a class_locale that is superset of both
6310 * old and current and re-issue to the FW
6313 curr_aen
.word
= LE_32(class_locale_word
);
6314 curr_aen
.members
.locale
= LE_16(curr_aen
.members
.locale
);
6315 aen_cmd
= instance
->aen_cmd
;
6317 prev_aen
.word
= ddi_get32(aen_cmd
->frame_dma_obj
.acc_handle
,
6318 &aen_cmd
->frame
->dcmd
.mbox
.w
[1]);
6319 prev_aen
.word
= LE_32(prev_aen
.word
);
6320 prev_aen
.members
.locale
= LE_16(prev_aen
.members
.locale
);
6322 * A class whose enum value is smaller is inclusive of all
6323 * higher values. If a PROGRESS (= -1) was previously
6324 * registered, then a new registration requests for higher
6325 * classes need not be sent to FW. They are automatically
6328 * Locale numbers don't have such hierarchy. They are bitmap
6331 if ((prev_aen
.members
.class <= curr_aen
.members
.class) &&
6332 !((prev_aen
.members
.locale
& curr_aen
.members
.locale
) ^
6333 curr_aen
.members
.locale
)) {
6335 * Previously issued event registration includes
6336 * current request. Nothing to do.
6341 curr_aen
.members
.locale
|= prev_aen
.members
.locale
;
6343 if (prev_aen
.members
.class < curr_aen
.members
.class)
6344 curr_aen
.members
.class = prev_aen
.members
.class;
6346 ret_val
= abort_aen_cmd(instance
, aen_cmd
);
6349 con_log(CL_ANN
, (CE_WARN
, "register_mfi_aen: "
6350 "failed to abort prevous AEN command"));
6356 curr_aen
.word
= LE_32(class_locale_word
);
6357 curr_aen
.members
.locale
= LE_16(curr_aen
.members
.locale
);
6360 if (instance
->tbolt
) {
6361 cmd
= get_raid_msg_mfi_pkt(instance
);
6363 cmd
= mrsas_get_mfi_pkt(instance
);
6367 DTRACE_PROBE2(mfi_aen_err
, uint16_t, instance
->fw_outstanding
,
6368 uint16_t, instance
->max_fw_cmds
);
6372 /* Clear the frame buffer and assign back the context id */
6373 (void) memset((char *)&cmd
->frame
[0], 0, sizeof (union mrsas_frame
));
6374 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &cmd
->frame
->hdr
.context
,
6377 dcmd
= &cmd
->frame
->dcmd
;
6379 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
6380 (void) memset(dcmd
->mbox
.b
, 0, DCMD_MBOX_SZ
);
6382 (void) memset(instance
->mfi_evt_detail_obj
.buffer
, 0,
6383 sizeof (struct mrsas_evt_detail
));
6385 /* Prepare DCMD for aen registration */
6386 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->cmd
, MFI_CMD_OP_DCMD
);
6387 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->cmd_status
, 0x0);
6388 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sge_count
, 1);
6389 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->flags
,
6390 MFI_FRAME_DIR_READ
);
6391 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->timeout
, 0);
6392 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->data_xfer_len
,
6393 sizeof (struct mrsas_evt_detail
));
6394 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->opcode
,
6395 MR_DCMD_CTRL_EVENT_WAIT
);
6396 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->mbox
.w
[0], seq_num
);
6397 curr_aen
.members
.locale
= LE_16(curr_aen
.members
.locale
);
6398 curr_aen
.word
= LE_32(curr_aen
.word
);
6399 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->mbox
.w
[1],
6401 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sgl
.sge32
[0].phys_addr
,
6402 instance
->mfi_evt_detail_obj
.dma_cookie
[0].dmac_address
);
6403 ddi_put32(cmd
->frame_dma_obj
.acc_handle
, &dcmd
->sgl
.sge32
[0].length
,
6404 sizeof (struct mrsas_evt_detail
));
6406 instance
->aen_seq_num
= seq_num
;
6410 * Store reference to the cmd used to register for AEN. When an
6411 * application wants us to register for AEN, we have to abort this
6412 * cmd and re-register with a new EVENT LOCALE supplied by that app
6414 instance
->aen_cmd
= cmd
;
6416 cmd
->frame_count
= 1;
6418 /* Issue the aen registration frame */
6419 /* atomic_add_16 (&instance->fw_outstanding, 1); */
6420 if (instance
->tbolt
) {
6421 mr_sas_tbolt_build_mfi_cmd(instance
, cmd
);
6423 instance
->func_ptr
->issue_cmd(cmd
, instance
);
6429 display_scsi_inquiry(caddr_t scsi_inq
)
6431 #define MAX_SCSI_DEVICE_CODE 14
6433 char inquiry_buf
[256] = {0};
6435 const char *const scsi_device_types
[] = {
6437 "Sequential-Access",
6454 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " Vendor: ");
6455 for (i
= 8; i
< 16; i
++) {
6456 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "%c",
6460 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " Model: ");
6462 for (i
= 16; i
< 32; i
++) {
6463 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "%c",
6467 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " Rev: ");
6469 for (i
= 32; i
< 36; i
++) {
6470 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "%c",
6474 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "\n");
6477 i
= scsi_inq
[0] & 0x1f;
6480 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " Type: %s ",
6481 i
< MAX_SCSI_DEVICE_CODE
? scsi_device_types
[i
] :
6485 len
+= snprintf(inquiry_buf
+ len
, 265 - len
,
6486 " ANSI SCSI revision: %02x", scsi_inq
[2] & 0x07);
6488 if ((scsi_inq
[2] & 0x07) == 1 && (scsi_inq
[3] & 0x0f) == 1) {
6489 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " CCS\n");
6491 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "\n");
6494 con_log(CL_DLEVEL2
, (CE_CONT
, inquiry_buf
));
6498 io_timeout_checker(void *arg
)
6500 struct scsi_pkt
*pkt
;
6501 struct mrsas_instance
*instance
= arg
;
6502 struct mrsas_cmd
*cmd
= NULL
;
6503 struct mrsas_header
*hdr
;
6506 struct mlist_head
*pos
, *next
;
6507 mlist_t process_list
;
6509 if (instance
->adapterresetinprogress
== 1) {
6510 con_log(CL_ANN
, (CE_NOTE
, "io_timeout_checker:"
6511 " reset in progress"));
6513 instance
->timeout_id
= timeout(io_timeout_checker
,
6514 (void *) instance
, drv_usectohz(MRSAS_1_SECOND
));
6518 /* See if this check needs to be in the beginning or last in ISR */
6519 if (mrsas_initiate_ocr_if_fw_is_faulty(instance
) == 1) {
6520 dev_err(instance
->dip
, CE_WARN
, "io_timeout_checker: "
6521 "FW Fault, calling reset adapter");
6522 dev_err(instance
->dip
, CE_CONT
, "io_timeout_checker: "
6523 "fw_outstanding 0x%X max_fw_cmds 0x%X",
6524 instance
->fw_outstanding
, instance
->max_fw_cmds
);
6525 if (instance
->adapterresetinprogress
== 0) {
6526 instance
->adapterresetinprogress
= 1;
6527 if (instance
->tbolt
)
6528 (void) mrsas_tbolt_reset_ppc(instance
);
6530 (void) mrsas_reset_ppc(instance
);
6531 instance
->adapterresetinprogress
= 0;
6533 instance
->timeout_id
= timeout(io_timeout_checker
,
6534 (void *) instance
, drv_usectohz(MRSAS_1_SECOND
));
6538 INIT_LIST_HEAD(&process_list
);
6540 mutex_enter(&instance
->cmd_pend_mtx
);
6541 mlist_for_each_safe(pos
, next
, &instance
->cmd_pend_list
) {
6542 cmd
= mlist_entry(pos
, struct mrsas_cmd
, list
);
6548 if (cmd
->sync_cmd
== MRSAS_TRUE
) {
6549 hdr
= (struct mrsas_header
*)&cmd
->frame
->hdr
;
6553 time
= --cmd
->drv_pkt_time
;
6559 time
= --cmd
->drv_pkt_time
;
6562 dev_err(instance
->dip
, CE_WARN
, "%llx: "
6563 "io_timeout_checker: TIMING OUT: pkt: %p, "
6564 "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X",
6565 gethrtime(), (void *)pkt
, (void *)cmd
,
6566 instance
->fw_outstanding
, instance
->max_fw_cmds
);
6572 mutex_exit(&instance
->cmd_pend_mtx
);
6575 if (instance
->disable_online_ctrl_reset
== 1) {
6576 dev_err(instance
->dip
, CE_WARN
, "%s(): OCR is NOT "
6577 "supported by Firmware, KILL adapter!!!",
6580 if (instance
->tbolt
)
6581 mrsas_tbolt_kill_adapter(instance
);
6583 (void) mrsas_kill_adapter(instance
);
6587 if (cmd
->retry_count_for_ocr
<= IO_RETRY_COUNT
) {
6588 if (instance
->adapterresetinprogress
== 0) {
6589 if (instance
->tbolt
) {
6590 (void) mrsas_tbolt_reset_ppc(
6593 (void) mrsas_reset_ppc(
6598 dev_err(instance
->dip
, CE_WARN
,
6599 "io_timeout_checker: "
6600 "cmd %p cmd->index %d "
6601 "timed out even after 3 resets: "
6602 "so KILL adapter", (void *)cmd
, cmd
->index
);
6604 mrsas_print_cmd_details(instance
, cmd
, 0xDD);
6606 if (instance
->tbolt
)
6607 mrsas_tbolt_kill_adapter(instance
);
6609 (void) mrsas_kill_adapter(instance
);
6614 con_log(CL_ANN
, (CE_NOTE
, "mrsas: "
6615 "schedule next timeout check: "
6617 instance
->timeout_id
=
6618 timeout(io_timeout_checker
, (void *)instance
,
6619 drv_usectohz(MRSAS_1_SECOND
));
6623 read_fw_status_reg_ppc(struct mrsas_instance
*instance
)
6625 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance
));
6629 issue_cmd_ppc(struct mrsas_cmd
*cmd
, struct mrsas_instance
*instance
)
6631 struct scsi_pkt
*pkt
;
6632 atomic_inc_16(&instance
->fw_outstanding
);
6636 con_log(CL_DLEVEL1
, (CE_NOTE
, "%llx : issue_cmd_ppc:"
6637 "ISSUED CMD TO FW : called : cmd:"
6638 ": %p instance : %p pkt : %p pkt_time : %x\n",
6639 gethrtime(), (void *)cmd
, (void *)instance
,
6640 (void *)pkt
, cmd
->drv_pkt_time
));
6641 if (instance
->adapterresetinprogress
) {
6642 cmd
->drv_pkt_time
= (uint16_t)debug_timeout_g
;
6643 con_log(CL_ANN1
, (CE_NOTE
, "Reset the scsi_pkt timer"));
6645 push_pending_mfi_pkt(instance
, cmd
);
6649 con_log(CL_DLEVEL1
, (CE_NOTE
, "%llx : issue_cmd_ppc:"
6650 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6651 "(NO PKT)\n", gethrtime(), (void *)cmd
, (void *)instance
));
6654 mutex_enter(&instance
->reg_write_mtx
);
6655 /* Issue the command to the FW */
6656 WR_IB_PICK_QPORT((cmd
->frame_phys_addr
) |
6657 (((cmd
->frame_count
- 1) << 1) | 1), instance
);
6658 mutex_exit(&instance
->reg_write_mtx
);
6663 * issue_cmd_in_sync_mode
6666 issue_cmd_in_sync_mode_ppc(struct mrsas_instance
*instance
,
6667 struct mrsas_cmd
*cmd
)
6670 uint32_t msecs
= MFI_POLL_TIMEOUT_SECS
* MILLISEC
;
6671 struct mrsas_header
*hdr
= &cmd
->frame
->hdr
;
6673 con_log(CL_ANN1
, (CE_NOTE
, "issue_cmd_in_sync_mode_ppc: called"));
6675 if (instance
->adapterresetinprogress
) {
6676 cmd
->drv_pkt_time
= ddi_get16(
6677 cmd
->frame_dma_obj
.acc_handle
, &hdr
->timeout
);
6678 if (cmd
->drv_pkt_time
< debug_timeout_g
)
6679 cmd
->drv_pkt_time
= (uint16_t)debug_timeout_g
;
6681 con_log(CL_ANN1
, (CE_NOTE
, "sync_mode_ppc: "
6682 "issue and return in reset case\n"));
6683 WR_IB_PICK_QPORT((cmd
->frame_phys_addr
) |
6684 (((cmd
->frame_count
- 1) << 1) | 1), instance
);
6686 return (DDI_SUCCESS
);
6688 con_log(CL_ANN1
, (CE_NOTE
, "sync_mode_ppc: pushing the pkt\n"));
6689 push_pending_mfi_pkt(instance
, cmd
);
6692 cmd
->cmd_status
= ENODATA
;
6694 mutex_enter(&instance
->reg_write_mtx
);
6695 /* Issue the command to the FW */
6696 WR_IB_PICK_QPORT((cmd
->frame_phys_addr
) |
6697 (((cmd
->frame_count
- 1) << 1) | 1), instance
);
6698 mutex_exit(&instance
->reg_write_mtx
);
6700 mutex_enter(&instance
->int_cmd_mtx
);
6701 for (i
= 0; i
< msecs
&& (cmd
->cmd_status
== ENODATA
); i
++) {
6702 cv_wait(&instance
->int_cmd_cv
, &instance
->int_cmd_mtx
);
6704 mutex_exit(&instance
->int_cmd_mtx
);
6706 con_log(CL_ANN1
, (CE_NOTE
, "issue_cmd_in_sync_mode_ppc: done"));
6708 if (i
< (msecs
-1)) {
6709 return (DDI_SUCCESS
);
6711 return (DDI_FAILURE
);
6716 * issue_cmd_in_poll_mode
6719 issue_cmd_in_poll_mode_ppc(struct mrsas_instance
*instance
,
6720 struct mrsas_cmd
*cmd
)
6724 uint32_t msecs
= MFI_POLL_TIMEOUT_SECS
* MILLISEC
;
6725 struct mrsas_header
*frame_hdr
;
6727 con_log(CL_ANN1
, (CE_NOTE
, "issue_cmd_in_poll_mode_ppc: called"));
6729 frame_hdr
= (struct mrsas_header
*)cmd
->frame
;
6730 ddi_put8(cmd
->frame_dma_obj
.acc_handle
, &frame_hdr
->cmd_status
,
6731 MFI_CMD_STATUS_POLL_MODE
);
6732 flags
= ddi_get16(cmd
->frame_dma_obj
.acc_handle
, &frame_hdr
->flags
);
6733 flags
|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE
;
6735 ddi_put16(cmd
->frame_dma_obj
.acc_handle
, &frame_hdr
->flags
, flags
);
6737 /* issue the frame using inbound queue port */
6738 WR_IB_PICK_QPORT((cmd
->frame_phys_addr
) |
6739 (((cmd
->frame_count
- 1) << 1) | 1), instance
);
6741 /* wait for cmd_status to change from 0xFF */
6742 for (i
= 0; i
< msecs
&& (
6743 ddi_get8(cmd
->frame_dma_obj
.acc_handle
, &frame_hdr
->cmd_status
)
6744 == MFI_CMD_STATUS_POLL_MODE
); i
++) {
6745 drv_usecwait(MILLISEC
); /* wait for 1000 usecs */
6748 if (ddi_get8(cmd
->frame_dma_obj
.acc_handle
, &frame_hdr
->cmd_status
)
6749 == MFI_CMD_STATUS_POLL_MODE
) {
6750 con_log(CL_ANN
, (CE_NOTE
, "issue_cmd_in_poll_mode: "
6751 "cmd polling timed out"));
6752 return (DDI_FAILURE
);
6755 return (DDI_SUCCESS
);
6759 enable_intr_ppc(struct mrsas_instance
*instance
)
6763 con_log(CL_ANN1
, (CE_NOTE
, "enable_intr_ppc: called"));
6765 if (instance
->skinny
) {
6766 /* For SKINNY, write ~0x1, from BSD's mfi driver. */
6767 WR_OB_INTR_MASK(0xfffffffe, instance
);
6769 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6770 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK
, instance
);
6772 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6773 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK
), instance
);
6776 /* dummy read to force PCI flush */
6777 mask
= RD_OB_INTR_MASK(instance
);
6779 con_log(CL_ANN1
, (CE_NOTE
, "enable_intr_ppc: "
6780 "outbound_intr_mask = 0x%x", mask
));
6784 disable_intr_ppc(struct mrsas_instance
*instance
)
6786 con_log(CL_ANN1
, (CE_NOTE
, "disable_intr_ppc: called"));
6788 con_log(CL_ANN1
, (CE_NOTE
, "disable_intr_ppc: before : "
6789 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance
)));
6791 /* For now, assume there are no extras needed for Skinny support. */
6793 WR_OB_INTR_MASK(OB_INTR_MASK
, instance
);
6795 con_log(CL_ANN1
, (CE_NOTE
, "disable_intr_ppc: after : "
6796 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance
)));
6798 /* dummy read to force PCI flush */
6799 (void) RD_OB_INTR_MASK(instance
);
6803 intr_ack_ppc(struct mrsas_instance
*instance
)
6806 int ret
= DDI_INTR_CLAIMED
;
6808 con_log(CL_ANN1
, (CE_NOTE
, "intr_ack_ppc: called"));
6810 /* check if it is our interrupt */
6811 status
= RD_OB_INTR_STATUS(instance
);
6813 con_log(CL_ANN1
, (CE_NOTE
, "intr_ack_ppc: status = 0x%x", status
));
6816 * NOTE: Some drivers call out SKINNY here, but the return is the same
6817 * for SKINNY and 2108.
6819 if (!(status
& MFI_REPLY_2108_MESSAGE_INTR
)) {
6820 ret
= DDI_INTR_UNCLAIMED
;
6823 if (mrsas_check_acc_handle(instance
->regmap_handle
) != DDI_SUCCESS
) {
6824 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
6825 ret
= DDI_INTR_UNCLAIMED
;
6828 if (ret
== DDI_INTR_UNCLAIMED
) {
6833 * Clear the interrupt by writing back the same value.
6834 * Another case where SKINNY is slightly different.
6836 if (instance
->skinny
) {
6837 WR_OB_INTR_STATUS(status
, instance
);
6839 WR_OB_DOORBELL_CLEAR(status
, instance
);
6843 status
= RD_OB_INTR_STATUS(instance
);
6845 con_log(CL_ANN1
, (CE_NOTE
, "intr_ack_ppc: interrupt cleared"));
6851 * Marks HBA as bad. This will be called either when an
6852 * IO packet times out even after 3 FW resets
6853 * or FW is found to be fault even after 3 continuous resets.
6857 mrsas_kill_adapter(struct mrsas_instance
*instance
)
6859 if (instance
->deadadapter
== 1)
6860 return (DDI_FAILURE
);
6862 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_kill_adapter: "
6863 "Writing to doorbell with MFI_STOP_ADP "));
6864 mutex_enter(&instance
->ocr_flags_mtx
);
6865 instance
->deadadapter
= 1;
6866 mutex_exit(&instance
->ocr_flags_mtx
);
6867 instance
->func_ptr
->disable_intr(instance
);
6868 WR_IB_DOORBELL(MFI_STOP_ADP
, instance
);
6869 (void) mrsas_complete_pending_cmds(instance
);
6870 return (DDI_SUCCESS
);
6875 mrsas_reset_ppc(struct mrsas_instance
*instance
)
6879 uint32_t cur_abs_reg_val
;
6882 con_log(CL_ANN
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
6884 if (instance
->deadadapter
== 1) {
6885 dev_err(instance
->dip
, CE_WARN
, "mrsas_reset_ppc: "
6886 "no more resets as HBA has been marked dead ");
6887 return (DDI_FAILURE
);
6889 mutex_enter(&instance
->ocr_flags_mtx
);
6890 instance
->adapterresetinprogress
= 1;
6891 mutex_exit(&instance
->ocr_flags_mtx
);
6892 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: adpterresetinprogress "
6893 "flag set, time %llx", gethrtime()));
6895 instance
->func_ptr
->disable_intr(instance
);
6897 WR_IB_WRITE_SEQ(0, instance
);
6898 WR_IB_WRITE_SEQ(4, instance
);
6899 WR_IB_WRITE_SEQ(0xb, instance
);
6900 WR_IB_WRITE_SEQ(2, instance
);
6901 WR_IB_WRITE_SEQ(7, instance
);
6902 WR_IB_WRITE_SEQ(0xd, instance
);
6903 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: magic number written "
6904 "to write sequence register\n"));
6906 status
= RD_OB_DRWE(instance
);
6908 while (!(status
& DIAG_WRITE_ENABLE
)) {
6910 status
= RD_OB_DRWE(instance
);
6911 if (retry
++ == 100) {
6912 dev_err(instance
->dip
, CE_WARN
,
6913 "mrsas_reset_ppc: DRWE bit "
6914 "check retry count %d", retry
);
6915 return (DDI_FAILURE
);
6918 WR_IB_DRWE(status
| DIAG_RESET_ADAPTER
, instance
);
6920 status
= RD_OB_DRWE(instance
);
6921 while (status
& DIAG_RESET_ADAPTER
) {
6923 status
= RD_OB_DRWE(instance
);
6924 if (retry
++ == 100) {
6925 dev_err(instance
->dip
, CE_WARN
, "mrsas_reset_ppc: "
6926 "RESET FAILED. KILL adapter called.");
6928 (void) mrsas_kill_adapter(instance
);
6929 return (DDI_FAILURE
);
6932 con_log(CL_ANN
, (CE_NOTE
, "mrsas_reset_ppc: Adapter reset complete"));
6933 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
6934 "Calling mfi_state_transition_to_ready"));
6936 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
6937 if (mfi_state_transition_to_ready(instance
) ||
6938 debug_fw_faults_after_ocr_g
== 1) {
6940 instance
->func_ptr
->read_fw_status_reg(instance
);
6941 fw_state
= cur_abs_reg_val
& MFI_STATE_MASK
;
6944 con_log(CL_ANN1
, (CE_NOTE
,
6945 "mrsas_reset_ppc :before fake: FW is not ready "
6946 "FW state = 0x%x", fw_state
));
6947 if (debug_fw_faults_after_ocr_g
== 1)
6948 fw_state
= MFI_STATE_FAULT
;
6951 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc : FW is not ready "
6952 "FW state = 0x%x", fw_state
));
6954 if (fw_state
== MFI_STATE_FAULT
) {
6955 /* increment the count */
6956 instance
->fw_fault_count_after_ocr
++;
6957 if (instance
->fw_fault_count_after_ocr
6958 < MAX_FW_RESET_COUNT
) {
6959 dev_err(instance
->dip
, CE_WARN
,
6961 "FW is in fault after OCR count %d "
6963 instance
->fw_fault_count_after_ocr
);
6967 dev_err(instance
->dip
, CE_WARN
,
6969 "Max Reset Count exceeded >%d"
6970 "Mark HBA as bad, KILL adapter",
6971 MAX_FW_RESET_COUNT
);
6973 (void) mrsas_kill_adapter(instance
);
6974 return (DDI_FAILURE
);
6978 /* reset the counter as FW is up after OCR */
6979 instance
->fw_fault_count_after_ocr
= 0;
6982 ddi_put32(instance
->mfi_internal_dma_obj
.acc_handle
,
6983 instance
->producer
, 0);
6985 ddi_put32(instance
->mfi_internal_dma_obj
.acc_handle
,
6986 instance
->consumer
, 0);
6988 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
6989 " after resetting produconsumer chck indexs:"
6990 "producer %x consumer %x", *instance
->producer
,
6991 *instance
->consumer
));
6993 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
6994 "Calling mrsas_issue_init_mfi"));
6995 (void) mrsas_issue_init_mfi(instance
);
6996 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
6997 "mrsas_issue_init_mfi Done"));
6999 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
7000 "Calling mrsas_print_pending_cmd\n"));
7001 (void) mrsas_print_pending_cmds(instance
);
7002 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
7003 "mrsas_print_pending_cmd done\n"));
7005 instance
->func_ptr
->enable_intr(instance
);
7006 instance
->fw_outstanding
= 0;
7008 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
7009 "Calling mrsas_issue_pending_cmds"));
7010 (void) mrsas_issue_pending_cmds(instance
);
7011 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
7012 "issue_pending_cmds done.\n"));
7014 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
7015 "Calling aen registration"));
7018 instance
->aen_cmd
->retry_count_for_ocr
= 0;
7019 instance
->aen_cmd
->drv_pkt_time
= 0;
7021 instance
->func_ptr
->issue_cmd(instance
->aen_cmd
, instance
);
7022 con_log(CL_ANN1
, (CE_NOTE
, "Unsetting adpresetinprogress flag.\n"));
7024 mutex_enter(&instance
->ocr_flags_mtx
);
7025 instance
->adapterresetinprogress
= 0;
7026 mutex_exit(&instance
->ocr_flags_mtx
);
7027 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc: "
7028 "adpterresetinprogress flag unset"));
7030 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_reset_ppc done\n"));
7031 return (DDI_SUCCESS
);
7038 mrsas_common_check(struct mrsas_instance
*instance
, struct mrsas_cmd
*cmd
)
7040 int ret
= DDI_SUCCESS
;
7043 mrsas_check_dma_handle(cmd
->frame_dma_obj
.dma_handle
) !=
7045 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
7046 if (cmd
->pkt
!= NULL
) {
7047 cmd
->pkt
->pkt_reason
= CMD_TRAN_ERR
;
7048 cmd
->pkt
->pkt_statistics
= 0;
7052 if (mrsas_check_dma_handle(instance
->mfi_internal_dma_obj
.dma_handle
)
7054 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
7055 if (cmd
!= NULL
&& cmd
->pkt
!= NULL
) {
7056 cmd
->pkt
->pkt_reason
= CMD_TRAN_ERR
;
7057 cmd
->pkt
->pkt_statistics
= 0;
7061 if (mrsas_check_dma_handle(instance
->mfi_evt_detail_obj
.dma_handle
) !=
7063 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
7064 if (cmd
!= NULL
&& cmd
->pkt
!= NULL
) {
7065 cmd
->pkt
->pkt_reason
= CMD_TRAN_ERR
;
7066 cmd
->pkt
->pkt_statistics
= 0;
7070 if (mrsas_check_acc_handle(instance
->regmap_handle
) != DDI_SUCCESS
) {
7071 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
7073 ddi_fm_acc_err_clear(instance
->regmap_handle
, DDI_FME_VER0
);
7075 if (cmd
!= NULL
&& cmd
->pkt
!= NULL
) {
7076 cmd
->pkt
->pkt_reason
= CMD_TRAN_ERR
;
7077 cmd
->pkt
->pkt_statistics
= 0;
7087 mrsas_fm_error_cb(dev_info_t
*dip
, ddi_fm_error_t
*err
, const void *impl_data
)
7090 * as the driver can always deal with an error in any dma or
7091 * access handle, we can just return the fme_status value.
7093 pci_ereport_post(dip
, err
, NULL
);
7094 return (err
->fme_status
);
7098 mrsas_fm_init(struct mrsas_instance
*instance
)
7100 /* Need to change iblock to priority for new MSI intr */
7101 ddi_iblock_cookie_t fm_ibc
;
7103 /* Only register with IO Fault Services if we have some capability */
7104 if (instance
->fm_capabilities
) {
7105 /* Adjust access and dma attributes for FMA */
7106 endian_attr
.devacc_attr_access
= DDI_FLAGERR_ACC
;
7107 mrsas_generic_dma_attr
.dma_attr_flags
= DDI_DMA_FLAGERR
;
7110 * Register capabilities with IO Fault Services.
7111 * fm_capabilities will be updated to indicate
7112 * capabilities actually supported (not requested.)
7115 ddi_fm_init(instance
->dip
, &instance
->fm_capabilities
, &fm_ibc
);
7118 * Initialize pci ereport capabilities if ereport
7119 * capable (should always be.)
7122 if (DDI_FM_EREPORT_CAP(instance
->fm_capabilities
) ||
7123 DDI_FM_ERRCB_CAP(instance
->fm_capabilities
)) {
7124 pci_ereport_setup(instance
->dip
);
7128 * Register error callback if error callback capable.
7130 if (DDI_FM_ERRCB_CAP(instance
->fm_capabilities
)) {
7131 ddi_fm_handler_register(instance
->dip
,
7132 mrsas_fm_error_cb
, (void*) instance
);
7135 endian_attr
.devacc_attr_access
= DDI_DEFAULT_ACC
;
7136 mrsas_generic_dma_attr
.dma_attr_flags
= 0;
7141 mrsas_fm_fini(struct mrsas_instance
*instance
)
7143 /* Only unregister FMA capabilities if registered */
7144 if (instance
->fm_capabilities
) {
7146 * Un-register error callback if error callback capable.
7148 if (DDI_FM_ERRCB_CAP(instance
->fm_capabilities
)) {
7149 ddi_fm_handler_unregister(instance
->dip
);
7153 * Release any resources allocated by pci_ereport_setup()
7155 if (DDI_FM_EREPORT_CAP(instance
->fm_capabilities
) ||
7156 DDI_FM_ERRCB_CAP(instance
->fm_capabilities
)) {
7157 pci_ereport_teardown(instance
->dip
);
7160 /* Unregister from IO Fault Services */
7161 ddi_fm_fini(instance
->dip
);
7163 /* Adjust access and dma attributes for FMA */
7164 endian_attr
.devacc_attr_access
= DDI_DEFAULT_ACC
;
7165 mrsas_generic_dma_attr
.dma_attr_flags
= 0;
7170 mrsas_check_acc_handle(ddi_acc_handle_t handle
)
7174 if (handle
== NULL
) {
7175 return (DDI_FAILURE
);
7178 ddi_fm_acc_err_get(handle
, &de
, DDI_FME_VERSION
);
7180 return (de
.fme_status
);
7184 mrsas_check_dma_handle(ddi_dma_handle_t handle
)
7188 if (handle
== NULL
) {
7189 return (DDI_FAILURE
);
7192 ddi_fm_dma_err_get(handle
, &de
, DDI_FME_VERSION
);
7194 return (de
.fme_status
);
7198 mrsas_fm_ereport(struct mrsas_instance
*instance
, char *detail
)
7201 char buf
[FM_MAX_CLASS
];
7203 (void) snprintf(buf
, FM_MAX_CLASS
, "%s.%s", DDI_FM_DEVICE
, detail
);
7204 ena
= fm_ena_generate(0, FM_ENA_FMT1
);
7205 if (DDI_FM_EREPORT_CAP(instance
->fm_capabilities
)) {
7206 ddi_fm_ereport_post(instance
->dip
, buf
, ena
, DDI_NOSLEEP
,
7207 FM_VERSION
, DATA_TYPE_UINT8
, FM_EREPORT_VERSION
, NULL
);
7212 mrsas_add_intrs(struct mrsas_instance
*instance
, int intr_type
)
7215 dev_info_t
*dip
= instance
->dip
;
7216 int avail
, actual
, count
;
7219 con_log(CL_DLEVEL1
, (CE_NOTE
, "mrsas_add_intrs: intr_type = %x",
7222 /* Get number of interrupts */
7223 ret
= ddi_intr_get_nintrs(dip
, intr_type
, &count
);
7224 if ((ret
!= DDI_SUCCESS
) || (count
== 0)) {
7225 con_log(CL_ANN
, (CE_WARN
, "ddi_intr_get_nintrs() failed:"
7226 "ret %d count %d", ret
, count
));
7228 return (DDI_FAILURE
);
7231 con_log(CL_DLEVEL1
, (CE_NOTE
, "mrsas_add_intrs: count = %d ", count
));
7233 /* Get number of available interrupts */
7234 ret
= ddi_intr_get_navail(dip
, intr_type
, &avail
);
7235 if ((ret
!= DDI_SUCCESS
) || (avail
== 0)) {
7236 con_log(CL_ANN
, (CE_WARN
, "ddi_intr_get_navail() failed:"
7237 "ret %d avail %d", ret
, avail
));
7239 return (DDI_FAILURE
);
7241 con_log(CL_DLEVEL1
, (CE_NOTE
, "mrsas_add_intrs: avail = %d ", avail
));
7243 /* Only one interrupt routine. So limit the count to 1 */
7249 * Allocate an array of interrupt handlers. Currently we support
7250 * only one interrupt. The framework can be extended later.
7252 instance
->intr_htable_size
= count
* sizeof (ddi_intr_handle_t
);
7253 instance
->intr_htable
= kmem_zalloc(instance
->intr_htable_size
,
7255 ASSERT(instance
->intr_htable
);
7257 flag
= ((intr_type
== DDI_INTR_TYPE_MSI
) ||
7258 (intr_type
== DDI_INTR_TYPE_MSIX
)) ?
7259 DDI_INTR_ALLOC_STRICT
: DDI_INTR_ALLOC_NORMAL
;
7261 /* Allocate interrupt */
7262 ret
= ddi_intr_alloc(dip
, instance
->intr_htable
, intr_type
, 0,
7263 count
, &actual
, flag
);
7265 if ((ret
!= DDI_SUCCESS
) || (actual
== 0)) {
7266 con_log(CL_ANN
, (CE_WARN
, "mrsas_add_intrs: "
7267 "avail = %d", avail
));
7268 goto mrsas_free_htable
;
7271 if (actual
< count
) {
7272 con_log(CL_ANN
, (CE_WARN
, "mrsas_add_intrs: "
7273 "Requested = %d Received = %d", count
, actual
));
7275 instance
->intr_cnt
= actual
;
7278 * Get the priority of the interrupt allocated.
7280 if ((ret
= ddi_intr_get_pri(instance
->intr_htable
[0],
7281 &instance
->intr_pri
)) != DDI_SUCCESS
) {
7282 con_log(CL_ANN
, (CE_WARN
, "mrsas_add_intrs: "
7283 "get priority call failed"));
7284 goto mrsas_free_handles
;
7288 * Test for high level mutex. we don't support them.
7290 if (instance
->intr_pri
>= ddi_intr_get_hilevel_pri()) {
7291 con_log(CL_ANN
, (CE_WARN
, "mrsas_add_intrs: "
7292 "High level interrupts not supported."));
7293 goto mrsas_free_handles
;
7296 con_log(CL_DLEVEL1
, (CE_NOTE
, "mrsas_add_intrs: intr_pri = 0x%x ",
7297 instance
->intr_pri
));
7299 /* Call ddi_intr_add_handler() */
7300 for (i
= 0; i
< actual
; i
++) {
7301 ret
= ddi_intr_add_handler(instance
->intr_htable
[i
],
7302 (ddi_intr_handler_t
*)mrsas_isr
, (caddr_t
)instance
,
7303 (caddr_t
)(uintptr_t)i
);
7305 if (ret
!= DDI_SUCCESS
) {
7306 con_log(CL_ANN
, (CE_WARN
, "mrsas_add_intrs:"
7308 goto mrsas_free_handles
;
7313 con_log(CL_DLEVEL1
, (CE_NOTE
, " ddi_intr_add_handler done"));
7315 if ((ret
= ddi_intr_get_cap(instance
->intr_htable
[0],
7316 &instance
->intr_cap
)) != DDI_SUCCESS
) {
7317 con_log(CL_ANN
, (CE_WARN
, "ddi_intr_get_cap() failed %d",
7319 goto mrsas_free_handlers
;
7322 if (instance
->intr_cap
& DDI_INTR_FLAG_BLOCK
) {
7323 con_log(CL_ANN
, (CE_WARN
, "Calling ddi_intr_block _enable"));
7325 (void) ddi_intr_block_enable(instance
->intr_htable
,
7326 instance
->intr_cnt
);
7328 con_log(CL_ANN
, (CE_NOTE
, " calling ddi_intr_enable"));
7330 for (i
= 0; i
< instance
->intr_cnt
; i
++) {
7331 (void) ddi_intr_enable(instance
->intr_htable
[i
]);
7332 con_log(CL_ANN
, (CE_NOTE
, "ddi intr enable returns "
7337 return (DDI_SUCCESS
);
7339 mrsas_free_handlers
:
7340 for (i
= 0; i
< actual
; i
++)
7341 (void) ddi_intr_remove_handler(instance
->intr_htable
[i
]);
7344 for (i
= 0; i
< actual
; i
++)
7345 (void) ddi_intr_free(instance
->intr_htable
[i
]);
7348 if (instance
->intr_htable
!= NULL
)
7349 kmem_free(instance
->intr_htable
, instance
->intr_htable_size
);
7351 instance
->intr_htable
= NULL
;
7352 instance
->intr_htable_size
= 0;
7354 return (DDI_FAILURE
);
7360 mrsas_rem_intrs(struct mrsas_instance
*instance
)
7364 con_log(CL_ANN
, (CE_NOTE
, "mrsas_rem_intrs called"));
7366 /* Disable all interrupts first */
7367 if (instance
->intr_cap
& DDI_INTR_FLAG_BLOCK
) {
7368 (void) ddi_intr_block_disable(instance
->intr_htable
,
7369 instance
->intr_cnt
);
7371 for (i
= 0; i
< instance
->intr_cnt
; i
++) {
7372 (void) ddi_intr_disable(instance
->intr_htable
[i
]);
7376 /* Remove all the handlers */
7378 for (i
= 0; i
< instance
->intr_cnt
; i
++) {
7379 (void) ddi_intr_remove_handler(instance
->intr_htable
[i
]);
7380 (void) ddi_intr_free(instance
->intr_htable
[i
]);
7383 if (instance
->intr_htable
!= NULL
)
7384 kmem_free(instance
->intr_htable
, instance
->intr_htable_size
);
7386 instance
->intr_htable
= NULL
;
7387 instance
->intr_htable_size
= 0;
7392 mrsas_tran_bus_config(dev_info_t
*parent
, uint_t flags
,
7393 ddi_bus_config_op_t op
, void *arg
, dev_info_t
**childp
)
7395 struct mrsas_instance
*instance
;
7397 int rval
= NDI_SUCCESS
;
7402 con_log(CL_ANN1
, (CE_NOTE
, "Bus config called for op = %x", op
));
7404 if ((instance
= ddi_get_soft_state(mrsas_state
,
7405 ddi_get_instance(parent
))) == NULL
) {
7406 return (NDI_FAILURE
);
7409 /* Hold nexus during bus_config */
7410 ndi_devi_enter(parent
, &config
);
7412 case BUS_CONFIG_ONE
: {
7414 /* parse wwid/target name out of name given */
7415 if ((ptr
= strchr((char *)arg
, '@')) == NULL
) {
7421 if (mrsas_parse_devname(arg
, &tgt
, &lun
) != 0) {
7427 rval
= mrsas_config_ld(instance
, tgt
, lun
, childp
);
7428 } else if ((instance
->tbolt
|| instance
->skinny
) && lun
!= 0) {
7429 rval
= mrsas_tbolt_config_pd(instance
,
7437 case BUS_CONFIG_DRIVER
:
7438 case BUS_CONFIG_ALL
: {
7440 rval
= mrsas_config_all_devices(instance
);
7447 if (rval
== NDI_SUCCESS
) {
7448 rval
= ndi_busop_bus_config(parent
, flags
, op
, arg
, childp
, 0);
7451 ndi_devi_exit(parent
, config
);
7453 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_tran_bus_config: rval = %x",
7459 mrsas_config_all_devices(struct mrsas_instance
*instance
)
7463 for (tgt
= 0; tgt
< MRDRV_MAX_LD
; tgt
++) {
7464 (void) mrsas_config_ld(instance
, tgt
, 0, NULL
);
7468 /* Config PD devices connected to the card */
7469 if (instance
->tbolt
|| instance
->skinny
) {
7470 for (tgt
= 0; tgt
< instance
->mr_tbolt_pd_max
; tgt
++) {
7471 (void) mrsas_tbolt_config_pd(instance
, tgt
, 1, NULL
);
7480 mrsas_parse_devname(char *devnm
, int *tgt
, int *lun
)
7482 char devbuf
[SCSI_MAXNAMELEN
];
7487 /* Parse dev name and address */
7488 (void) strcpy(devbuf
, devnm
);
7490 for (p
= devbuf
; *p
!= '\0'; p
++) {
7494 } else if (*p
== ':') {
7500 /* Parse target and lun */
7501 for (p
= tp
= addr
, lp
= NULL
; *p
!= '\0'; p
++) {
7509 if (ddi_strtol(tp
, NULL
, 0x10, &num
)) {
7510 return (DDI_FAILURE
); /* Can declare this as constant */
7515 if (ddi_strtol(lp
, NULL
, 0x10, &num
)) {
7516 return (DDI_FAILURE
);
7520 return (DDI_SUCCESS
); /* Success case */
7524 mrsas_config_ld(struct mrsas_instance
*instance
, uint16_t tgt
,
7525 uint8_t lun
, dev_info_t
**ldip
)
7527 struct scsi_device
*sd
;
7531 con_log(CL_DLEVEL1
, (CE_NOTE
, "mrsas_config_ld: t = %d l = %d",
7534 if ((child
= mrsas_find_child(instance
, tgt
, lun
)) != NULL
) {
7538 if (instance
->mr_ld_list
[tgt
].flag
!= MRDRV_TGT_VALID
) {
7539 rval
= mrsas_service_evt(instance
, tgt
, 0,
7540 MRSAS_EVT_UNCONFIG_TGT
, (uintptr_t)NULL
);
7541 con_log(CL_ANN1
, (CE_WARN
,
7542 "mr_sas: DELETING STALE ENTRY rval = %d "
7543 "tgt id = %d ", rval
, tgt
));
7544 return (NDI_FAILURE
);
7546 return (NDI_SUCCESS
);
7549 sd
= kmem_zalloc(sizeof (struct scsi_device
), KM_SLEEP
);
7550 sd
->sd_address
.a_hba_tran
= instance
->tran
;
7551 sd
->sd_address
.a_target
= (uint16_t)tgt
;
7552 sd
->sd_address
.a_lun
= (uint8_t)lun
;
7554 if (scsi_hba_probe(sd
, NULL
) == SCSIPROBE_EXISTS
)
7555 rval
= mrsas_config_scsi_device(instance
, sd
, ldip
);
7559 /* sd_unprobe is blank now. Free buffer manually */
7561 kmem_free(sd
->sd_inq
, SUN_INQSIZE
);
7565 kmem_free(sd
, sizeof (struct scsi_device
));
7566 con_log(CL_DLEVEL1
, (CE_NOTE
, "mrsas_config_ld: return rval = %d",
7572 mrsas_config_scsi_device(struct mrsas_instance
*instance
,
7573 struct scsi_device
*sd
, dev_info_t
**dipp
)
7575 char *nodename
= NULL
;
7576 char **compatible
= NULL
;
7577 int ncompatible
= 0;
7579 dev_info_t
*ldip
= NULL
;
7580 int tgt
= sd
->sd_address
.a_target
;
7581 int lun
= sd
->sd_address
.a_lun
;
7582 int dtype
= sd
->sd_inq
->inq_dtype
& DTYPE_MASK
;
7585 con_log(CL_DLEVEL1
, (CE_NOTE
, "mr_sas: scsi_device t%dL%d", tgt
, lun
));
7586 scsi_hba_nodename_compatible_get(sd
->sd_inq
, NULL
, dtype
,
7587 NULL
, &nodename
, &compatible
, &ncompatible
);
7589 if (nodename
== NULL
) {
7590 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: Found no compatible driver "
7591 "for t%dL%d", tgt
, lun
));
7596 childname
= (dtype
== DTYPE_DIRECT
) ? "sd" : nodename
;
7597 con_log(CL_DLEVEL1
, (CE_NOTE
,
7598 "mr_sas: Childname = %2s nodename = %s", childname
, nodename
));
7600 /* Create a dev node */
7601 rval
= ndi_devi_alloc(instance
->dip
, childname
, DEVI_SID_NODEID
, &ldip
);
7602 con_log(CL_DLEVEL1
, (CE_NOTE
,
7603 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval
));
7604 if (rval
== NDI_SUCCESS
) {
7605 if (ndi_prop_update_int(DDI_DEV_T_NONE
, ldip
, "target", tgt
) !=
7607 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: unable to create "
7608 "property for t%dl%d target", tgt
, lun
));
7612 if (ndi_prop_update_int(DDI_DEV_T_NONE
, ldip
, "lun", lun
) !=
7614 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: unable to create "
7615 "property for t%dl%d lun", tgt
, lun
));
7620 if (ndi_prop_update_string_array(DDI_DEV_T_NONE
, ldip
,
7621 "compatible", compatible
, ncompatible
) !=
7623 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: unable to create "
7624 "property for t%dl%d compatible", tgt
, lun
));
7629 rval
= ndi_devi_online(ldip
, NDI_ONLINE_ATTACH
);
7630 if (rval
!= NDI_SUCCESS
) {
7631 con_log(CL_ANN1
, (CE_WARN
, "mr_sas: unable to online "
7632 "t%dl%d", tgt
, lun
));
7633 ndi_prop_remove_all(ldip
);
7634 (void) ndi_devi_free(ldip
);
7636 con_log(CL_ANN1
, (CE_CONT
, "mr_sas: online Done :"
7637 "0 t%dl%d", tgt
, lun
));
7646 con_log(CL_DLEVEL1
, (CE_NOTE
,
7647 "mr_sas: config_scsi_device rval = %d t%dL%d",
7649 scsi_hba_nodename_compatible_free(nodename
, compatible
);
7655 mrsas_service_evt(struct mrsas_instance
*instance
, int tgt
, int lun
, int event
,
7658 struct mrsas_eventinfo
*mrevt
= NULL
;
7660 con_log(CL_ANN1
, (CE_NOTE
,
7661 "mrsas_service_evt called for t%dl%d event = %d",
7664 if ((instance
->taskq
== NULL
) || (mrevt
=
7665 kmem_zalloc(sizeof (struct mrsas_eventinfo
), KM_NOSLEEP
)) == NULL
) {
7669 mrevt
->instance
= instance
;
7672 mrevt
->event
= event
;
7675 if ((ddi_taskq_dispatch(instance
->taskq
,
7676 (void (*)(void *))mrsas_issue_evt_taskq
, mrevt
, DDI_NOSLEEP
)) !=
7678 con_log(CL_ANN1
, (CE_NOTE
,
7679 "mr_sas: Event task failed for t%dl%d event = %d",
7681 kmem_free(mrevt
, sizeof (struct mrsas_eventinfo
));
7682 return (DDI_FAILURE
);
7684 DTRACE_PROBE3(service_evt
, int, tgt
, int, lun
, int, event
);
7685 return (DDI_SUCCESS
);
7689 mrsas_issue_evt_taskq(struct mrsas_eventinfo
*mrevt
)
7691 struct mrsas_instance
*instance
= mrevt
->instance
;
7692 dev_info_t
*dip
, *pdip
;
7696 con_log(CL_ANN1
, (CE_NOTE
, "mrsas_issue_evt_taskq: called for"
7697 " tgt %d lun %d event %d",
7698 mrevt
->tgt
, mrevt
->lun
, mrevt
->event
));
7700 if (mrevt
->tgt
< MRDRV_MAX_LD
&& mrevt
->lun
== 0) {
7701 mutex_enter(&instance
->config_dev_mtx
);
7702 dip
= instance
->mr_ld_list
[mrevt
->tgt
].dip
;
7703 mutex_exit(&instance
->config_dev_mtx
);
7705 mutex_enter(&instance
->config_dev_mtx
);
7706 dip
= instance
->mr_tbolt_pd_list
[mrevt
->tgt
].dip
;
7707 mutex_exit(&instance
->config_dev_mtx
);
7711 ndi_devi_enter(instance
->dip
, &circ1
);
7712 switch (mrevt
->event
) {
7713 case MRSAS_EVT_CONFIG_TGT
:
7716 if (mrevt
->lun
== 0) {
7717 (void) mrsas_config_ld(instance
, mrevt
->tgt
,
7719 } else if (instance
->tbolt
|| instance
->skinny
) {
7720 (void) mrsas_tbolt_config_pd(instance
,
7724 con_log(CL_ANN1
, (CE_NOTE
,
7725 "mr_sas: EVT_CONFIG_TGT called:"
7726 " for tgt %d lun %d event %d",
7727 mrevt
->tgt
, mrevt
->lun
, mrevt
->event
));
7730 con_log(CL_ANN1
, (CE_NOTE
,
7731 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7732 " for tgt %d lun %d event %d",
7733 mrevt
->tgt
, mrevt
->lun
, mrevt
->event
));
7736 case MRSAS_EVT_UNCONFIG_TGT
:
7738 if (i_ddi_devi_attached(dip
)) {
7740 pdip
= ddi_get_parent(dip
);
7742 devname
= kmem_zalloc(MAXNAMELEN
+ 1, KM_SLEEP
);
7743 (void) ddi_deviname(dip
, devname
);
7745 (void) devfs_clean(pdip
, devname
+ 1,
7747 kmem_free(devname
, MAXNAMELEN
+ 1);
7749 (void) ndi_devi_offline(dip
, NDI_DEVI_REMOVE
);
7750 con_log(CL_ANN1
, (CE_NOTE
,
7751 "mr_sas: EVT_UNCONFIG_TGT called:"
7752 " for tgt %d lun %d event %d",
7753 mrevt
->tgt
, mrevt
->lun
, mrevt
->event
));
7755 con_log(CL_ANN1
, (CE_NOTE
,
7756 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7757 " for tgt %d lun %d event %d",
7758 mrevt
->tgt
, mrevt
->lun
, mrevt
->event
));
7762 kmem_free(mrevt
, sizeof (struct mrsas_eventinfo
));
7763 ndi_devi_exit(instance
->dip
, circ1
);
7768 mrsas_mode_sense_build(struct scsi_pkt
*pkt
)
7770 union scsi_cdb
*cdbp
;
7772 struct scsa_cmd
*acmd
;
7774 struct mode_header
*modehdrp
;
7776 cdbp
= (void *)pkt
->pkt_cdbp
;
7777 page_code
= cdbp
->cdb_un
.sg
.scsi
[0];
7778 acmd
= PKT2CMD(pkt
);
7780 if ((!bp
) && bp
->b_un
.b_addr
&& bp
->b_bcount
&& acmd
->cmd_dmacount
) {
7781 con_log(CL_ANN1
, (CE_WARN
, "Failing MODESENSE Command"));
7782 /* ADD pkt statistics as Command failed. */
7787 bzero(bp
->b_un
.b_addr
, bp
->b_bcount
);
7789 switch (page_code
) {
7791 struct mode_format
*page3p
= NULL
;
7792 modehdrp
= (struct mode_header
*)(bp
->b_un
.b_addr
);
7793 modehdrp
->bdesc_length
= MODE_BLK_DESC_LENGTH
;
7795 page3p
= (void *)((caddr_t
)modehdrp
+
7796 MODE_HEADER_LENGTH
+ MODE_BLK_DESC_LENGTH
);
7797 page3p
->mode_page
.code
= 0x3;
7798 page3p
->mode_page
.length
=
7799 (uchar_t
)(sizeof (struct mode_format
));
7800 page3p
->data_bytes_sect
= 512;
7801 page3p
->sect_track
= 63;
7805 struct mode_geometry
*page4p
= NULL
;
7806 modehdrp
= (struct mode_header
*)(bp
->b_un
.b_addr
);
7807 modehdrp
->bdesc_length
= MODE_BLK_DESC_LENGTH
;
7809 page4p
= (void *)((caddr_t
)modehdrp
+
7810 MODE_HEADER_LENGTH
+ MODE_BLK_DESC_LENGTH
);
7811 page4p
->mode_page
.code
= 0x4;
7812 page4p
->mode_page
.length
=
7813 (uchar_t
)(sizeof (struct mode_geometry
));
7814 page4p
->heads
= 255;
7815 page4p
->rpm
= 10000;