2 * megaraid_sas.c: source for mega_sas driver
4 * MegaRAID device driver for SAS controllers
5 * Copyright (c) 2005-2008, LSI Logic Corporation.
10 * Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com>
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions are met:
16 * 1. Redistributions of source code must retain the above copyright notice,
17 * this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright notice,
20 * this list of conditions and the following disclaimer in the documentation
21 * and/or other materials provided with the distribution.
23 * 3. Neither the name of the author nor the names of its contributors may be
24 * used to endorse or promote products derived from this software without
25 * specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
42 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
43 * Use is subject to license terms.
44 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 #include <sys/types.h>
48 #include <sys/param.h>
50 #include <sys/errno.h>
53 #include <sys/modctl.h>
55 #include <sys/devops.h>
56 #include <sys/cmn_err.h>
59 #include <sys/mkdev.h>
61 #include <sys/scsi/scsi.h>
63 #include <sys/sunddi.h>
64 #include <sys/atomic.h>
65 #include <sys/signal.h>
67 #include "megaraid_sas.h"
72 #include <sys/ddifm.h>
73 #include <sys/fm/protocol.h>
74 #include <sys/fm/util.h>
75 #include <sys/fm/io/ddi.h>
/*
 * Anchor for the driver's per-controller soft-state structures; initialized
 * with ddi_soft_state_init() in _init() and queried via ddi_get_soft_state()
 * throughout the driver (attach, getinfo, ioctl, ...).
 */
static void *megasas_state = NULL;
/*
 * Global debug/verbosity level; defaults to CL_ANN (announcements).
 * NOTE(review): presumably consulted by con_log() to gate CL_* messages —
 * con_log()'s implementation is not visible in this chunk, confirm there.
 */
static int debug_level_g = CL_ANN;
/*
 * SCSA framework pass-through entry points are declared weak so their
 * presence can be tested at runtime before calling (e.g. megasas_close()
 * checks "if (scsi_hba_close)" before invoking it).
 */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl
87 static ddi_dma_attr_t megasas_generic_dma_attr
= {
88 DMA_ATTR_V0
, /* dma_attr_version */
89 0, /* low DMA address range */
90 0xFFFFFFFFU
, /* high DMA address range */
91 0xFFFFFFFFU
, /* DMA counter register */
92 8, /* DMA address alignment */
93 0x07, /* DMA burstsizes */
95 0xFFFFFFFFU
, /* max DMA size */
96 0xFFFFFFFFU
, /* segment boundary */
97 MEGASAS_MAX_SGE_CNT
, /* dma_attr_sglen */
98 512, /* granularity of device */
99 0 /* bus specific DMA flags */
/*
 * 0x1000000 = 16 MB.  By its name, the ceiling reported for the DMA
 * max-transfer capability; its consumer is not visible in this chunk —
 * TODO(review): confirm against the tran_getcap() implementation.
 */
int32_t megasas_max_cap_maxxfer = 0x1000000;
105 * cb_ops contains base level routines
107 static struct cb_ops megasas_cb_ops
= {
108 megasas_open
, /* open */
109 megasas_close
, /* close */
110 nodev
, /* strategy */
115 megasas_ioctl
, /* ioctl */
120 nodev
, /* cb_prop_op */
122 D_NEW
| D_HOTPLUG
, /* cb_flag */
124 nodev
, /* cb_aread */
125 nodev
/* cb_awrite */
129 * dev_ops contains configuration routines
131 static struct dev_ops megasas_ops
= {
134 megasas_getinfo
, /* getinfo */
135 nulldev
, /* identify */
137 megasas_attach
, /* attach */
138 megasas_detach
, /* detach */
139 megasas_reset
, /* reset */
140 &megasas_cb_ops
, /* char/block ops */
143 ddi_quiesce_not_supported
, /* devo_quiesce */
146 static struct modldrv modldrv
= {
147 &mod_driverops
, /* module type - driver */
149 &megasas_ops
, /* driver ops */
152 static struct modlinkage modlinkage
= {
153 MODREV_1
, /* ml_rev - must be MODREV_1 */
154 &modldrv
, /* ml_linkage */
155 NULL
/* end of driver linkage */
158 static struct ddi_device_acc_attr endian_attr
= {
160 DDI_STRUCTURE_LE_ACC
,
167 * ************************************************************************** *
169 * common entry points - for loadable kernel modules *
171 * ************************************************************************** *
175 * _init - initialize a loadable module
178 * The driver should perform any one-time resource allocation or data
179 * initialization during driver loading in _init(). For example, the driver
180 * should initialize any mutexes global to the driver in this routine.
181 * The driver should not, however, use _init() to allocate or initialize
182 * anything that has to do with a particular instance of the device.
183 * Per-instance initialization must be done in attach().
190 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
192 ret
= ddi_soft_state_init(&megasas_state
,
193 sizeof (struct megasas_instance
), 0);
196 con_log(CL_ANN
, (CE_WARN
, "megaraid: could not init state"));
200 if ((ret
= scsi_hba_init(&modlinkage
)) != 0) {
201 con_log(CL_ANN
, (CE_WARN
, "megaraid: could not init scsi hba"));
202 ddi_soft_state_fini(&megasas_state
);
206 ret
= mod_install(&modlinkage
);
209 con_log(CL_ANN
, (CE_WARN
, "megaraid: mod_install failed"));
210 scsi_hba_fini(&modlinkage
);
211 ddi_soft_state_fini(&megasas_state
);
218 * _info - returns information about a loadable module.
221 * _info() is called to return module information. This is a typical entry
222 * point that does predefined role. It simply calls mod_info().
225 _info(struct modinfo
*modinfop
)
227 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
229 return (mod_info(&modlinkage
, modinfop
));
233 * _fini - prepare a loadable module for unloading
236 * In _fini(), the driver should release any resources that were allocated in
237 * _init(). The driver must remove itself from the system module list.
244 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
246 if ((ret
= mod_remove(&modlinkage
)) != 0)
249 scsi_hba_fini(&modlinkage
);
251 ddi_soft_state_fini(&megasas_state
);
258 * ************************************************************************** *
260 * common entry points - for autoconfiguration *
262 * ************************************************************************** *
265 * attach - adds a device to the system as part of initialization
269 * The kernel calls a driver's attach() entry point to attach an instance of
270 * a device (for MegaRAID, it is instance of a controller) or to resume
271 * operation for an instance of a device that has been suspended or has been
272 * shut down by the power management framework
273 * The attach() entry point typically includes the following types of
275 * - allocate a soft-state structure for the device instance (for MegaRAID,
276 * controller instance)
277 * - initialize per-instance mutexes
278 * - initialize condition variables
279 * - register the device's interrupts (for MegaRAID, controller's interrupts)
280 * - map the registers and memory of the device instance (for MegaRAID,
281 * controller instance)
282 * - create minor device nodes for the device instance (for MegaRAID,
283 * controller instance)
284 * - report that the device instance (for MegaRAID, controller instance) has
288 megasas_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
292 uint8_t added_isr_f
= 0;
293 uint8_t added_soft_isr_f
= 0;
294 uint8_t create_devctl_node_f
= 0;
295 uint8_t create_scsi_node_f
= 0;
296 uint8_t create_ioc_node_f
= 0;
297 uint8_t tran_alloc_f
= 0;
305 scsi_hba_tran_t
*tran
;
306 ddi_dma_attr_t tran_dma_attr
;
307 struct megasas_instance
*instance
;
309 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
312 ASSERT(NO_COMPETING_THREADS
);
314 instance_no
= ddi_get_instance(dip
);
317 * Since we know that some instantiations of this device can be
318 * plugged into slave-only SBus slots, check to see whether this is
321 if (ddi_slaveonly(dip
) == DDI_SUCCESS
) {
322 con_log(CL_ANN
, (CE_WARN
,
323 "mega%d: Device in slave-only slot, unused", instance_no
));
324 return (DDI_FAILURE
);
329 con_log(CL_DLEVEL1
, (CE_NOTE
, "megasas: DDI_ATTACH"));
330 /* allocate the soft state for the instance */
331 if (ddi_soft_state_zalloc(megasas_state
, instance_no
)
333 con_log(CL_ANN
, (CE_WARN
,
334 "mega%d: Failed to allocate soft state",
337 return (DDI_FAILURE
);
340 instance
= (struct megasas_instance
*)ddi_get_soft_state
341 (megasas_state
, instance_no
);
343 if (instance
== NULL
) {
344 con_log(CL_ANN
, (CE_WARN
,
345 "mega%d: Bad soft state", instance_no
));
347 ddi_soft_state_free(megasas_state
, instance_no
);
349 return (DDI_FAILURE
);
352 bzero((caddr_t
)instance
,
353 sizeof (struct megasas_instance
));
355 instance
->func_ptr
= kmem_zalloc(
356 sizeof (struct megasas_func_ptr
), KM_SLEEP
);
357 ASSERT(instance
->func_ptr
);
359 /* Setup the PCI configuration space handles */
360 if (pci_config_setup(dip
, &instance
->pci_handle
) !=
362 con_log(CL_ANN
, (CE_WARN
,
363 "mega%d: pci config setup failed ",
366 kmem_free(instance
->func_ptr
,
367 sizeof (struct megasas_func_ptr
));
368 ddi_soft_state_free(megasas_state
, instance_no
);
370 return (DDI_FAILURE
);
373 if (ddi_dev_nregs(dip
, &nregs
) != DDI_SUCCESS
) {
374 con_log(CL_ANN
, (CE_WARN
,
375 "megaraid: failed to get registers."));
377 pci_config_teardown(&instance
->pci_handle
);
378 kmem_free(instance
->func_ptr
,
379 sizeof (struct megasas_func_ptr
));
380 ddi_soft_state_free(megasas_state
, instance_no
);
382 return (DDI_FAILURE
);
385 vendor_id
= pci_config_get16(instance
->pci_handle
,
387 device_id
= pci_config_get16(instance
->pci_handle
,
390 subsysvid
= pci_config_get16(instance
->pci_handle
,
392 subsysid
= pci_config_get16(instance
->pci_handle
,
395 pci_config_put16(instance
->pci_handle
, PCI_CONF_COMM
,
396 (pci_config_get16(instance
->pci_handle
,
397 PCI_CONF_COMM
) | PCI_COMM_ME
));
398 irq
= pci_config_get8(instance
->pci_handle
,
401 con_log(CL_DLEVEL1
, (CE_CONT
, "megasas%d: "
402 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
403 instance_no
, vendor_id
, device_id
, subsysvid
,
404 subsysid
, irq
, MEGASAS_VERSION
));
406 /* enable bus-mastering */
407 command
= pci_config_get16(instance
->pci_handle
,
410 if (!(command
& PCI_COMM_ME
)) {
411 command
|= PCI_COMM_ME
;
413 pci_config_put16(instance
->pci_handle
,
414 PCI_CONF_COMM
, command
);
416 con_log(CL_ANN
, (CE_CONT
, "megaraid%d: "
417 "enable bus-mastering\n", instance_no
));
419 con_log(CL_DLEVEL1
, (CE_CONT
, "megaraid%d: "
420 "bus-mastering already set\n", instance_no
));
423 /* initialize function pointers */
424 if ((device_id
== PCI_DEVICE_ID_LSI_1078
) ||
425 (device_id
== PCI_DEVICE_ID_LSI_1078DE
)) {
426 con_log(CL_DLEVEL1
, (CE_CONT
, "megasas%d: "
427 "1078R/DE detected\n", instance_no
));
428 instance
->func_ptr
->read_fw_status_reg
=
429 read_fw_status_reg_ppc
;
430 instance
->func_ptr
->issue_cmd
= issue_cmd_ppc
;
431 instance
->func_ptr
->issue_cmd_in_sync_mode
=
432 issue_cmd_in_sync_mode_ppc
;
433 instance
->func_ptr
->issue_cmd_in_poll_mode
=
434 issue_cmd_in_poll_mode_ppc
;
435 instance
->func_ptr
->enable_intr
=
437 instance
->func_ptr
->disable_intr
=
439 instance
->func_ptr
->intr_ack
= intr_ack_ppc
;
441 con_log(CL_DLEVEL1
, (CE_CONT
, "megasas%d: "
442 "1064/8R detected\n", instance_no
));
443 instance
->func_ptr
->read_fw_status_reg
=
444 read_fw_status_reg_xscale
;
445 instance
->func_ptr
->issue_cmd
=
447 instance
->func_ptr
->issue_cmd_in_sync_mode
=
448 issue_cmd_in_sync_mode_xscale
;
449 instance
->func_ptr
->issue_cmd_in_poll_mode
=
450 issue_cmd_in_poll_mode_xscale
;
451 instance
->func_ptr
->enable_intr
=
453 instance
->func_ptr
->disable_intr
=
455 instance
->func_ptr
->intr_ack
=
459 instance
->baseaddress
= pci_config_get32(
460 instance
->pci_handle
, PCI_CONF_BASE0
);
461 instance
->baseaddress
&= 0x0fffc;
464 instance
->vendor_id
= vendor_id
;
465 instance
->device_id
= device_id
;
466 instance
->subsysvid
= subsysvid
;
467 instance
->subsysid
= subsysid
;
470 instance
->fm_capabilities
= ddi_prop_get_int(
471 DDI_DEV_T_ANY
, instance
->dip
, DDI_PROP_DONTPASS
,
472 "fm-capable", DDI_FM_EREPORT_CAPABLE
|
473 DDI_FM_ACCCHK_CAPABLE
| DDI_FM_DMACHK_CAPABLE
474 | DDI_FM_ERRCB_CAPABLE
);
476 megasas_fm_init(instance
);
478 /* setup the mfi based low level driver */
479 if (init_mfi(instance
) != DDI_SUCCESS
) {
480 con_log(CL_ANN
, (CE_WARN
, "megaraid: "
481 "could not initialize the low level driver"));
487 * Allocate the interrupt blocking cookie.
488 * It represents the information the framework
489 * needs to block interrupts. This cookie will
* be used by the locks shared across our ISR.
491 * These locks must be initialized before we
495 if (ddi_get_iblock_cookie(dip
, 0,
496 &instance
->iblock_cookie
) != DDI_SUCCESS
) {
501 if (ddi_get_soft_iblock_cookie(dip
, DDI_SOFTINT_HIGH
,
502 &instance
->soft_iblock_cookie
) != DDI_SUCCESS
) {
508 * Initialize the driver mutexes common to
509 * normal/high level isr
511 if (ddi_intr_hilevel(dip
, 0)) {
512 instance
->isr_level
= HIGH_LEVEL_INTR
;
513 mutex_init(&instance
->cmd_pool_mtx
,
514 "cmd_pool_mtx", MUTEX_DRIVER
,
515 instance
->soft_iblock_cookie
);
516 mutex_init(&instance
->cmd_pend_mtx
,
517 "cmd_pend_mtx", MUTEX_DRIVER
,
518 instance
->soft_iblock_cookie
);
521 * Initialize the driver mutexes
522 * specific to soft-isr
524 instance
->isr_level
= NORMAL_LEVEL_INTR
;
525 mutex_init(&instance
->cmd_pool_mtx
,
526 "cmd_pool_mtx", MUTEX_DRIVER
,
527 instance
->iblock_cookie
);
528 mutex_init(&instance
->cmd_pend_mtx
,
529 "cmd_pend_mtx", MUTEX_DRIVER
,
530 instance
->iblock_cookie
);
533 mutex_init(&instance
->completed_pool_mtx
,
534 "completed_pool_mtx", MUTEX_DRIVER
,
535 instance
->iblock_cookie
);
536 mutex_init(&instance
->int_cmd_mtx
, "int_cmd_mtx",
537 MUTEX_DRIVER
, instance
->iblock_cookie
);
538 mutex_init(&instance
->aen_cmd_mtx
, "aen_cmd_mtx",
539 MUTEX_DRIVER
, instance
->iblock_cookie
);
540 mutex_init(&instance
->abort_cmd_mtx
, "abort_cmd_mtx",
541 MUTEX_DRIVER
, instance
->iblock_cookie
);
543 cv_init(&instance
->int_cmd_cv
, NULL
, CV_DRIVER
, NULL
);
544 cv_init(&instance
->abort_cmd_cv
, NULL
, CV_DRIVER
, NULL
);
546 INIT_LIST_HEAD(&instance
->completed_pool_list
);
548 /* Register our isr. */
549 if (ddi_add_intr(dip
, 0, NULL
, NULL
, megasas_isr
,
550 (caddr_t
)instance
) != DDI_SUCCESS
) {
551 con_log(CL_ANN
, (CE_WARN
,
552 " ISR did not register"));
559 /* Register our soft-isr for highlevel interrupts. */
560 if (instance
->isr_level
== HIGH_LEVEL_INTR
) {
561 if (ddi_add_softintr(dip
, DDI_SOFTINT_HIGH
,
562 &instance
->soft_intr_id
, NULL
, NULL
,
563 megasas_softintr
, (caddr_t
)instance
) !=
565 con_log(CL_ANN
, (CE_WARN
,
566 " Software ISR did not register"));
571 added_soft_isr_f
= 1;
574 /* Allocate a transport structure */
575 tran
= scsi_hba_tran_alloc(dip
, SCSI_HBA_CANSLEEP
);
578 con_log(CL_ANN
, (CE_WARN
,
579 "scsi_hba_tran_alloc failed"));
585 instance
->tran
= tran
;
587 tran
->tran_hba_private
= instance
;
588 tran
->tran_tgt_private
= NULL
;
589 tran
->tran_tgt_init
= megasas_tran_tgt_init
;
590 tran
->tran_tgt_probe
= scsi_hba_probe
;
591 tran
->tran_tgt_free
= (void (*)())NULL
;
592 tran
->tran_init_pkt
= megasas_tran_init_pkt
;
593 tran
->tran_start
= megasas_tran_start
;
594 tran
->tran_abort
= megasas_tran_abort
;
595 tran
->tran_reset
= megasas_tran_reset
;
596 tran
->tran_bus_reset
= megasas_tran_bus_reset
;
597 tran
->tran_getcap
= megasas_tran_getcap
;
598 tran
->tran_setcap
= megasas_tran_setcap
;
599 tran
->tran_destroy_pkt
= megasas_tran_destroy_pkt
;
600 tran
->tran_dmafree
= megasas_tran_dmafree
;
601 tran
->tran_sync_pkt
= megasas_tran_sync_pkt
;
602 tran
->tran_reset_notify
= NULL
;
603 tran
->tran_quiesce
= megasas_tran_quiesce
;
604 tran
->tran_unquiesce
= megasas_tran_unquiesce
;
606 tran_dma_attr
= megasas_generic_dma_attr
;
607 tran_dma_attr
.dma_attr_sgllen
= instance
->max_num_sge
;
609 /* Attach this instance of the hba */
610 if (scsi_hba_attach_setup(dip
, &tran_dma_attr
, tran
, 0)
612 con_log(CL_ANN
, (CE_WARN
,
613 "scsi_hba_attach failed\n"));
618 /* create devctl node for cfgadm command */
619 if (ddi_create_minor_node(dip
, "devctl",
620 S_IFCHR
, INST2DEVCTL(instance_no
),
621 DDI_NT_SCSI_NEXUS
, 0) == DDI_FAILURE
) {
622 con_log(CL_ANN
, (CE_WARN
,
623 "megaraid: failed to create devctl node."));
628 create_devctl_node_f
= 1;
630 /* create scsi node for cfgadm command */
631 if (ddi_create_minor_node(dip
, "scsi", S_IFCHR
,
632 INST2SCSI(instance_no
),
633 DDI_NT_SCSI_ATTACHMENT_POINT
, 0) ==
635 con_log(CL_ANN
, (CE_WARN
,
636 "megaraid: failed to create scsi node."));
641 create_scsi_node_f
= 1;
643 (void) sprintf(instance
->iocnode
, "%d:lsirdctl",
647 * Create a node for applications
648 * for issuing ioctl to the driver.
650 if (ddi_create_minor_node(dip
, instance
->iocnode
,
651 S_IFCHR
, INST2LSIRDCTL(instance_no
),
652 DDI_PSEUDO
, 0) == DDI_FAILURE
) {
653 con_log(CL_ANN
, (CE_WARN
,
654 "megaraid: failed to create ioctl node."));
659 create_ioc_node_f
= 1;
661 /* enable interrupt */
662 instance
->func_ptr
->enable_intr(instance
);
665 if (start_mfi_aen(instance
)) {
666 con_log(CL_ANN
, (CE_WARN
,
667 "megaraid: failed to initiate AEN."));
668 goto fail_initiate_aen
;
671 con_log(CL_DLEVEL1
, (CE_NOTE
,
672 "AEN started for instance %d.", instance_no
));
674 /* Finally! We are on the air. */
677 if (megasas_check_acc_handle(instance
->regmap_handle
) !=
681 if (megasas_check_acc_handle(instance
->pci_handle
) !=
687 con_log(CL_ANN
, (CE_NOTE
,
688 "megasas: DDI_PM_RESUME"));
691 con_log(CL_ANN
, (CE_NOTE
,
692 "megasas: DDI_RESUME"));
695 con_log(CL_ANN
, (CE_WARN
,
696 "megasas: invalid attach cmd=%x", cmd
));
697 return (DDI_FAILURE
);
700 return (DDI_SUCCESS
);
704 if (create_devctl_node_f
) {
705 ddi_remove_minor_node(dip
, "devctl");
708 if (create_scsi_node_f
) {
709 ddi_remove_minor_node(dip
, "scsi");
712 if (create_ioc_node_f
) {
713 ddi_remove_minor_node(dip
, instance
->iocnode
);
717 scsi_hba_tran_free(tran
);
721 if (added_soft_isr_f
) {
722 ddi_remove_softintr(instance
->soft_intr_id
);
726 ddi_remove_intr(dip
, 0, instance
->iblock_cookie
);
729 megasas_fm_ereport(instance
, DDI_FM_DEVICE_NO_RESPONSE
);
730 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
732 megasas_fm_fini(instance
);
734 pci_config_teardown(&instance
->pci_handle
);
736 ddi_soft_state_free(megasas_state
, instance_no
);
738 con_log(CL_ANN
, (CE_NOTE
,
739 "megasas: return failure from mega_attach\n"));
741 return (DDI_FAILURE
);
745 * getinfo - gets device information
751 * The system calls getinfo() to obtain configuration information that only
752 * the driver knows. The mapping of minor numbers to device instance is
753 * entirely under the control of the driver. The system sometimes needs to ask
754 * the driver which device a particular dev_t represents.
755 * Given the device number return the devinfo pointer from the scsi_device
760 megasas_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
, void **resultp
)
763 int megasas_minor
= getminor((dev_t
)arg
);
765 struct megasas_instance
*instance
;
767 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
770 case DDI_INFO_DEVT2DEVINFO
:
771 instance
= (struct megasas_instance
*)
772 ddi_get_soft_state(megasas_state
,
773 MINOR2INST(megasas_minor
));
775 if (instance
== NULL
) {
779 *resultp
= instance
->dip
;
783 case DDI_INFO_DEVT2INSTANCE
:
784 *resultp
= (void *)instance
;
796 * detach - detaches a device from the system
797 * @dip: pointer to the device's dev_info structure
798 * @cmd: type of detach
800 * A driver's detach() entry point is called to detach an instance of a device
801 * that is bound to the driver. The entry point is called with the instance of
802 * the device node to be detached and with DDI_DETACH, which is specified as
803 * the cmd argument to the entry point.
804 * This routine is called during driver unload. We free all the allocated
805 * resources and call the corresponding LLD so that it can also release all
809 megasas_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
813 struct megasas_instance
*instance
;
815 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
818 ASSERT(NO_COMPETING_THREADS
);
820 instance_no
= ddi_get_instance(dip
);
822 instance
= (struct megasas_instance
*)ddi_get_soft_state(megasas_state
,
826 con_log(CL_ANN
, (CE_WARN
,
827 "megasas:%d could not get instance in detach",
830 return (DDI_FAILURE
);
833 con_log(CL_ANN
, (CE_NOTE
,
834 "megasas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
835 instance_no
, instance
->vendor_id
, instance
->device_id
,
836 instance
->subsysvid
, instance
->subsysid
));
840 con_log(CL_ANN
, (CE_NOTE
,
841 "megasas_detach: DDI_DETACH\n"));
843 if (scsi_hba_detach(dip
) != DDI_SUCCESS
) {
844 con_log(CL_ANN
, (CE_WARN
,
845 "megasas:%d failed to detach",
848 return (DDI_FAILURE
);
851 scsi_hba_tran_free(instance
->tran
);
853 if (abort_aen_cmd(instance
, instance
->aen_cmd
)) {
854 con_log(CL_ANN
, (CE_WARN
, "megasas_detach: "
855 "failed to abort prevous AEN command\n"));
857 return (DDI_FAILURE
);
860 instance
->func_ptr
->disable_intr(instance
);
862 if (instance
->isr_level
== HIGH_LEVEL_INTR
) {
863 ddi_remove_softintr(instance
->soft_intr_id
);
866 ddi_remove_intr(dip
, 0, instance
->iblock_cookie
);
868 free_space_for_mfi(instance
);
870 megasas_fm_fini(instance
);
872 pci_config_teardown(&instance
->pci_handle
);
874 kmem_free(instance
->func_ptr
,
875 sizeof (struct megasas_func_ptr
));
877 ddi_soft_state_free(megasas_state
, instance_no
);
880 con_log(CL_ANN
, (CE_NOTE
,
881 "megasas_detach: DDI_PM_SUSPEND\n"));
885 con_log(CL_ANN
, (CE_NOTE
,
886 "megasas_detach: DDI_SUSPEND\n"));
890 con_log(CL_ANN
, (CE_WARN
,
891 "invalid detach command:0x%x", cmd
));
892 return (DDI_FAILURE
);
895 return (DDI_SUCCESS
);
899 * ************************************************************************** *
901 * common entry points - for character driver types *
903 * ************************************************************************** *
906 * open - gets access to a device
912 * Access to a device by one or more application programs is controlled
913 * through the open() and close() entry points. The primary function of
914 * open() is to verify that the open request is allowed.
917 megasas_open(dev_t
*dev
, int openflags
, int otyp
, cred_t
*credp
)
921 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
923 /* Check root permissions */
924 if (drv_priv(credp
) != 0) {
925 con_log(CL_ANN
, (CE_WARN
,
926 "megaraid: Non-root ioctl access tried!"));
930 /* Verify we are being opened as a character device */
931 if (otyp
!= OTYP_CHR
) {
932 con_log(CL_ANN
, (CE_WARN
,
933 "megaraid: ioctl node must be a char node\n"));
937 if (ddi_get_soft_state(megasas_state
, MINOR2INST(getminor(*dev
)))
943 rval
= scsi_hba_open(dev
, openflags
, otyp
, credp
);
950 * close - gives up access to a device
956 * close() should perform any cleanup necessary to finish using the minor
957 * device, and prepare the device (and driver) to be opened again.
960 megasas_close(dev_t dev
, int openflags
, int otyp
, cred_t
*credp
)
964 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
966 /* no need for locks! */
968 if (scsi_hba_close
) {
969 rval
= scsi_hba_close(dev
, openflags
, otyp
, credp
);
976 * ioctl - performs a range of I/O commands for character drivers
984 * ioctl() routine must make sure that user data is copied into or out of the
985 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
986 * and ddi_copyout(), as appropriate.
987 * This is a wrapper routine to serialize access to the actual ioctl routine.
988 * ioctl() should return 0 on success, or the appropriate error number. The
989 * driver may also set the value returned to the calling process through rvalp.
992 megasas_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int mode
, cred_t
*credp
,
997 struct megasas_instance
*instance
;
998 struct megasas_ioctl ioctl
;
999 struct megasas_aen aen
;
1001 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1003 instance
= ddi_get_soft_state(megasas_state
, MINOR2INST(getminor(dev
)));
1005 if (instance
== NULL
) {
1006 /* invalid minor number */
1007 con_log(CL_ANN
, (CE_WARN
, "megaraid: adapter not found."));
1011 switch ((uint_t
)cmd
) {
1012 case MEGASAS_IOCTL_FIRMWARE
:
1013 if (ddi_copyin((void *) arg
, &ioctl
,
1014 sizeof (struct megasas_ioctl
), mode
)) {
1015 con_log(CL_ANN
, (CE_WARN
, "megasas_ioctl: "
1016 "ERROR IOCTL copyin"));
1020 if (ioctl
.control_code
== MR_DRIVER_IOCTL_COMMON
) {
1021 rval
= handle_drv_ioctl(instance
, &ioctl
, mode
);
1023 rval
= handle_mfi_ioctl(instance
, &ioctl
, mode
);
1026 if (ddi_copyout((void *) &ioctl
, (void *)arg
,
1027 (sizeof (struct megasas_ioctl
) - 1), mode
)) {
1028 con_log(CL_ANN
, (CE_WARN
,
1029 "megasas_ioctl: copy_to_user failed\n"));
1034 case MEGASAS_IOCTL_AEN
:
1035 if (ddi_copyin((void *) arg
, &aen
,
1036 sizeof (struct megasas_aen
), mode
)) {
1037 con_log(CL_ANN
, (CE_WARN
,
1038 "megasas_ioctl: ERROR AEN copyin"));
1042 rval
= handle_mfi_aen(instance
, &aen
);
1044 if (ddi_copyout((void *) &aen
, (void *)arg
,
1045 sizeof (struct megasas_aen
), mode
)) {
1046 con_log(CL_ANN
, (CE_WARN
,
1047 "megasas_ioctl: copy_to_user failed\n"));
1053 rval
= scsi_hba_ioctl(dev
, cmd
, arg
,
1054 mode
, credp
, rvalp
);
1056 con_log(CL_DLEVEL1
, (CE_NOTE
, "megasas_ioctl: "
1057 "scsi_hba_ioctl called, ret = %x.", rval
));
1064 * ************************************************************************** *
1066 * common entry points - for block driver types *
1068 * ************************************************************************** *
1079 megasas_reset(dev_info_t
*dip
, ddi_reset_cmd_t cmd
)
1083 struct megasas_instance
*instance
;
1085 instance_no
= ddi_get_instance(dip
);
1086 instance
= (struct megasas_instance
*)ddi_get_soft_state
1087 (megasas_state
, instance_no
);
1089 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1092 con_log(CL_ANN
, (CE_WARN
,
1093 "megaraid:%d could not get adapter in reset",
1095 return (DDI_FAILURE
);
1098 con_log(CL_ANN
, (CE_NOTE
, "flushing cache for instance %d ..",
1101 flush_cache(instance
);
1103 return (DDI_SUCCESS
);
1108 * ************************************************************************** *
1110 * entry points (SCSI HBA) *
1112 * ************************************************************************** *
1115 * tran_tgt_init - initialize a target device instance
1121 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1122 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1123 * the device's address as valid and supportable for that particular HBA.
1124 * By returning DDI_FAILURE, the instance of the target driver for that device
1125 * is not probed or attached.
1129 megasas_tran_tgt_init(dev_info_t
*hba_dip
, dev_info_t
*tgt_dip
,
1130 scsi_hba_tran_t
*tran
, struct scsi_device
*sd
)
1132 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1134 return (DDI_SUCCESS
);
1138 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1148 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1149 * structure and DMA resources for a target driver request. The
1150 * tran_init_pkt() entry point is called when the target driver calls the
1151 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1152 * is a request to perform one or more of three possible services:
1153 * - allocation and initialization of a scsi_pkt structure
1154 * - allocation of DMA resources for data transfer
1155 * - reallocation of DMA resources for the next portion of the data transfer
1157 static struct scsi_pkt
*
1158 megasas_tran_init_pkt(struct scsi_address
*ap
, register struct scsi_pkt
*pkt
,
1159 struct buf
*bp
, int cmdlen
, int statuslen
, int tgtlen
,
1160 int flags
, int (*callback
)(), caddr_t arg
)
1162 struct scsa_cmd
*acmd
;
1163 struct megasas_instance
*instance
;
1164 struct scsi_pkt
*new_pkt
;
1166 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1168 instance
= ADDR2MEGA(ap
);
1170 /* step #1 : pkt allocation */
1172 pkt
= scsi_hba_pkt_alloc(instance
->dip
, ap
, cmdlen
, statuslen
,
1173 tgtlen
, sizeof (struct scsa_cmd
), callback
, arg
);
1178 acmd
= PKT2CMD(pkt
);
1181 * Initialize the new pkt - we redundantly initialize
1182 * all the fields for illustrative purposes.
1184 acmd
->cmd_pkt
= pkt
;
1185 acmd
->cmd_flags
= 0;
1186 acmd
->cmd_scblen
= statuslen
;
1187 acmd
->cmd_cdblen
= cmdlen
;
1188 acmd
->cmd_dmahandle
= NULL
;
1189 acmd
->cmd_ncookies
= 0;
1190 acmd
->cmd_cookie
= 0;
1191 acmd
->cmd_cookiecnt
= 0;
1194 pkt
->pkt_address
= *ap
;
1195 pkt
->pkt_comp
= (void (*)())NULL
;
1200 pkt
->pkt_statistics
= 0;
1201 pkt
->pkt_reason
= 0;
1204 acmd
= PKT2CMD(pkt
);
1208 /* step #2 : dma allocation/move */
1209 if (bp
&& bp
->b_bcount
!= 0) {
1210 if (acmd
->cmd_dmahandle
== NULL
) {
1211 if (megasas_dma_alloc(instance
, pkt
, bp
, flags
,
1214 scsi_hba_pkt_free(ap
, new_pkt
);
1217 return ((struct scsi_pkt
*)NULL
);
1220 if (megasas_dma_move(instance
, pkt
, bp
) == -1) {
1221 return ((struct scsi_pkt
*)NULL
);
1230 * tran_start - transport a SCSI command to the addressed target
1234 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1235 * SCSI command to the addressed target. The SCSI command is described
1236 * entirely within the scsi_pkt structure, which the target driver allocated
1237 * through the HBA driver's tran_init_pkt() entry point. If the command
1238 * involves a data transfer, DMA resources must also have been allocated for
1239 * the scsi_pkt structure.
1242 * TRAN_BUSY - request queue is full, no more free scbs
1243 * TRAN_ACCEPT - pkt has been submitted to the instance
1246 megasas_tran_start(struct scsi_address
*ap
, register struct scsi_pkt
*pkt
)
1248 uchar_t cmd_done
= 0;
1250 struct megasas_instance
*instance
= ADDR2MEGA(ap
);
1251 struct megasas_cmd
*cmd
;
1253 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
1254 __func__
, __LINE__
, pkt
->pkt_cdbp
[0]));
1256 pkt
->pkt_reason
= CMD_CMPLT
;
1257 *pkt
->pkt_scbp
= STATUS_GOOD
; /* clear arq scsi_status */
1259 cmd
= build_cmd(instance
, ap
, pkt
, &cmd_done
);
1262 * Check if the command is already completed by the mega_build_cmd()
1263 * routine. In which case the busy_flag would be clear and scb will be
1264 * NULL and appropriate reason provided in pkt_reason field
1267 if ((pkt
->pkt_flags
& FLAG_NOINTR
) == 0) {
1268 scsi_hba_pkt_comp(pkt
);
1270 pkt
->pkt_reason
= CMD_CMPLT
;
1271 pkt
->pkt_scbp
[0] = STATUS_GOOD
;
1272 pkt
->pkt_state
|= STATE_GOT_BUS
| STATE_GOT_TARGET
1274 return (TRAN_ACCEPT
);
1281 if ((pkt
->pkt_flags
& FLAG_NOINTR
) == 0) {
1282 if (instance
->fw_outstanding
> instance
->max_fw_cmds
) {
1283 con_log(CL_ANN
, (CE_CONT
, "megasas:Firmware busy"));
1284 return_mfi_pkt(instance
, cmd
);
/* Synchronize the Cmd frame for the controller */
1289 (void) ddi_dma_sync(cmd
->frame_dma_obj
.dma_handle
, 0, 0,
1290 DDI_DMA_SYNC_FORDEV
);
1292 instance
->func_ptr
->issue_cmd(cmd
, instance
);
1295 struct megasas_header
*hdr
= &cmd
->frame
->hdr
;
1297 cmd
->sync_cmd
= MEGASAS_TRUE
;
1299 instance
->func_ptr
-> issue_cmd_in_poll_mode(instance
, cmd
);
1301 pkt
->pkt_reason
= CMD_CMPLT
;
1302 pkt
->pkt_statistics
= 0;
1303 pkt
->pkt_state
|= STATE_XFERRED_DATA
| STATE_GOT_STATUS
;
1305 switch (hdr
->cmd_status
) {
1307 pkt
->pkt_scbp
[0] = STATUS_GOOD
;
1310 case MFI_STAT_SCSI_DONE_WITH_ERROR
:
1312 pkt
->pkt_reason
= CMD_CMPLT
;
1313 pkt
->pkt_statistics
= 0;
1315 ((struct scsi_status
*)pkt
->pkt_scbp
)->sts_chk
= 1;
1318 case MFI_STAT_DEVICE_NOT_FOUND
:
1319 pkt
->pkt_reason
= CMD_DEV_GONE
;
1320 pkt
->pkt_statistics
= STAT_DISCON
;
1324 ((struct scsi_status
*)pkt
->pkt_scbp
)->sts_busy
= 1;
1327 return_mfi_pkt(instance
, cmd
);
1328 (void) megasas_common_check(instance
, cmd
);
1330 scsi_hba_pkt_comp(pkt
);
1334 return (TRAN_ACCEPT
);
1338 * tran_abort - Abort any commands that are currently in transport
1342 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1343 * commands that are currently in transport for a particular target. This entry
1344 * point is called when a target driver calls scsi_abort(). The tran_abort()
1345 * entry point should attempt to abort the command denoted by the pkt
1346 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1347 * abort all outstanding commands in the transport layer for the particular
1348 * target or logical unit.
1352 megasas_tran_abort(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
1354 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1356 /* aborting command not supported by H/W */
1358 return (DDI_FAILURE
);
1362 * tran_reset - reset either the SCSI bus or target
1366 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1367 * the SCSI bus or a particular SCSI target device. This entry point is called
1368 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1369 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1370 * particular target or logical unit must be reset.
1374 megasas_tran_reset(struct scsi_address
*ap
, int level
)
1376 struct megasas_instance
*instance
= ADDR2MEGA(ap
);
1378 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1380 if (wait_for_outstanding(instance
)) {
1381 return (DDI_FAILURE
);
1383 return (DDI_SUCCESS
);
1388 * tran_bus_reset - reset the SCSI bus
1392 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1393 * initialized during the HBA driver's attach(). The vector should point to
1394 * an HBA entry point that is to be called when a user initiates a bus reset.
1395 * Implementation is hardware specific. If the HBA driver cannot reset the
1396 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1397 * or not initialize this vector.
1401 megasas_tran_bus_reset(dev_info_t
*dip
, int level
)
1403 int instance_no
= ddi_get_instance(dip
);
1405 struct megasas_instance
*instance
= ddi_get_soft_state(megasas_state
,
1408 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1410 if (wait_for_outstanding(instance
)) {
1411 return (DDI_FAILURE
);
1413 return (DDI_SUCCESS
);
1418 * tran_getcap - get one of a set of SCSA-defined capabilities
1423 * The target driver can request the current setting of the capability for a
1424 * particular target by setting the whom parameter to nonzero. A whom value of
1425 * zero indicates a request for the current setting of the general capability
1426 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1427 * for undefined capabilities or the current value of the requested capability.
1431 megasas_tran_getcap(struct scsi_address
*ap
, char *cap
, int whom
)
1435 struct megasas_instance
*instance
= ADDR2MEGA(ap
);
1437 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1439 /* we do allow inquiring about capabilities for other targets */
1444 switch (scsi_hba_lookup_capstr(cap
)) {
1445 case SCSI_CAP_DMA_MAX
:
1446 /* Limit to 16MB max transfer */
1447 rval
= megasas_max_cap_maxxfer
;
1449 case SCSI_CAP_MSG_OUT
:
1452 case SCSI_CAP_DISCONNECT
:
1455 case SCSI_CAP_SYNCHRONOUS
:
1458 case SCSI_CAP_WIDE_XFER
:
1461 case SCSI_CAP_TAGGED_QING
:
1464 case SCSI_CAP_UNTAGGED_QING
:
1467 case SCSI_CAP_PARITY
:
1470 case SCSI_CAP_INITIATOR_ID
:
1471 rval
= instance
->init_id
;
1476 case SCSI_CAP_LINKED_CMDS
:
1479 case SCSI_CAP_RESET_NOTIFICATION
:
1482 case SCSI_CAP_GEOMETRY
:
1487 con_log(CL_DLEVEL2
, (CE_NOTE
, "Default cap coming 0x%x",
1488 scsi_hba_lookup_capstr(cap
)));
1497 * tran_setcap - set one of a set of SCSA-defined capabilities
1503 * The target driver might request that the new value be set for a particular
1504 * target by setting the whom parameter to nonzero. A whom value of zero
1505 * means that request is to set the new value for the SCSI bus or for adapter
1506 * hardware in general.
1507 * The tran_setcap() should return the following values as appropriate:
1508 * - -1 for undefined capabilities
1509 * - 0 if the HBA driver cannot set the capability to the requested value
1510 * - 1 if the HBA driver is able to set the capability to the requested value
1514 megasas_tran_setcap(struct scsi_address
*ap
, char *cap
, int value
, int whom
)
1518 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1520 /* We don't allow setting capabilities for other targets */
1521 if (cap
== NULL
|| whom
== 0) {
1525 switch (scsi_hba_lookup_capstr(cap
)) {
1526 case SCSI_CAP_DMA_MAX
:
1527 case SCSI_CAP_MSG_OUT
:
1528 case SCSI_CAP_PARITY
:
1529 case SCSI_CAP_LINKED_CMDS
:
1530 case SCSI_CAP_RESET_NOTIFICATION
:
1531 case SCSI_CAP_DISCONNECT
:
1532 case SCSI_CAP_SYNCHRONOUS
:
1533 case SCSI_CAP_UNTAGGED_QING
:
1534 case SCSI_CAP_WIDE_XFER
:
1535 case SCSI_CAP_INITIATOR_ID
:
1538 * None of these are settable via
1539 * the capability interface.
1542 case SCSI_CAP_TAGGED_QING
:
1545 case SCSI_CAP_SECTOR_SIZE
:
1549 case SCSI_CAP_TOTAL_SECTORS
:
1561 * tran_destroy_pkt - deallocate scsi_pkt structure
1565 * The tran_destroy_pkt() entry point is the HBA driver function that
1566 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
1567 * called when the target driver calls scsi_destroy_pkt(). The
1568 * tran_destroy_pkt() entry point must free any DMA resources that have been
1569 * allocated for the packet. An implicit DMA synchronization occurs if the
1570 * DMA resources are freed and any cached data remains after the completion
1574 megasas_tran_destroy_pkt(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
1576 struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
1578 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1580 if (acmd
->cmd_flags
& CFLAG_DMAVALID
) {
1581 acmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
1583 (void) ddi_dma_unbind_handle(acmd
->cmd_dmahandle
);
1585 ddi_dma_free_handle(&acmd
->cmd_dmahandle
);
1587 acmd
->cmd_dmahandle
= NULL
;
1591 scsi_hba_pkt_free(ap
, pkt
);
1595 * tran_dmafree - deallocates DMA resources
1599 * The tran_dmafree() entry point deallocates DMAQ resources that have been
1600 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
1601 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
1602 * free only DMA resources allocated for a scsi_pkt structure, not the
1603 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
1604 * implicitly performed.
1608 megasas_tran_dmafree(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
1610 register struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
1612 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1614 if (acmd
->cmd_flags
& CFLAG_DMAVALID
) {
1615 acmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
1617 (void) ddi_dma_unbind_handle(acmd
->cmd_dmahandle
);
1619 ddi_dma_free_handle(&acmd
->cmd_dmahandle
);
1621 acmd
->cmd_dmahandle
= NULL
;
1626 * tran_sync_pkt - synchronize the DMA object allocated
1630 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
1631 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
1632 * entry point is called when the target driver calls scsi_sync_pkt(). If the
1633 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
1634 * must synchronize the CPU's view of the data. If the data transfer direction
1635 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
1636 * device's view of the data.
1640 megasas_tran_sync_pkt(struct scsi_address
*ap
, struct scsi_pkt
*pkt
)
1642 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1645 * following 'ddi_dma_sync()' API call
1646 * already called for each I/O in the ISR
1651 register struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
1653 if (acmd
->cmd_flags
& CFLAG_DMAVALID
) {
1654 (void) ddi_dma_sync(acmd
->cmd_dmahandle
, acmd
->cmd_dma_offset
,
1655 acmd
->cmd_dma_len
, (acmd
->cmd_flags
& CFLAG_DMASEND
) ?
1656 DDI_DMA_SYNC_FORDEV
: DDI_DMA_SYNC_FORCPU
);
1663 megasas_tran_quiesce(dev_info_t
*dip
)
1665 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1672 megasas_tran_unquiesce(dev_info_t
*dip
)
1674 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1680 * megasas_isr(caddr_t)
1682 * The Interrupt Service Routine
1684 * Collect status for all completed commands and do callback
1688 megasas_isr(struct megasas_instance
*instance
)
1695 struct megasas_cmd
*cmd
;
1697 con_log(CL_ANN1
, (CE_NOTE
, "chkpnt:%s:%d", __func__
, __LINE__
));
1700 if (!instance
->func_ptr
->intr_ack(instance
)) {
1701 return (DDI_INTR_UNCLAIMED
);
1704 (void) ddi_dma_sync(instance
->mfi_internal_dma_obj
.dma_handle
,
1705 0, 0, DDI_DMA_SYNC_FORCPU
);
1707 if (megasas_check_dma_handle(instance
->mfi_internal_dma_obj
.dma_handle
)
1709 megasas_fm_ereport(instance
, DDI_FM_DEVICE_NO_RESPONSE
);
1710 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
1711 return (DDI_INTR_UNCLAIMED
);
1714 producer
= *instance
->producer
;
1715 consumer
= *instance
->consumer
;
1717 con_log(CL_ANN1
, (CE_CONT
, " producer %x consumer %x ",
1718 producer
, consumer
));
1720 mutex_enter(&instance
->completed_pool_mtx
);
1722 while (consumer
!= producer
) {
1723 context
= instance
->reply_queue
[consumer
];
1724 cmd
= instance
->cmd_list
[context
];
1725 mlist_add_tail(&cmd
->list
, &instance
->completed_pool_list
);
1728 if (consumer
== (instance
->max_fw_cmds
+ 1)) {
1733 mutex_exit(&instance
->completed_pool_mtx
);
1735 *instance
->consumer
= consumer
;
1736 (void) ddi_dma_sync(instance
->mfi_internal_dma_obj
.dma_handle
,
1737 0, 0, DDI_DMA_SYNC_FORDEV
);
1739 if (instance
->softint_running
) {
1745 if (instance
->isr_level
== HIGH_LEVEL_INTR
) {
1746 if (need_softintr
) {
1747 ddi_trigger_softintr(instance
->soft_intr_id
);
1751 * Not a high-level interrupt, therefore call the soft level
1752 * interrupt explicitly
1754 (void) megasas_softintr(instance
);
1757 return (DDI_INTR_CLAIMED
);
1762 * ************************************************************************** *
1766 * ************************************************************************** *
1769 * get_mfi_pkt : Get a command from the free pool
1771 static struct megasas_cmd
*
1772 get_mfi_pkt(struct megasas_instance
*instance
)
1774 mlist_t
*head
= &instance
->cmd_pool_list
;
1775 struct megasas_cmd
*cmd
= NULL
;
1777 mutex_enter(&instance
->cmd_pool_mtx
);
1778 ASSERT(mutex_owned(&instance
->cmd_pool_mtx
));
1780 if (!mlist_empty(head
)) {
1781 cmd
= mlist_entry(head
->next
, struct megasas_cmd
, list
);
1782 mlist_del_init(head
->next
);
1786 mutex_exit(&instance
->cmd_pool_mtx
);
1792 * return_mfi_pkt : Return a cmd to free command pool
1795 return_mfi_pkt(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
)
1797 mutex_enter(&instance
->cmd_pool_mtx
);
1798 ASSERT(mutex_owned(&instance
->cmd_pool_mtx
));
1800 mlist_add(&cmd
->list
, &instance
->cmd_pool_list
);
1802 mutex_exit(&instance
->cmd_pool_mtx
);
1806 * destroy_mfi_frame_pool
1809 destroy_mfi_frame_pool(struct megasas_instance
*instance
)
1812 uint32_t max_cmd
= instance
->max_fw_cmds
;
1814 struct megasas_cmd
*cmd
;
1816 /* return all frames to pool */
1817 for (i
= 0; i
< max_cmd
; i
++) {
1819 cmd
= instance
->cmd_list
[i
];
1821 if (cmd
->frame_dma_obj_status
== DMA_OBJ_ALLOCATED
)
1822 (void) mega_free_dma_obj(instance
, cmd
->frame_dma_obj
);
1824 cmd
->frame_dma_obj_status
= DMA_OBJ_FREED
;
1830 * create_mfi_frame_pool
1833 create_mfi_frame_pool(struct megasas_instance
*instance
)
1840 uint32_t tot_frame_size
;
1842 struct megasas_cmd
*cmd
;
1844 max_cmd
= instance
->max_fw_cmds
;
1846 sge_sz
= sizeof (struct megasas_sge64
);
1848 /* calculated the number of 64byte frames required for SGL */
1849 sgl_sz
= sge_sz
* instance
->max_num_sge
;
1850 tot_frame_size
= sgl_sz
+ MEGAMFI_FRAME_SIZE
+ SENSE_LENGTH
;
1852 con_log(CL_DLEVEL3
, (CE_NOTE
, "create_mfi_frame_pool: "
1853 "sgl_sz %x tot_frame_size %x", sgl_sz
, tot_frame_size
));
1855 while (i
< max_cmd
) {
1856 cmd
= instance
->cmd_list
[i
];
1858 cmd
->frame_dma_obj
.size
= tot_frame_size
;
1859 cmd
->frame_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
1860 cmd
->frame_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
1861 cmd
->frame_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
1862 cmd
->frame_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
1863 cmd
->frame_dma_obj
.dma_attr
.dma_attr_align
= 64;
1866 cookie_cnt
= mega_alloc_dma_obj(instance
, &cmd
->frame_dma_obj
);
1868 if (cookie_cnt
== -1 || cookie_cnt
> 1) {
1869 con_log(CL_ANN
, (CE_WARN
,
1870 "create_mfi_frame_pool: could not alloc."));
1871 return (DDI_FAILURE
);
1874 bzero(cmd
->frame_dma_obj
.buffer
, tot_frame_size
);
1876 cmd
->frame_dma_obj_status
= DMA_OBJ_ALLOCATED
;
1877 cmd
->frame
= (union megasas_frame
*)cmd
->frame_dma_obj
.buffer
;
1878 cmd
->frame_phys_addr
=
1879 cmd
->frame_dma_obj
.dma_cookie
[0].dmac_address
;
1881 cmd
->sense
= (uint8_t *)(((unsigned long)
1882 cmd
->frame_dma_obj
.buffer
) +
1883 tot_frame_size
- SENSE_LENGTH
);
1884 cmd
->sense_phys_addr
=
1885 cmd
->frame_dma_obj
.dma_cookie
[0].dmac_address
+
1886 tot_frame_size
- SENSE_LENGTH
;
1888 if (!cmd
->frame
|| !cmd
->sense
) {
1889 con_log(CL_ANN
, (CE_NOTE
,
1890 "megasas: pci_pool_alloc failed \n"));
1895 cmd
->frame
->io
.context
= cmd
->index
;
1898 con_log(CL_DLEVEL3
, (CE_NOTE
, "[%x]-%x",
1899 cmd
->frame
->io
.context
, cmd
->frame_phys_addr
));
1902 return (DDI_SUCCESS
);
1906 * free_additional_dma_buffer
1909 free_additional_dma_buffer(struct megasas_instance
*instance
)
1911 if (instance
->mfi_internal_dma_obj
.status
== DMA_OBJ_ALLOCATED
) {
1912 (void) mega_free_dma_obj(instance
,
1913 instance
->mfi_internal_dma_obj
);
1914 instance
->mfi_internal_dma_obj
.status
= DMA_OBJ_FREED
;
1917 if (instance
->mfi_evt_detail_obj
.status
== DMA_OBJ_ALLOCATED
) {
1918 (void) mega_free_dma_obj(instance
,
1919 instance
->mfi_evt_detail_obj
);
1920 instance
->mfi_evt_detail_obj
.status
= DMA_OBJ_FREED
;
1925 * alloc_additional_dma_buffer
1928 alloc_additional_dma_buffer(struct megasas_instance
*instance
)
1930 uint32_t reply_q_sz
;
1931 uint32_t internal_buf_size
= PAGESIZE
*2;
1933 /* max cmds plus 1 + producer & consumer */
1934 reply_q_sz
= sizeof (uint32_t) * (instance
->max_fw_cmds
+ 1 + 2);
1936 instance
->mfi_internal_dma_obj
.size
= internal_buf_size
;
1937 instance
->mfi_internal_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
1938 instance
->mfi_internal_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
1939 instance
->mfi_internal_dma_obj
.dma_attr
.dma_attr_count_max
=
1941 instance
->mfi_internal_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
1943 if (mega_alloc_dma_obj(instance
, &instance
->mfi_internal_dma_obj
)
1945 con_log(CL_ANN
, (CE_WARN
, "megaraid: could not alloc reply Q"));
1946 return (DDI_FAILURE
);
1949 bzero(instance
->mfi_internal_dma_obj
.buffer
, internal_buf_size
);
1951 instance
->mfi_internal_dma_obj
.status
|= DMA_OBJ_ALLOCATED
;
1953 instance
->producer
= (uint32_t *)((unsigned long)
1954 instance
->mfi_internal_dma_obj
.buffer
);
1955 instance
->consumer
= (uint32_t *)((unsigned long)
1956 instance
->mfi_internal_dma_obj
.buffer
+ 4);
1957 instance
->reply_queue
= (uint32_t *)((unsigned long)
1958 instance
->mfi_internal_dma_obj
.buffer
+ 8);
1959 instance
->internal_buf
= (caddr_t
)(((unsigned long)
1960 instance
->mfi_internal_dma_obj
.buffer
) + reply_q_sz
+ 8);
1961 instance
->internal_buf_dmac_add
=
1962 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
+
1964 instance
->internal_buf_size
= internal_buf_size
-
1967 /* allocate evt_detail */
1968 instance
->mfi_evt_detail_obj
.size
= sizeof (struct megasas_evt_detail
);
1969 instance
->mfi_evt_detail_obj
.dma_attr
= megasas_generic_dma_attr
;
1970 instance
->mfi_evt_detail_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
1971 instance
->mfi_evt_detail_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
1972 instance
->mfi_evt_detail_obj
.dma_attr
.dma_attr_sgllen
= 1;
1973 instance
->mfi_evt_detail_obj
.dma_attr
.dma_attr_align
= 1;
1975 if (mega_alloc_dma_obj(instance
, &instance
->mfi_evt_detail_obj
) != 1) {
1976 con_log(CL_ANN
, (CE_WARN
, "alloc_additional_dma_buffer: "
1977 "could not data transfer buffer alloc."));
1978 return (DDI_FAILURE
);
1981 bzero(instance
->mfi_evt_detail_obj
.buffer
,
1982 sizeof (struct megasas_evt_detail
));
1984 instance
->mfi_evt_detail_obj
.status
|= DMA_OBJ_ALLOCATED
;
1986 return (DDI_SUCCESS
);
1990 * free_space_for_mfi
1993 free_space_for_mfi(struct megasas_instance
*instance
)
1996 uint32_t max_cmd
= instance
->max_fw_cmds
;
1999 if (instance
->cmd_list
== NULL
) {
2003 free_additional_dma_buffer(instance
);
2005 /* first free the MFI frame pool */
2006 destroy_mfi_frame_pool(instance
);
2008 /* free all the commands in the cmd_list */
2009 for (i
= 0; i
< instance
->max_fw_cmds
; i
++) {
2010 kmem_free(instance
->cmd_list
[i
],
2011 sizeof (struct megasas_cmd
));
2013 instance
->cmd_list
[i
] = NULL
;
2016 /* free the cmd_list buffer itself */
2017 kmem_free(instance
->cmd_list
,
2018 sizeof (struct megasas_cmd
*) * max_cmd
);
2020 instance
->cmd_list
= NULL
;
2022 INIT_LIST_HEAD(&instance
->cmd_pool_list
);
2026 * alloc_space_for_mfi
2029 alloc_space_for_mfi(struct megasas_instance
*instance
)
2035 struct megasas_cmd
*cmd
;
2037 max_cmd
= instance
->max_fw_cmds
;
2038 sz
= sizeof (struct megasas_cmd
*) * max_cmd
;
2041 * instance->cmd_list is an array of struct megasas_cmd pointers.
2042 * Allocate the dynamic array first and then allocate individual
2045 instance
->cmd_list
= kmem_zalloc(sz
, KM_SLEEP
);
2046 ASSERT(instance
->cmd_list
);
2048 for (i
= 0; i
< max_cmd
; i
++) {
2049 instance
->cmd_list
[i
] = kmem_zalloc(sizeof (struct megasas_cmd
),
2051 ASSERT(instance
->cmd_list
[i
]);
2054 INIT_LIST_HEAD(&instance
->cmd_pool_list
);
2056 /* add all the commands to command pool (instance->cmd_pool) */
2057 for (i
= 0; i
< max_cmd
; i
++) {
2058 cmd
= instance
->cmd_list
[i
];
2061 mlist_add_tail(&cmd
->list
, &instance
->cmd_pool_list
);
2064 /* create a frame pool and assign one frame to each cmd */
2065 if (create_mfi_frame_pool(instance
)) {
2066 con_log(CL_ANN
, (CE_NOTE
, "error creating frame DMA pool\n"));
2067 return (DDI_FAILURE
);
2070 /* create a frame pool and assign one frame to each cmd */
2071 if (alloc_additional_dma_buffer(instance
)) {
2072 con_log(CL_ANN
, (CE_NOTE
, "error creating frame DMA pool\n"));
2073 return (DDI_FAILURE
);
2076 return (DDI_SUCCESS
);
2083 get_ctrl_info(struct megasas_instance
*instance
,
2084 struct megasas_ctrl_info
*ctrl_info
)
2088 struct megasas_cmd
*cmd
;
2089 struct megasas_dcmd_frame
*dcmd
;
2090 struct megasas_ctrl_info
*ci
;
2092 cmd
= get_mfi_pkt(instance
);
2095 con_log(CL_ANN
, (CE_WARN
,
2096 "Failed to get a cmd for ctrl info\n"));
2097 return (DDI_FAILURE
);
2100 dcmd
= &cmd
->frame
->dcmd
;
2102 ci
= (struct megasas_ctrl_info
*)instance
->internal_buf
;
2105 con_log(CL_ANN
, (CE_WARN
,
2106 "Failed to alloc mem for ctrl info\n"));
2107 return_mfi_pkt(instance
, cmd
);
2108 return (DDI_FAILURE
);
2111 (void) memset(ci
, 0, sizeof (struct megasas_ctrl_info
));
2113 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
2114 (void) memset(dcmd
->mbox
.b
, 0, DCMD_MBOX_SZ
);
2116 dcmd
->cmd
= MFI_CMD_OP_DCMD
;
2117 dcmd
->cmd_status
= MFI_CMD_STATUS_POLL_MODE
;
2118 dcmd
->sge_count
= 1;
2119 dcmd
->flags
= MFI_FRAME_DIR_READ
;
2121 dcmd
->data_xfer_len
= sizeof (struct megasas_ctrl_info
);
2122 dcmd
->opcode
= MR_DCMD_CTRL_GET_INFO
;
2123 dcmd
->sgl
.sge32
[0].phys_addr
= instance
->internal_buf_dmac_add
;
2124 dcmd
->sgl
.sge32
[0].length
= sizeof (struct megasas_ctrl_info
);
2126 cmd
->frame_count
= 1;
2128 if (!instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
)) {
2130 (void) memcpy(ctrl_info
, ci
, sizeof (struct megasas_ctrl_info
));
2132 con_log(CL_ANN
, (CE_WARN
, "get_ctrl_info: Ctrl info failed\n"));
2136 return_mfi_pkt(instance
, cmd
);
2137 if (megasas_common_check(instance
, cmd
) != DDI_SUCCESS
) {
2148 abort_aen_cmd(struct megasas_instance
*instance
,
2149 struct megasas_cmd
*cmd_to_abort
)
2153 struct megasas_cmd
*cmd
;
2154 struct megasas_abort_frame
*abort_fr
;
2156 cmd
= get_mfi_pkt(instance
);
2159 con_log(CL_ANN
, (CE_WARN
,
2160 "Failed to get a cmd for ctrl info\n"));
2161 return (DDI_FAILURE
);
2164 abort_fr
= &cmd
->frame
->abort
;
2166 /* prepare and issue the abort frame */
2167 abort_fr
->cmd
= MFI_CMD_OP_ABORT
;
2168 abort_fr
->cmd_status
= MFI_CMD_STATUS_SYNC_MODE
;
2169 abort_fr
->flags
= 0;
2170 abort_fr
->abort_context
= cmd_to_abort
->index
;
2171 abort_fr
->abort_mfi_phys_addr_lo
= cmd_to_abort
->frame_phys_addr
;
2172 abort_fr
->abort_mfi_phys_addr_hi
= 0;
2174 instance
->aen_cmd
->abort_aen
= 1;
2176 cmd
->sync_cmd
= MEGASAS_TRUE
;
2177 cmd
->frame_count
= 1;
2179 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
2180 con_log(CL_ANN
, (CE_WARN
,
2181 "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
2187 instance
->aen_cmd
->abort_aen
= 1;
2188 instance
->aen_cmd
= 0;
2190 return_mfi_pkt(instance
, cmd
);
2191 (void) megasas_common_check(instance
, cmd
);
2200 init_mfi(struct megasas_instance
*instance
)
2203 struct megasas_cmd
*cmd
;
2204 struct megasas_ctrl_info ctrl_info
;
2205 struct megasas_init_frame
*init_frame
;
2206 struct megasas_init_queue_info
*initq_info
;
2208 if ((ddi_dev_regsize(instance
->dip
, REGISTER_SET_IO
, ®length
)
2209 != DDI_SUCCESS
) || reglength
< MINIMUM_MFI_MEM_SZ
) {
2210 return (DDI_FAILURE
);
2213 if (reglength
> DEFAULT_MFI_MEM_SZ
) {
2214 reglength
= DEFAULT_MFI_MEM_SZ
;
2215 con_log(CL_DLEVEL1
, (CE_NOTE
,
2216 "mega: register length to map is 0x%lx bytes", reglength
));
2219 if (ddi_regs_map_setup(instance
->dip
, REGISTER_SET_IO
,
2220 &instance
->regmap
, 0, reglength
, &endian_attr
,
2221 &instance
->regmap_handle
) != DDI_SUCCESS
) {
2222 con_log(CL_ANN
, (CE_NOTE
,
2223 "megaraid: couldn't map control registers"));
2225 goto fail_mfi_reg_setup
;
2228 /* we expect the FW state to be READY */
2229 if (mfi_state_transition_to_ready(instance
)) {
2230 con_log(CL_ANN
, (CE_WARN
, "megaraid: F/W is not ready"));
2231 goto fail_ready_state
;
2234 /* get various operational parameters from status register */
2235 instance
->max_num_sge
=
2236 (instance
->func_ptr
->read_fw_status_reg(instance
) &
2239 * Reduce the max supported cmds by 1. This is to ensure that the
2240 * reply_q_sz (1 more than the max cmd that driver may send)
2241 * does not exceed max cmds that the FW can support
2243 instance
->max_fw_cmds
=
2244 instance
->func_ptr
->read_fw_status_reg(instance
) & 0xFFFF;
2245 instance
->max_fw_cmds
= instance
->max_fw_cmds
- 1;
2247 instance
->max_num_sge
=
2248 (instance
->max_num_sge
> MEGASAS_MAX_SGE_CNT
) ?
2249 MEGASAS_MAX_SGE_CNT
: instance
->max_num_sge
;
2251 /* create a pool of commands */
2252 if (alloc_space_for_mfi(instance
))
2253 goto fail_alloc_fw_space
;
2255 /* disable interrupt for initial preparation */
2256 instance
->func_ptr
->disable_intr(instance
);
2259 * Prepare a init frame. Note the init frame points to queue info
2260 * structure. Each frame has SGL allocated after first 64 bytes. For
2261 * this frame - since we don't need any SGL - we use SGL's space as
2262 * queue info structure
2264 cmd
= get_mfi_pkt(instance
);
2266 init_frame
= (struct megasas_init_frame
*)cmd
->frame
;
2267 initq_info
= (struct megasas_init_queue_info
*)
2268 ((unsigned long)init_frame
+ 64);
2270 (void) memset(init_frame
, 0, MEGAMFI_FRAME_SIZE
);
2271 (void) memset(initq_info
, 0, sizeof (struct megasas_init_queue_info
));
2273 initq_info
->init_flags
= 0;
2275 initq_info
->reply_queue_entries
= instance
->max_fw_cmds
+ 1;
2277 initq_info
->producer_index_phys_addr_hi
= 0;
2278 initq_info
->producer_index_phys_addr_lo
=
2279 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
;
2281 initq_info
->consumer_index_phys_addr_hi
= 0;
2282 initq_info
->consumer_index_phys_addr_lo
=
2283 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
+ 4;
2285 initq_info
->reply_queue_start_phys_addr_hi
= 0;
2286 initq_info
->reply_queue_start_phys_addr_lo
=
2287 instance
->mfi_internal_dma_obj
.dma_cookie
[0].dmac_address
+ 8;
2289 init_frame
->cmd
= MFI_CMD_OP_INIT
;
2290 init_frame
->cmd_status
= MFI_CMD_STATUS_POLL_MODE
;
2291 init_frame
->flags
= 0;
2292 init_frame
->queue_info_new_phys_addr_lo
=
2293 cmd
->frame_phys_addr
+ 64;
2294 init_frame
->queue_info_new_phys_addr_hi
= 0;
2296 init_frame
->data_xfer_len
= sizeof (struct megasas_init_queue_info
);
2298 cmd
->frame_count
= 1;
2300 /* issue the init frame in polled mode */
2301 if (instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
)) {
2302 con_log(CL_ANN
, (CE_WARN
, "failed to init firmware"));
2306 return_mfi_pkt(instance
, cmd
);
2307 if (megasas_common_check(instance
, cmd
) != DDI_SUCCESS
) {
2311 /* gather misc FW related information */
2312 if (!get_ctrl_info(instance
, &ctrl_info
)) {
2313 instance
->max_sectors_per_req
= ctrl_info
.max_request_size
;
2314 con_log(CL_ANN1
, (CE_NOTE
, "product name %s ld present %d",
2315 ctrl_info
.product_name
, ctrl_info
.ld_present_count
));
2317 instance
->max_sectors_per_req
= instance
->max_num_sge
*
2321 if (megasas_check_acc_handle(instance
->regmap_handle
) != DDI_SUCCESS
) {
2328 fail_alloc_fw_space
:
2330 free_space_for_mfi(instance
);
2333 ddi_regs_map_free(&instance
->regmap_handle
);
2336 return (DDI_FAILURE
);
2340 * mfi_state_transition_to_ready : Move the FW to READY state
2342 * @reg_set : MFI register set
2345 mfi_state_transition_to_ready(struct megasas_instance
*instance
)
2354 instance
->func_ptr
->read_fw_status_reg(instance
) & MFI_STATE_MASK
;
2355 con_log(CL_ANN1
, (CE_NOTE
,
2356 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state
));
2358 while (fw_state
!= MFI_STATE_READY
) {
2359 con_log(CL_ANN
, (CE_NOTE
,
2360 "mfi_state_transition_to_ready:FW state%x", fw_state
));
2363 case MFI_STATE_FAULT
:
2364 con_log(CL_ANN
, (CE_NOTE
,
2365 "megasas: FW in FAULT state!!"));
2368 case MFI_STATE_WAIT_HANDSHAKE
:
2369 /* set the CLR bit in IMR0 */
2370 con_log(CL_ANN
, (CE_NOTE
,
2371 "megasas: FW waiting for HANDSHAKE"));
2373 * PCI_Hot Plug: MFI F/W requires
2374 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2377 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
2378 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE
|
2379 MFI_INIT_HOTPLUG
, instance
);
2382 cur_state
= MFI_STATE_WAIT_HANDSHAKE
;
2384 case MFI_STATE_BOOT_MESSAGE_PENDING
:
2385 /* set the CLR bit in IMR0 */
2386 con_log(CL_ANN
, (CE_NOTE
,
2387 "megasas: FW state boot message pending"));
2389 * PCI_Hot Plug: MFI F/W requires
2390 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2393 WR_IB_DOORBELL(MFI_INIT_HOTPLUG
, instance
);
2396 cur_state
= MFI_STATE_BOOT_MESSAGE_PENDING
;
2398 case MFI_STATE_OPERATIONAL
:
2399 /* bring it to READY state; assuming max wait 2 secs */
2400 instance
->func_ptr
->disable_intr(instance
);
2401 con_log(CL_ANN1
, (CE_NOTE
,
2402 "megasas: FW in OPERATIONAL state"));
2404 * PCI_Hot Plug: MFI F/W requires
2405 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
2408 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
2409 WR_IB_DOORBELL(MFI_RESET_FLAGS
, instance
);
2412 cur_state
= MFI_STATE_OPERATIONAL
;
2414 case MFI_STATE_UNDEFINED
:
2415 /* this state should not last for more than 2 seconds */
2416 con_log(CL_ANN
, (CE_NOTE
, "FW state undefined\n"));
2419 cur_state
= MFI_STATE_UNDEFINED
;
2421 case MFI_STATE_BB_INIT
:
2423 cur_state
= MFI_STATE_BB_INIT
;
2425 case MFI_STATE_FW_INIT
:
2427 cur_state
= MFI_STATE_FW_INIT
;
2429 case MFI_STATE_DEVICE_SCAN
:
2431 cur_state
= MFI_STATE_DEVICE_SCAN
;
2434 con_log(CL_ANN
, (CE_NOTE
,
2435 "megasas: Unknown state 0x%x\n", fw_state
));
2439 /* the cur_state should not last for more than max_wait secs */
2440 for (i
= 0; i
< (max_wait
* MILLISEC
); i
++) {
2441 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
2443 instance
->func_ptr
->read_fw_status_reg(instance
) &
2446 if (fw_state
== cur_state
) {
2447 delay(1 * drv_usectohz(MILLISEC
));
2453 /* return error if fw_state hasn't changed after max_wait */
2454 if (fw_state
== cur_state
) {
2455 con_log(CL_ANN
, (CE_NOTE
,
2456 "FW state hasn't changed in %d secs\n", max_wait
));
2461 fw_ctrl
= RD_IB_DOORBELL(instance
);
2463 con_log(CL_ANN1
, (CE_NOTE
,
2464 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl
));
2467 * Write 0xF to the doorbell register to do the following.
2468 * - Abort all outstanding commands (bit 0).
2469 * - Transition from OPERATIONAL to READY state (bit 1).
2470 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
2471 * - Set to release FW to continue running (i.e. BIOS handshake
2474 WR_IB_DOORBELL(0xF, instance
);
2476 if (megasas_check_acc_handle(instance
->regmap_handle
) != DDI_SUCCESS
) {
2486 get_seq_num(struct megasas_instance
*instance
,
2487 struct megasas_evt_log_info
*eli
)
2491 dma_obj_t dcmd_dma_obj
;
2492 struct megasas_cmd
*cmd
;
2493 struct megasas_dcmd_frame
*dcmd
;
2495 cmd
= get_mfi_pkt(instance
);
2498 cmn_err(CE_WARN
, "megasas: failed to get a cmd\n");
2502 dcmd
= &cmd
->frame
->dcmd
;
2504 /* allocate the data transfer buffer */
2505 dcmd_dma_obj
.size
= sizeof (struct megasas_evt_log_info
);
2506 dcmd_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
2507 dcmd_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
2508 dcmd_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
2509 dcmd_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
2510 dcmd_dma_obj
.dma_attr
.dma_attr_align
= 1;
2512 if (mega_alloc_dma_obj(instance
, &dcmd_dma_obj
) != 1) {
2513 con_log(CL_ANN
, (CE_WARN
,
2514 "get_seq_num: could not data transfer buffer alloc."));
2515 return (DDI_FAILURE
);
2518 (void) memset(dcmd_dma_obj
.buffer
, 0,
2519 sizeof (struct megasas_evt_log_info
));
2521 (void) memset(dcmd
->mbox
.b
, 0, DCMD_MBOX_SZ
);
2523 dcmd
->cmd
= MFI_CMD_OP_DCMD
;
2524 dcmd
->cmd_status
= 0;
2525 dcmd
->sge_count
= 1;
2526 dcmd
->flags
= MFI_FRAME_DIR_READ
;
2528 dcmd
->data_xfer_len
= sizeof (struct megasas_evt_log_info
);
2529 dcmd
->opcode
= MR_DCMD_CTRL_EVENT_GET_INFO
;
2530 dcmd
->sgl
.sge32
[0].length
= sizeof (struct megasas_evt_log_info
);
2531 dcmd
->sgl
.sge32
[0].phys_addr
= dcmd_dma_obj
.dma_cookie
[0].dmac_address
;
2533 cmd
->sync_cmd
= MEGASAS_TRUE
;
2534 cmd
->frame_count
= 1;
2536 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
2537 cmn_err(CE_WARN
, "get_seq_num: "
2538 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
2541 /* copy the data back into callers buffer */
2542 bcopy(dcmd_dma_obj
.buffer
, eli
,
2543 sizeof (struct megasas_evt_log_info
));
2547 if (mega_free_dma_obj(instance
, dcmd_dma_obj
) != DDI_SUCCESS
)
2550 return_mfi_pkt(instance
, cmd
);
2551 if (megasas_common_check(instance
, cmd
) != DDI_SUCCESS
) {
2561 start_mfi_aen(struct megasas_instance
*instance
)
2565 struct megasas_evt_log_info eli
;
2566 union megasas_evt_class_locale class_locale
;
2568 /* get the latest sequence number from FW */
2569 (void) memset(&eli
, 0, sizeof (struct megasas_evt_log_info
));
2571 if (get_seq_num(instance
, &eli
)) {
2572 cmn_err(CE_WARN
, "start_mfi_aen: failed to get seq num\n");
2576 /* register AEN with FW for latest sequence number plus 1 */
2577 class_locale
.members
.reserved
= 0;
2578 class_locale
.members
.locale
= MR_EVT_LOCALE_ALL
;
2579 class_locale
.members
.class = MR_EVT_CLASS_CRITICAL
;
2581 ret
= register_mfi_aen(instance
, eli
.newest_seq_num
+ 1,
2585 cmn_err(CE_WARN
, "start_mfi_aen: aen registration failed\n");
2596 flush_cache(struct megasas_instance
*instance
)
2598 struct megasas_cmd
*cmd
;
2599 struct megasas_dcmd_frame
*dcmd
;
2601 if (!(cmd
= get_mfi_pkt(instance
)))
2604 dcmd
= &cmd
->frame
->dcmd
;
2606 (void) memset(dcmd
->mbox
.b
, 0, DCMD_MBOX_SZ
);
2608 dcmd
->cmd
= MFI_CMD_OP_DCMD
;
2609 dcmd
->cmd_status
= 0x0;
2610 dcmd
->sge_count
= 0;
2611 dcmd
->flags
= MFI_FRAME_DIR_NONE
;
2613 dcmd
->data_xfer_len
= 0;
2614 dcmd
->opcode
= MR_DCMD_CTRL_CACHE_FLUSH
;
2615 dcmd
->mbox
.b
[0] = MR_FLUSH_CTRL_CACHE
| MR_FLUSH_DISK_CACHE
;
2617 cmd
->frame_count
= 1;
2619 if (instance
->func_ptr
->issue_cmd_in_poll_mode(instance
, cmd
)) {
2621 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
2623 con_log(CL_DLEVEL1
, (CE_NOTE
, "done"));
2624 return_mfi_pkt(instance
, cmd
);
2625 (void) megasas_common_check(instance
, cmd
);
2629 * service_mfi_aen- Completes an AEN command
2630 * @instance: Adapter soft state
2631 * @cmd: Command to be completed
2635 service_mfi_aen(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
)
2638 struct megasas_evt_detail
*evt_detail
=
2639 (struct megasas_evt_detail
*)instance
->mfi_evt_detail_obj
.buffer
;
2641 cmd
->cmd_status
= cmd
->frame
->io
.cmd_status
;
2643 if (cmd
->cmd_status
== ENODATA
) {
2644 cmd
->cmd_status
= 0;
2648 * log the MFI AEN event to the sysevent queue so that
2649 * application will get noticed
2651 if (ddi_log_sysevent(instance
->dip
, DDI_VENDOR_LSI
, "LSIMEGA", "SAS",
2652 NULL
, NULL
, DDI_NOSLEEP
) != DDI_SUCCESS
) {
2653 int instance_no
= ddi_get_instance(instance
->dip
);
2654 con_log(CL_ANN
, (CE_WARN
,
2655 "mega%d: Failed to log AEN event", instance_no
));
2658 /* get copy of seq_num and class/locale for re-registration */
2659 seq_num
= evt_detail
->seq_num
;
2661 (void) memset(instance
->mfi_evt_detail_obj
.buffer
, 0,
2662 sizeof (struct megasas_evt_detail
));
2664 cmd
->frame
->dcmd
.cmd_status
= 0x0;
2665 cmd
->frame
->dcmd
.mbox
.w
[0] = seq_num
;
2667 instance
->aen_seq_num
= seq_num
;
2669 cmd
->frame_count
= 1;
2671 /* Issue the aen registration frame */
2672 instance
->func_ptr
->issue_cmd(cmd
, instance
);
2676 * complete_cmd_in_sync_mode - Completes an internal command
2677 * @instance: Adapter soft state
2678 * @cmd: Command to be completed
2680 * The issue_cmd_in_sync_mode() function waits for a command to complete
2681 * after it issues a command. This function wakes up that waiting routine by
2682 * calling wake_up() on the wait queue.
2685 complete_cmd_in_sync_mode(struct megasas_instance
*instance
,
2686 struct megasas_cmd
*cmd
)
2688 cmd
->cmd_status
= cmd
->frame
->io
.cmd_status
;
2690 cmd
->sync_cmd
= MEGASAS_FALSE
;
2692 if (cmd
->cmd_status
== ENODATA
) {
2693 cmd
->cmd_status
= 0;
2696 cv_broadcast(&instance
->int_cmd_cv
);
2700 * megasas_softintr - The Software ISR
2701 * @param arg : HBA soft state
2703 * called from high-level interrupt if hi-level interrupt are not there,
2704 * otherwise triggered as a soft interrupt
2707 megasas_softintr(struct megasas_instance
*instance
)
2709 struct scsi_pkt
*pkt
;
2710 struct scsa_cmd
*acmd
;
2711 struct megasas_cmd
*cmd
;
2712 struct mlist_head
*pos
, *next
;
2713 mlist_t process_list
;
2714 struct megasas_header
*hdr
;
2715 struct scsi_arq_status
*arqstat
;
2717 con_log(CL_ANN1
, (CE_CONT
, "megasas_softintr called"));
2720 mutex_enter(&instance
->completed_pool_mtx
);
2722 if (mlist_empty(&instance
->completed_pool_list
)) {
2723 mutex_exit(&instance
->completed_pool_mtx
);
2724 return (DDI_INTR_UNCLAIMED
);
2727 instance
->softint_running
= 1;
2729 INIT_LIST_HEAD(&process_list
);
2730 mlist_splice(&instance
->completed_pool_list
, &process_list
);
2731 INIT_LIST_HEAD(&instance
->completed_pool_list
);
2733 mutex_exit(&instance
->completed_pool_mtx
);
2735 /* perform all callbacks first, before releasing the SCBs */
2736 mlist_for_each_safe(pos
, next
, &process_list
) {
2737 cmd
= mlist_entry(pos
, struct megasas_cmd
, list
);
2739 /* syncronize the Cmd frame for the controller */
2740 (void) ddi_dma_sync(cmd
->frame_dma_obj
.dma_handle
,
2741 0, 0, DDI_DMA_SYNC_FORCPU
);
2743 if (megasas_check_dma_handle(cmd
->frame_dma_obj
.dma_handle
) !=
2745 megasas_fm_ereport(instance
, DDI_FM_DEVICE_NO_RESPONSE
);
2746 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
2747 return (DDI_INTR_UNCLAIMED
);
2750 hdr
= &cmd
->frame
->hdr
;
2752 /* remove the internal command from the process list */
2753 mlist_del_init(&cmd
->list
);
2756 case MFI_CMD_OP_PD_SCSI
:
2757 case MFI_CMD_OP_LD_SCSI
:
2758 case MFI_CMD_OP_LD_READ
:
2759 case MFI_CMD_OP_LD_WRITE
:
2761 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
2762 * could have been issued either through an
2763 * IO path or an IOCTL path. If it was via IOCTL,
2764 * we will send it to internal completion.
2766 if (cmd
->sync_cmd
== MEGASAS_TRUE
) {
2767 complete_cmd_in_sync_mode(instance
, cmd
);
2771 /* regular commands */
2773 pkt
= CMD2PKT(acmd
);
2775 if (acmd
->cmd_flags
& CFLAG_DMAVALID
) {
2776 if (acmd
->cmd_flags
& CFLAG_CONSISTENT
) {
2777 (void) ddi_dma_sync(acmd
->cmd_dmahandle
,
2778 acmd
->cmd_dma_offset
,
2780 DDI_DMA_SYNC_FORCPU
);
2784 pkt
->pkt_reason
= CMD_CMPLT
;
2785 pkt
->pkt_statistics
= 0;
2786 pkt
->pkt_state
= STATE_GOT_BUS
2787 | STATE_GOT_TARGET
| STATE_SENT_CMD
2788 | STATE_XFERRED_DATA
| STATE_GOT_STATUS
;
2790 con_log(CL_ANN1
, (CE_CONT
,
2791 "CDB[0] = %x completed for %s: size %lx context %x",
2792 pkt
->pkt_cdbp
[0], ((acmd
->islogical
) ? "LD" : "PD"),
2793 acmd
->cmd_dmacount
, hdr
->context
));
2795 if (pkt
->pkt_cdbp
[0] == SCMD_INQUIRY
) {
2796 struct scsi_inquiry
*inq
;
2798 if (acmd
->cmd_dmacount
!= 0) {
2799 bp_mapin(acmd
->cmd_buf
);
2800 inq
= (struct scsi_inquiry
*)
2801 acmd
->cmd_buf
->b_un
.b_addr
;
2803 /* don't expose physical drives to OS */
2804 if (acmd
->islogical
&&
2805 (hdr
->cmd_status
== MFI_STAT_OK
)) {
2806 display_scsi_inquiry(
2808 } else if ((hdr
->cmd_status
==
2809 MFI_STAT_OK
) && inq
->inq_dtype
==
2812 display_scsi_inquiry(
2815 /* for physical disk */
2817 MFI_STAT_DEVICE_NOT_FOUND
;
2822 switch (hdr
->cmd_status
) {
2824 pkt
->pkt_scbp
[0] = STATUS_GOOD
;
2826 case MFI_STAT_LD_CC_IN_PROGRESS
:
2827 case MFI_STAT_LD_RECON_IN_PROGRESS
:
2828 /* SJ - these are not correct way */
2829 pkt
->pkt_scbp
[0] = STATUS_GOOD
;
2831 case MFI_STAT_LD_INIT_IN_PROGRESS
:
2833 (CE_WARN
, "Initialization in Progress"));
2834 pkt
->pkt_reason
= CMD_TRAN_ERR
;
2837 case MFI_STAT_SCSI_DONE_WITH_ERROR
:
2838 con_log(CL_ANN1
, (CE_CONT
, "scsi_done error"));
2840 pkt
->pkt_reason
= CMD_CMPLT
;
2841 ((struct scsi_status
*)
2842 pkt
->pkt_scbp
)->sts_chk
= 1;
2844 if (pkt
->pkt_cdbp
[0] == SCMD_TEST_UNIT_READY
) {
2847 (CE_WARN
, "TEST_UNIT_READY fail"));
2850 pkt
->pkt_state
|= STATE_ARQ_DONE
;
2851 arqstat
= (void *)(pkt
->pkt_scbp
);
2852 arqstat
->sts_rqpkt_reason
= CMD_CMPLT
;
2853 arqstat
->sts_rqpkt_resid
= 0;
2854 arqstat
->sts_rqpkt_state
|=
2855 STATE_GOT_BUS
| STATE_GOT_TARGET
2857 | STATE_XFERRED_DATA
;
2858 *(uint8_t *)&arqstat
->sts_rqpkt_status
=
2862 &(arqstat
->sts_sensedata
),
2864 offsetof(struct scsi_arq_status
,
2868 case MFI_STAT_LD_OFFLINE
:
2869 case MFI_STAT_DEVICE_NOT_FOUND
:
2870 con_log(CL_ANN1
, (CE_CONT
,
2871 "device not found error"));
2872 pkt
->pkt_reason
= CMD_DEV_GONE
;
2873 pkt
->pkt_statistics
= STAT_DISCON
;
2875 case MFI_STAT_LD_LBA_OUT_OF_RANGE
:
2876 pkt
->pkt_state
|= STATE_ARQ_DONE
;
2877 pkt
->pkt_reason
= CMD_CMPLT
;
2878 ((struct scsi_status
*)
2879 pkt
->pkt_scbp
)->sts_chk
= 1;
2881 arqstat
= (void *)(pkt
->pkt_scbp
);
2882 arqstat
->sts_rqpkt_reason
= CMD_CMPLT
;
2883 arqstat
->sts_rqpkt_resid
= 0;
2884 arqstat
->sts_rqpkt_state
|= STATE_GOT_BUS
2885 | STATE_GOT_TARGET
| STATE_SENT_CMD
2886 | STATE_XFERRED_DATA
;
2887 *(uint8_t *)&arqstat
->sts_rqpkt_status
=
2890 arqstat
->sts_sensedata
.es_valid
= 1;
2891 arqstat
->sts_sensedata
.es_key
=
2892 KEY_ILLEGAL_REQUEST
;
2893 arqstat
->sts_sensedata
.es_class
=
2894 CLASS_EXTENDED_SENSE
;
2897 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2898 * ASC: 0x21h; ASCQ: 0x00h;
2900 arqstat
->sts_sensedata
.es_add_code
= 0x21;
2901 arqstat
->sts_sensedata
.es_qual_code
= 0x00;
2906 con_log(CL_ANN
, (CE_CONT
, "Unknown status!"));
2907 pkt
->pkt_reason
= CMD_TRAN_ERR
;
2912 atomic_add_16(&instance
->fw_outstanding
, (-1));
2914 return_mfi_pkt(instance
, cmd
);
2916 (void) megasas_common_check(instance
, cmd
);
2918 if (acmd
->cmd_dmahandle
) {
2919 if (megasas_check_dma_handle(
2920 acmd
->cmd_dmahandle
) != DDI_SUCCESS
) {
2921 ddi_fm_service_impact(instance
->dip
,
2922 DDI_SERVICE_UNAFFECTED
);
2923 pkt
->pkt_reason
= CMD_TRAN_ERR
;
2924 pkt
->pkt_statistics
= 0;
2928 /* Call the callback routine */
2929 if ((pkt
->pkt_flags
& FLAG_NOINTR
) == 0) {
2930 scsi_hba_pkt_comp(pkt
);
2934 case MFI_CMD_OP_SMP
:
2935 case MFI_CMD_OP_STP
:
2936 complete_cmd_in_sync_mode(instance
, cmd
);
2938 case MFI_CMD_OP_DCMD
:
2939 /* see if got an event notification */
2940 if (cmd
->frame
->dcmd
.opcode
==
2941 MR_DCMD_CTRL_EVENT_WAIT
) {
2942 if ((instance
->aen_cmd
== cmd
) &&
2943 (instance
->aen_cmd
->abort_aen
)) {
2944 con_log(CL_ANN
, (CE_WARN
,
2945 "megasas_softintr: "
2946 "aborted_aen returned"));
2948 service_mfi_aen(instance
, cmd
);
2950 atomic_add_16(&instance
->fw_outstanding
,
2954 complete_cmd_in_sync_mode(instance
, cmd
);
2958 case MFI_CMD_OP_ABORT
:
2959 con_log(CL_ANN
, (CE_WARN
, "MFI_CMD_OP_ABORT complete"));
2961 * MFI_CMD_OP_ABORT successfully completed
2962 * in the synchronous mode
2964 complete_cmd_in_sync_mode(instance
, cmd
);
2967 megasas_fm_ereport(instance
, DDI_FM_DEVICE_NO_RESPONSE
);
2968 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
2970 if (cmd
->pkt
!= NULL
) {
2972 if ((pkt
->pkt_flags
& FLAG_NOINTR
) == 0) {
2973 scsi_hba_pkt_comp(pkt
);
2976 con_log(CL_ANN
, (CE_WARN
, "Cmd type unknown !!"));
2981 instance
->softint_running
= 0;
2983 return (DDI_INTR_CLAIMED
);
2987 * mega_alloc_dma_obj
2989 * Allocate the memory and other resources for an dma object.
2992 mega_alloc_dma_obj(struct megasas_instance
*instance
, dma_obj_t
*obj
)
2997 struct ddi_device_acc_attr tmp_endian_attr
;
2999 tmp_endian_attr
= endian_attr
;
3000 tmp_endian_attr
.devacc_attr_access
= DDI_DEFAULT_ACC
;
3001 i
= ddi_dma_alloc_handle(instance
->dip
, &obj
->dma_attr
,
3002 DDI_DMA_SLEEP
, NULL
, &obj
->dma_handle
);
3003 if (i
!= DDI_SUCCESS
) {
3006 case DDI_DMA_BADATTR
:
3007 con_log(CL_ANN
, (CE_WARN
,
3008 "Failed ddi_dma_alloc_handle- Bad atrib"));
3010 case DDI_DMA_NORESOURCES
:
3011 con_log(CL_ANN
, (CE_WARN
,
3012 "Failed ddi_dma_alloc_handle- No Resources"));
3015 con_log(CL_ANN
, (CE_WARN
,
3016 "Failed ddi_dma_alloc_handle :unknown %d", i
));
3023 if ((ddi_dma_mem_alloc(obj
->dma_handle
, obj
->size
, &tmp_endian_attr
,
3024 DDI_DMA_RDWR
| DDI_DMA_STREAMING
, DDI_DMA_SLEEP
, NULL
,
3025 &obj
->buffer
, &alen
, &obj
->acc_handle
) != DDI_SUCCESS
) ||
3028 ddi_dma_free_handle(&obj
->dma_handle
);
3030 con_log(CL_ANN
, (CE_WARN
, "Failed : ddi_dma_mem_alloc"));
3035 if (ddi_dma_addr_bind_handle(obj
->dma_handle
, NULL
, obj
->buffer
,
3036 obj
->size
, DDI_DMA_RDWR
| DDI_DMA_STREAMING
, DDI_DMA_SLEEP
,
3037 NULL
, &obj
->dma_cookie
[0], &cookie_cnt
) != DDI_SUCCESS
) {
3039 ddi_dma_mem_free(&obj
->acc_handle
);
3040 ddi_dma_free_handle(&obj
->dma_handle
);
3042 con_log(CL_ANN
, (CE_WARN
, "Failed : ddi_dma_addr_bind_handle"));
3047 if (megasas_check_dma_handle(obj
->dma_handle
) != DDI_SUCCESS
) {
3048 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
3052 if (megasas_check_acc_handle(obj
->acc_handle
) != DDI_SUCCESS
) {
3053 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_LOST
);
3057 return (cookie_cnt
);
3061 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t)
3063 * De-allocate the memory and other resources for an dma object, which must
3064 * have been alloated by a previous call to mega_alloc_dma_obj()
3067 mega_free_dma_obj(struct megasas_instance
*instance
, dma_obj_t obj
)
3070 if (megasas_check_dma_handle(obj
.dma_handle
) != DDI_SUCCESS
) {
3071 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
3072 return (DDI_FAILURE
);
3075 if (megasas_check_acc_handle(obj
.acc_handle
) != DDI_SUCCESS
) {
3076 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
3077 return (DDI_FAILURE
);
3080 (void) ddi_dma_unbind_handle(obj
.dma_handle
);
3081 ddi_dma_mem_free(&obj
.acc_handle
);
3082 ddi_dma_free_handle(&obj
.dma_handle
);
3084 return (DDI_SUCCESS
);
3088 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
3091 * Allocate dma resources for a new scsi command
3094 megasas_dma_alloc(struct megasas_instance
*instance
, struct scsi_pkt
*pkt
,
3095 struct buf
*bp
, int flags
, int (*callback
)())
3101 ddi_dma_attr_t tmp_dma_attr
= megasas_generic_dma_attr
;
3102 struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
3106 if (bp
->b_flags
& B_READ
) {
3107 acmd
->cmd_flags
&= ~CFLAG_DMASEND
;
3108 dma_flags
= DDI_DMA_READ
;
3110 acmd
->cmd_flags
|= CFLAG_DMASEND
;
3111 dma_flags
= DDI_DMA_WRITE
;
3114 if (flags
& PKT_CONSISTENT
) {
3115 acmd
->cmd_flags
|= CFLAG_CONSISTENT
;
3116 dma_flags
|= DDI_DMA_CONSISTENT
;
3119 if (flags
& PKT_DMA_PARTIAL
) {
3120 dma_flags
|= DDI_DMA_PARTIAL
;
3123 dma_flags
|= DDI_DMA_REDZONE
;
3125 cb
= (callback
== NULL_FUNC
) ? DDI_DMA_DONTWAIT
: DDI_DMA_SLEEP
;
3127 tmp_dma_attr
.dma_attr_sgllen
= instance
->max_num_sge
;
3128 tmp_dma_attr
.dma_attr_addr_hi
= 0xffffffffffffffffull
;
3130 if ((i
= ddi_dma_alloc_handle(instance
->dip
, &tmp_dma_attr
,
3131 cb
, 0, &acmd
->cmd_dmahandle
)) != DDI_SUCCESS
) {
3133 case DDI_DMA_BADATTR
:
3134 bioerror(bp
, EFAULT
);
3137 case DDI_DMA_NORESOURCES
:
3142 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_alloc_handle: "
3143 "0x%x impossible\n", i
));
3144 bioerror(bp
, EFAULT
);
3149 i
= ddi_dma_buf_bind_handle(acmd
->cmd_dmahandle
, bp
, dma_flags
,
3150 cb
, 0, &acmd
->cmd_dmacookies
[0], &acmd
->cmd_ncookies
);
3153 case DDI_DMA_PARTIAL_MAP
:
3154 if ((dma_flags
& DDI_DMA_PARTIAL
) == 0) {
3155 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_buf_bind_handle: "
3156 "DDI_DMA_PARTIAL_MAP impossible\n"));
3157 goto no_dma_cookies
;
3160 if (ddi_dma_numwin(acmd
->cmd_dmahandle
, &acmd
->cmd_nwin
) ==
3162 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_numwin failed\n"));
3163 goto no_dma_cookies
;
3166 if (ddi_dma_getwin(acmd
->cmd_dmahandle
, acmd
->cmd_curwin
,
3167 &acmd
->cmd_dma_offset
, &acmd
->cmd_dma_len
,
3168 &acmd
->cmd_dmacookies
[0], &acmd
->cmd_ncookies
) ==
3171 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_getwin failed\n"));
3172 goto no_dma_cookies
;
3175 goto get_dma_cookies
;
3176 case DDI_DMA_MAPPED
:
3178 acmd
->cmd_dma_len
= 0;
3179 acmd
->cmd_dma_offset
= 0;
3183 acmd
->cmd_dmacount
= 0;
3185 acmd
->cmd_dmacount
+=
3186 acmd
->cmd_dmacookies
[i
++].dmac_size
;
3188 if (i
== instance
->max_num_sge
||
3189 i
== acmd
->cmd_ncookies
)
3192 ddi_dma_nextcookie(acmd
->cmd_dmahandle
,
3193 &acmd
->cmd_dmacookies
[i
]);
3196 acmd
->cmd_cookie
= i
;
3197 acmd
->cmd_cookiecnt
= i
;
3199 acmd
->cmd_flags
|= CFLAG_DMAVALID
;
3201 if (bp
->b_bcount
>= acmd
->cmd_dmacount
) {
3202 pkt
->pkt_resid
= bp
->b_bcount
- acmd
->cmd_dmacount
;
3208 case DDI_DMA_NORESOURCES
:
3211 case DDI_DMA_NOMAPPING
:
3212 bioerror(bp
, EFAULT
);
3214 case DDI_DMA_TOOBIG
:
3215 bioerror(bp
, EINVAL
);
3218 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_buf_bind_handle:"
3219 " DDI_DMA_INUSE impossible\n"));
3222 con_log(CL_ANN
, (CE_PANIC
, "ddi_dma_buf_bind_handle: "
3223 "0x%x impossible\n", i
));
3228 ddi_dma_free_handle(&acmd
->cmd_dmahandle
);
3229 acmd
->cmd_dmahandle
= NULL
;
3230 acmd
->cmd_flags
&= ~CFLAG_DMAVALID
;
3235 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
3237 * move dma resources to next dma window
3241 megasas_dma_move(struct megasas_instance
*instance
, struct scsi_pkt
*pkt
,
3246 struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
3249 * If there are no more cookies remaining in this window,
3250 * must move to the next window first.
3252 if (acmd
->cmd_cookie
== acmd
->cmd_ncookies
) {
3253 if (acmd
->cmd_curwin
== acmd
->cmd_nwin
&& acmd
->cmd_nwin
== 1) {
3257 /* at last window, cannot move */
3258 if (++acmd
->cmd_curwin
>= acmd
->cmd_nwin
) {
3262 if (ddi_dma_getwin(acmd
->cmd_dmahandle
, acmd
->cmd_curwin
,
3263 &acmd
->cmd_dma_offset
, &acmd
->cmd_dma_len
,
3264 &acmd
->cmd_dmacookies
[0], &acmd
->cmd_ncookies
) ==
3269 acmd
->cmd_cookie
= 0;
3271 /* still more cookies in this window - get the next one */
3272 ddi_dma_nextcookie(acmd
->cmd_dmahandle
,
3273 &acmd
->cmd_dmacookies
[0]);
3276 /* get remaining cookies in this window, up to our maximum */
3278 acmd
->cmd_dmacount
+= acmd
->cmd_dmacookies
[i
++].dmac_size
;
3281 if (i
== instance
->max_num_sge
||
3282 acmd
->cmd_cookie
== acmd
->cmd_ncookies
) {
3286 ddi_dma_nextcookie(acmd
->cmd_dmahandle
,
3287 &acmd
->cmd_dmacookies
[i
]);
3290 acmd
->cmd_cookiecnt
= i
;
3292 if (bp
->b_bcount
>= acmd
->cmd_dmacount
) {
3293 pkt
->pkt_resid
= bp
->b_bcount
- acmd
->cmd_dmacount
;
3304 static struct megasas_cmd
*
3305 build_cmd(struct megasas_instance
*instance
, struct scsi_address
*ap
,
3306 struct scsi_pkt
*pkt
, uchar_t
*cmd_done
)
3312 struct megasas_cmd
*cmd
;
3313 struct megasas_sge64
*mfi_sgl
;
3314 struct scsa_cmd
*acmd
= PKT2CMD(pkt
);
3315 struct megasas_pthru_frame
*pthru
;
3316 struct megasas_io_frame
*ldio
;
3318 /* find out if this is logical or physical drive command. */
3319 acmd
->islogical
= MEGADRV_IS_LOGICAL(ap
);
3320 acmd
->device_id
= MAP_DEVICE_ID(instance
, ap
);
3323 /* get the command packet */
3324 if (!(cmd
= get_mfi_pkt(instance
))) {
3331 /* lets get the command directions */
3332 if (acmd
->cmd_flags
& CFLAG_DMASEND
) {
3333 flags
= MFI_FRAME_DIR_WRITE
;
3335 if (acmd
->cmd_flags
& CFLAG_CONSISTENT
) {
3336 (void) ddi_dma_sync(acmd
->cmd_dmahandle
,
3337 acmd
->cmd_dma_offset
, acmd
->cmd_dma_len
,
3338 DDI_DMA_SYNC_FORDEV
);
3340 } else if (acmd
->cmd_flags
& ~CFLAG_DMASEND
) {
3341 flags
= MFI_FRAME_DIR_READ
;
3343 if (acmd
->cmd_flags
& CFLAG_CONSISTENT
) {
3344 (void) ddi_dma_sync(acmd
->cmd_dmahandle
,
3345 acmd
->cmd_dma_offset
, acmd
->cmd_dma_len
,
3346 DDI_DMA_SYNC_FORCPU
);
3349 flags
= MFI_FRAME_DIR_NONE
;
3352 flags
|= MFI_FRAME_SGL64
;
3354 switch (pkt
->pkt_cdbp
[0]) {
3357 * case SCMD_SYNCHRONIZE_CACHE:
3358 * flush_cache(instance);
3359 * return_mfi_pkt(instance, cmd);
3369 if (acmd
->islogical
) {
3370 ldio
= (struct megasas_io_frame
*)cmd
->frame
;
3373 * preare the Logical IO frame:
3374 * 2nd bit is zero for all read cmds
3376 ldio
->cmd
= (pkt
->pkt_cdbp
[0] & 0x02) ?
3377 MFI_CMD_OP_LD_WRITE
: MFI_CMD_OP_LD_READ
;
3378 ldio
->cmd_status
= 0x0;
3379 ldio
->scsi_status
= 0x0;
3380 ldio
->target_id
= acmd
->device_id
;
3382 ldio
->reserved_0
= 0;
3384 ldio
->flags
= flags
;
3386 /* Initialize sense Information */
3387 bzero(cmd
->sense
, SENSE_LENGTH
);
3388 ldio
->sense_len
= SENSE_LENGTH
;
3389 ldio
->sense_buf_phys_addr_hi
= 0;
3390 ldio
->sense_buf_phys_addr_lo
= cmd
->sense_phys_addr
;
3392 ldio
->start_lba_hi
= 0;
3393 ldio
->access_byte
= (acmd
->cmd_cdblen
!= 6) ?
3394 pkt
->pkt_cdbp
[1] : 0;
3395 ldio
->sge_count
= acmd
->cmd_cookiecnt
;
3396 mfi_sgl
= (struct megasas_sge64
*)&ldio
->sgl
;
3398 if (acmd
->cmd_cdblen
== CDB_GROUP0
) {
3399 ldio
->lba_count
= host_to_le16(
3400 (uint16_t)(pkt
->pkt_cdbp
[4]));
3402 ldio
->start_lba_lo
= host_to_le32(
3403 ((uint32_t)(pkt
->pkt_cdbp
[3])) |
3404 ((uint32_t)(pkt
->pkt_cdbp
[2]) << 8) |
3405 ((uint32_t)((pkt
->pkt_cdbp
[1]) & 0x1F)
3407 } else if (acmd
->cmd_cdblen
== CDB_GROUP1
) {
3408 ldio
->lba_count
= host_to_le16(
3409 ((uint16_t)(pkt
->pkt_cdbp
[8])) |
3410 ((uint16_t)(pkt
->pkt_cdbp
[7]) << 8));
3412 ldio
->start_lba_lo
= host_to_le32(
3413 ((uint32_t)(pkt
->pkt_cdbp
[5])) |
3414 ((uint32_t)(pkt
->pkt_cdbp
[4]) << 8) |
3415 ((uint32_t)(pkt
->pkt_cdbp
[3]) << 16) |
3416 ((uint32_t)(pkt
->pkt_cdbp
[2]) << 24));
3417 } else if (acmd
->cmd_cdblen
== CDB_GROUP2
) {
3418 ldio
->lba_count
= host_to_le16(
3419 ((uint16_t)(pkt
->pkt_cdbp
[9])) |
3420 ((uint16_t)(pkt
->pkt_cdbp
[8]) << 8) |
3421 ((uint16_t)(pkt
->pkt_cdbp
[7]) << 16) |
3422 ((uint16_t)(pkt
->pkt_cdbp
[6]) << 24));
3424 ldio
->start_lba_lo
= host_to_le32(
3425 ((uint32_t)(pkt
->pkt_cdbp
[5])) |
3426 ((uint32_t)(pkt
->pkt_cdbp
[4]) << 8) |
3427 ((uint32_t)(pkt
->pkt_cdbp
[3]) << 16) |
3428 ((uint32_t)(pkt
->pkt_cdbp
[2]) << 24));
3429 } else if (acmd
->cmd_cdblen
== CDB_GROUP3
) {
3430 ldio
->lba_count
= host_to_le16(
3431 ((uint16_t)(pkt
->pkt_cdbp
[13])) |
3432 ((uint16_t)(pkt
->pkt_cdbp
[12]) << 8) |
3433 ((uint16_t)(pkt
->pkt_cdbp
[11]) << 16) |
3434 ((uint16_t)(pkt
->pkt_cdbp
[10]) << 24));
3436 ldio
->start_lba_lo
= host_to_le32(
3437 ((uint32_t)(pkt
->pkt_cdbp
[9])) |
3438 ((uint32_t)(pkt
->pkt_cdbp
[8]) << 8) |
3439 ((uint32_t)(pkt
->pkt_cdbp
[7]) << 16) |
3440 ((uint32_t)(pkt
->pkt_cdbp
[6]) << 24));
3442 ldio
->start_lba_lo
= host_to_le32(
3443 ((uint32_t)(pkt
->pkt_cdbp
[5])) |
3444 ((uint32_t)(pkt
->pkt_cdbp
[4]) << 8) |
3445 ((uint32_t)(pkt
->pkt_cdbp
[3]) << 16) |
3446 ((uint32_t)(pkt
->pkt_cdbp
[2]) << 24));
3451 /* fall through For all non-rd/wr cmds */
3453 pthru
= (struct megasas_pthru_frame
*)cmd
->frame
;
3455 /* prepare the DCDB frame */
3456 pthru
->cmd
= (acmd
->islogical
) ?
3457 MFI_CMD_OP_LD_SCSI
: MFI_CMD_OP_PD_SCSI
;
3458 pthru
->cmd_status
= 0x0;
3459 pthru
->scsi_status
= 0x0;
3460 pthru
->target_id
= acmd
->device_id
;
3462 pthru
->cdb_len
= acmd
->cmd_cdblen
;
3464 pthru
->flags
= flags
;
3465 pthru
->data_xfer_len
= acmd
->cmd_dmacount
;
3466 pthru
->sge_count
= acmd
->cmd_cookiecnt
;
3467 mfi_sgl
= (struct megasas_sge64
*)&pthru
->sgl
;
3469 bzero(cmd
->sense
, SENSE_LENGTH
);
3470 pthru
->sense_len
= SENSE_LENGTH
;
3471 pthru
->sense_buf_phys_addr_hi
= 0;
3472 pthru
->sense_buf_phys_addr_lo
= cmd
->sense_phys_addr
;
3474 bcopy(pkt
->pkt_cdbp
, pthru
->cdb
, acmd
->cmd_cdblen
);
3478 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */
3480 /* prepare the scatter-gather list for the firmware */
3481 for (i
= 0; i
< acmd
->cmd_cookiecnt
; i
++, mfi_sgl
++) {
3482 mfi_sgl
->phys_addr
= acmd
->cmd_dmacookies
[i
].dmac_laddress
;
3483 mfi_sgl
->length
= acmd
->cmd_dmacookies
[i
].dmac_size
;
3486 sge_bytes
= sizeof (struct megasas_sge64
)*acmd
->cmd_cookiecnt
;
3488 cmd
->frame_count
= (sge_bytes
/ MEGAMFI_FRAME_SIZE
) +
3489 ((sge_bytes
% MEGAMFI_FRAME_SIZE
) ? 1 : 0) + 1;
3491 if (cmd
->frame_count
>= 8) {
3492 cmd
->frame_count
= 8;
3499 * wait_for_outstanding - Wait for all outstanding cmds
3500 * @instance: Adapter soft state
3502 * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to
3503 * complete all its outstanding commands. Returns error if one or more IOs
3504 * are pending after this time period.
3507 wait_for_outstanding(struct megasas_instance
*instance
)
3510 uint32_t wait_time
= 90;
3512 for (i
= 0; i
< wait_time
; i
++) {
3513 if (!instance
->fw_outstanding
) {
3517 drv_usecwait(MILLISEC
); /* wait for 1000 usecs */;
3520 if (instance
->fw_outstanding
) {
3524 ddi_fm_acc_err_clear(instance
->regmap_handle
, DDI_FME_VERSION
);
3533 issue_mfi_pthru(struct megasas_instance
*instance
, struct megasas_ioctl
*ioctl
,
3534 struct megasas_cmd
*cmd
, int mode
)
3537 uint32_t kphys_addr
= 0;
3538 uint32_t xferlen
= 0;
3541 dma_obj_t pthru_dma_obj
;
3542 struct megasas_pthru_frame
*kpthru
;
3543 struct megasas_pthru_frame
*pthru
;
3545 pthru
= &cmd
->frame
->pthru
;
3546 kpthru
= (struct megasas_pthru_frame
*)&ioctl
->frame
[0];
3548 model
= ddi_model_convert_from(mode
& FMODELS
);
3549 if (model
== DDI_MODEL_ILP32
) {
3550 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_pthru: DDI_MODEL_LP32"));
3552 xferlen
= kpthru
->sgl
.sge32
[0].length
;
3554 /* SJ! - ubuf needs to be virtual address. */
3555 ubuf
= (void *)(ulong_t
)kpthru
->sgl
.sge32
[0].phys_addr
;
3558 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_pthru: DDI_MODEL_LP32"));
3559 xferlen
= kpthru
->sgl
.sge32
[0].length
;
3560 /* SJ! - ubuf needs to be virtual address. */
3561 ubuf
= (void *)(ulong_t
)kpthru
->sgl
.sge32
[0].phys_addr
;
3563 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_pthru: DDI_MODEL_LP64"));
3564 xferlen
= kpthru
->sgl
.sge64
[0].length
;
3565 /* SJ! - ubuf needs to be virtual address. */
3566 ubuf
= (void *)(ulong_t
)kpthru
->sgl
.sge64
[0].phys_addr
;
3571 /* means IOCTL requires DMA */
3572 /* allocate the data transfer buffer */
3573 pthru_dma_obj
.size
= xferlen
;
3574 pthru_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
3575 pthru_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
3576 pthru_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
3577 pthru_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
3578 pthru_dma_obj
.dma_attr
.dma_attr_align
= 1;
3580 /* allocate kernel buffer for DMA */
3581 if (mega_alloc_dma_obj(instance
, &pthru_dma_obj
) != 1) {
3582 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_pthru: "
3583 "could not data transfer buffer alloc."));
3584 return (DDI_FAILURE
);
3587 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3588 if (kpthru
->flags
& MFI_FRAME_DIR_WRITE
) {
3589 if (ddi_copyin(ubuf
, (void *)pthru_dma_obj
.buffer
,
3591 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_pthru: "
3592 "copy from user space failed\n"));
3597 kphys_addr
= pthru_dma_obj
.dma_cookie
[0].dmac_address
;
3600 pthru
->cmd
= kpthru
->cmd
;
3601 pthru
->sense_len
= kpthru
->sense_len
;
3602 pthru
->cmd_status
= kpthru
->cmd_status
;
3603 pthru
->scsi_status
= kpthru
->scsi_status
;
3604 pthru
->target_id
= kpthru
->target_id
;
3605 pthru
->lun
= kpthru
->lun
;
3606 pthru
->cdb_len
= kpthru
->cdb_len
;
3607 pthru
->sge_count
= kpthru
->sge_count
;
3608 pthru
->timeout
= kpthru
->timeout
;
3609 pthru
->data_xfer_len
= kpthru
->data_xfer_len
;
3611 pthru
->sense_buf_phys_addr_hi
= 0;
3612 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
3613 pthru
->sense_buf_phys_addr_lo
= 0;
3615 bcopy((void *)kpthru
->cdb
, (void *)pthru
->cdb
, pthru
->cdb_len
);
3617 pthru
->flags
= kpthru
->flags
& ~MFI_FRAME_SGL64
;
3618 pthru
->sgl
.sge32
[0].length
= xferlen
;
3619 pthru
->sgl
.sge32
[0].phys_addr
= kphys_addr
;
3621 cmd
->sync_cmd
= MEGASAS_TRUE
;
3622 cmd
->frame_count
= 1;
3624 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
3625 con_log(CL_ANN
, (CE_WARN
,
3626 "issue_mfi_pthru: fw_ioctl failed\n"));
3628 if (xferlen
&& (kpthru
->flags
& MFI_FRAME_DIR_READ
)) {
3630 if (ddi_copyout(pthru_dma_obj
.buffer
, ubuf
,
3632 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_pthru: "
3633 "copy to user space failed\n"));
3639 kpthru
->cmd_status
= pthru
->cmd_status
;
3640 kpthru
->scsi_status
= pthru
->scsi_status
;
3642 con_log(CL_ANN
, (CE_NOTE
, "issue_mfi_pthru: cmd_status %x, "
3643 "scsi_status %x\n", pthru
->cmd_status
, pthru
->scsi_status
));
3646 /* free kernel buffer */
3647 if (mega_free_dma_obj(instance
, pthru_dma_obj
) != DDI_SUCCESS
)
3658 issue_mfi_dcmd(struct megasas_instance
*instance
, struct megasas_ioctl
*ioctl
,
3659 struct megasas_cmd
*cmd
, int mode
)
3662 uint32_t kphys_addr
= 0;
3663 uint32_t xferlen
= 0;
3665 dma_obj_t dcmd_dma_obj
;
3666 struct megasas_dcmd_frame
*kdcmd
;
3667 struct megasas_dcmd_frame
*dcmd
;
3669 dcmd
= &cmd
->frame
->dcmd
;
3670 kdcmd
= (struct megasas_dcmd_frame
*)&ioctl
->frame
[0];
3672 model
= ddi_model_convert_from(mode
& FMODELS
);
3673 if (model
== DDI_MODEL_ILP32
) {
3674 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3676 xferlen
= kdcmd
->sgl
.sge32
[0].length
;
3678 /* SJ! - ubuf needs to be virtual address. */
3679 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge32
[0].phys_addr
;
3684 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3685 xferlen
= kdcmd
->sgl
.sge32
[0].length
;
3686 /* SJ! - ubuf needs to be virtual address. */
3687 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge32
[0].phys_addr
;
3689 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_dcmd: DDI_MODEL_LP64"));
3690 xferlen
= kdcmd
->sgl
.sge64
[0].length
;
3691 /* SJ! - ubuf needs to be virtual address. */
3692 ubuf
= (void *)(ulong_t
)dcmd
->sgl
.sge64
[0].phys_addr
;
3696 /* means IOCTL requires DMA */
3697 /* allocate the data transfer buffer */
3698 dcmd_dma_obj
.size
= xferlen
;
3699 dcmd_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
3700 dcmd_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
3701 dcmd_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
3702 dcmd_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
3703 dcmd_dma_obj
.dma_attr
.dma_attr_align
= 1;
3705 /* allocate kernel buffer for DMA */
3706 if (mega_alloc_dma_obj(instance
, &dcmd_dma_obj
) != 1) {
3707 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_dcmd: "
3708 "could not data transfer buffer alloc."));
3709 return (DDI_FAILURE
);
3712 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3713 if (kdcmd
->flags
& MFI_FRAME_DIR_WRITE
) {
3714 if (ddi_copyin(ubuf
, (void *)dcmd_dma_obj
.buffer
,
3716 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_dcmd: "
3717 "copy from user space failed\n"));
3722 kphys_addr
= dcmd_dma_obj
.dma_cookie
[0].dmac_address
;
3725 dcmd
->cmd
= kdcmd
->cmd
;
3726 dcmd
->cmd_status
= kdcmd
->cmd_status
;
3727 dcmd
->sge_count
= kdcmd
->sge_count
;
3728 dcmd
->timeout
= kdcmd
->timeout
;
3729 dcmd
->data_xfer_len
= kdcmd
->data_xfer_len
;
3730 dcmd
->opcode
= kdcmd
->opcode
;
3732 bcopy((void *)kdcmd
->mbox
.b
, (void *)dcmd
->mbox
.b
, DCMD_MBOX_SZ
);
3734 dcmd
->flags
= kdcmd
->flags
& ~MFI_FRAME_SGL64
;
3735 dcmd
->sgl
.sge32
[0].length
= xferlen
;
3736 dcmd
->sgl
.sge32
[0].phys_addr
= kphys_addr
;
3738 cmd
->sync_cmd
= MEGASAS_TRUE
;
3739 cmd
->frame_count
= 1;
3741 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
3742 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_dcmd: fw_ioctl failed\n"));
3744 if (xferlen
&& (kdcmd
->flags
& MFI_FRAME_DIR_READ
)) {
3746 if (ddi_copyout(dcmd_dma_obj
.buffer
, ubuf
,
3748 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_dcmd: "
3749 "copy to user space failed\n"));
3755 kdcmd
->cmd_status
= dcmd
->cmd_status
;
3758 /* free kernel buffer */
3759 if (mega_free_dma_obj(instance
, dcmd_dma_obj
) != DDI_SUCCESS
)
3770 issue_mfi_smp(struct megasas_instance
*instance
, struct megasas_ioctl
*ioctl
,
3771 struct megasas_cmd
*cmd
, int mode
)
3774 void *response_ubuf
;
3775 uint32_t request_xferlen
= 0;
3776 uint32_t response_xferlen
= 0;
3778 dma_obj_t request_dma_obj
;
3779 dma_obj_t response_dma_obj
;
3780 struct megasas_smp_frame
*ksmp
;
3781 struct megasas_smp_frame
*smp
;
3782 struct megasas_sge32
*sge32
;
3784 struct megasas_sge64
*sge64
;
3787 smp
= &cmd
->frame
->smp
;
3788 ksmp
= (struct megasas_smp_frame
*)&ioctl
->frame
[0];
3790 model
= ddi_model_convert_from(mode
& FMODELS
);
3791 if (model
== DDI_MODEL_ILP32
) {
3792 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_smp: DDI_MODEL_ILP32"));
3794 sge32
= &ksmp
->sgl
[0].sge32
[0];
3795 response_xferlen
= sge32
[0].length
;
3796 request_xferlen
= sge32
[1].length
;
3797 con_log(CL_ANN
, (CE_NOTE
, "issue_mfi_smp: "
3798 "response_xferlen = %x, request_xferlen = %x",
3799 response_xferlen
, request_xferlen
));
3801 /* SJ! - ubuf needs to be virtual address. */
3803 response_ubuf
= (void *)(ulong_t
)sge32
[0].phys_addr
;
3804 request_ubuf
= (void *)(ulong_t
)sge32
[1].phys_addr
;
3805 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_smp: "
3806 "response_ubuf = %p, request_ubuf = %p",
3807 response_ubuf
, request_ubuf
));
3810 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_smp: DDI_MODEL_ILP32"));
3812 sge32
= &ksmp
->sgl
[0].sge32
[0];
3813 response_xferlen
= sge32
[0].length
;
3814 request_xferlen
= sge32
[1].length
;
3815 con_log(CL_ANN
, (CE_NOTE
, "issue_mfi_smp: "
3816 "response_xferlen = %x, request_xferlen = %x",
3817 response_xferlen
, request_xferlen
));
3819 /* SJ! - ubuf needs to be virtual address. */
3821 response_ubuf
= (void *)(ulong_t
)sge32
[0].phys_addr
;
3822 request_ubuf
= (void *)(ulong_t
)sge32
[1].phys_addr
;
3823 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_smp: "
3824 "response_ubuf = %p, request_ubuf = %p",
3825 response_ubuf
, request_ubuf
));
3827 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_smp: DDI_MODEL_LP64"));
3829 sge64
= &ksmp
->sgl
[0].sge64
[0];
3830 response_xferlen
= sge64
[0].length
;
3831 request_xferlen
= sge64
[1].length
;
3833 /* SJ! - ubuf needs to be virtual address. */
3834 response_ubuf
= (void *)(ulong_t
)sge64
[0].phys_addr
;
3835 request_ubuf
= (void *)(ulong_t
)sge64
[1].phys_addr
;
3838 if (request_xferlen
) {
3839 /* means IOCTL requires DMA */
3840 /* allocate the data transfer buffer */
3841 request_dma_obj
.size
= request_xferlen
;
3842 request_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
3843 request_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
3844 request_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
3845 request_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
3846 request_dma_obj
.dma_attr
.dma_attr_align
= 1;
3848 /* allocate kernel buffer for DMA */
3849 if (mega_alloc_dma_obj(instance
, &request_dma_obj
) != 1) {
3850 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
3851 "could not data transfer buffer alloc."));
3852 return (DDI_FAILURE
);
3855 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3856 if (ddi_copyin(request_ubuf
, (void *) request_dma_obj
.buffer
,
3857 request_xferlen
, mode
)) {
3858 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
3859 "copy from user space failed\n"));
3864 if (response_xferlen
) {
3865 /* means IOCTL requires DMA */
3866 /* allocate the data transfer buffer */
3867 response_dma_obj
.size
= response_xferlen
;
3868 response_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
3869 response_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
3870 response_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
3871 response_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
3872 response_dma_obj
.dma_attr
.dma_attr_align
= 1;
3874 /* allocate kernel buffer for DMA */
3875 if (mega_alloc_dma_obj(instance
, &response_dma_obj
) != 1) {
3876 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
3877 "could not data transfer buffer alloc."));
3878 return (DDI_FAILURE
);
3881 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3882 if (ddi_copyin(response_ubuf
, (void *) response_dma_obj
.buffer
,
3883 response_xferlen
, mode
)) {
3884 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
3885 "copy from user space failed\n"));
3890 smp
->cmd
= ksmp
->cmd
;
3891 smp
->cmd_status
= ksmp
->cmd_status
;
3892 smp
->connection_status
= ksmp
->connection_status
;
3893 smp
->sge_count
= ksmp
->sge_count
;
3894 /* smp->context = ksmp->context; */
3895 smp
->timeout
= ksmp
->timeout
;
3896 smp
->data_xfer_len
= ksmp
->data_xfer_len
;
3898 bcopy((void *)&ksmp
->sas_addr
, (void *)&smp
->sas_addr
,
3901 smp
->flags
= ksmp
->flags
& ~MFI_FRAME_SGL64
;
3903 model
= ddi_model_convert_from(mode
& FMODELS
);
3904 if (model
== DDI_MODEL_ILP32
) {
3905 con_log(CL_ANN1
, (CE_NOTE
,
3906 "handle_drv_ioctl: DDI_MODEL_ILP32"));
3908 sge32
= &smp
->sgl
[0].sge32
[0];
3909 sge32
[0].length
= response_xferlen
;
3910 sge32
[0].phys_addr
=
3911 response_dma_obj
.dma_cookie
[0].dmac_address
;
3912 sge32
[1].length
= request_xferlen
;
3913 sge32
[1].phys_addr
=
3914 request_dma_obj
.dma_cookie
[0].dmac_address
;
3917 con_log(CL_ANN1
, (CE_NOTE
,
3918 "handle_drv_ioctl: DDI_MODEL_ILP32"));
3919 sge32
= &smp
->sgl
[0].sge32
[0];
3920 sge32
[0].length
= response_xferlen
;
3921 sge32
[0].phys_addr
=
3922 response_dma_obj
.dma_cookie
[0].dmac_address
;
3923 sge32
[1].length
= request_xferlen
;
3924 sge32
[1].phys_addr
=
3925 request_dma_obj
.dma_cookie
[0].dmac_address
;
3927 con_log(CL_ANN1
, (CE_NOTE
,
3928 "issue_mfi_smp: DDI_MODEL_LP64"));
3929 sge64
= &smp
->sgl
[0].sge64
[0];
3930 sge64
[0].length
= response_xferlen
;
3931 sge64
[0].phys_addr
=
3932 response_dma_obj
.dma_cookie
[0].dmac_address
;
3933 sge64
[1].length
= request_xferlen
;
3934 sge64
[1].phys_addr
=
3935 request_dma_obj
.dma_cookie
[0].dmac_address
;
3938 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_smp: "
3939 "smp->response_xferlen = %d, smp->request_xferlen = %d "
3940 "smp->data_xfer_len = %d", sge32
[0].length
, sge32
[1].length
,
3941 smp
->data_xfer_len
));
3943 cmd
->sync_cmd
= MEGASAS_TRUE
;
3944 cmd
->frame_count
= 1;
3946 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
3947 con_log(CL_ANN
, (CE_WARN
,
3948 "issue_mfi_smp: fw_ioctl failed\n"));
3950 con_log(CL_ANN1
, (CE_NOTE
,
3951 "issue_mfi_smp: copy to user space\n"));
3953 if (request_xferlen
) {
3954 if (ddi_copyout(request_dma_obj
.buffer
, request_ubuf
,
3955 request_xferlen
, mode
)) {
3956 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
3957 "copy to user space failed\n"));
3962 if (response_xferlen
) {
3963 if (ddi_copyout(response_dma_obj
.buffer
, response_ubuf
,
3964 response_xferlen
, mode
)) {
3965 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_smp: "
3966 "copy to user space failed\n"));
3972 ksmp
->cmd_status
= smp
->cmd_status
;
3973 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_smp: smp->cmd_status = %d",
3977 if (request_xferlen
) {
3978 /* free kernel buffer */
3979 if (mega_free_dma_obj(instance
, request_dma_obj
) != DDI_SUCCESS
)
3983 if (response_xferlen
) {
3984 /* free kernel buffer */
3985 if (mega_free_dma_obj(instance
, response_dma_obj
) !=
3997 issue_mfi_stp(struct megasas_instance
*instance
, struct megasas_ioctl
*ioctl
,
3998 struct megasas_cmd
*cmd
, int mode
)
4002 uint32_t fis_xferlen
= 0;
4003 uint32_t data_xferlen
= 0;
4005 dma_obj_t fis_dma_obj
;
4006 dma_obj_t data_dma_obj
;
4007 struct megasas_stp_frame
*kstp
;
4008 struct megasas_stp_frame
*stp
;
4010 stp
= &cmd
->frame
->stp
;
4011 kstp
= (struct megasas_stp_frame
*)&ioctl
->frame
[0];
4013 model
= ddi_model_convert_from(mode
& FMODELS
);
4014 if (model
== DDI_MODEL_ILP32
) {
4015 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_stp: DDI_MODEL_ILP32"));
4017 fis_xferlen
= kstp
->sgl
.sge32
[0].length
;
4018 data_xferlen
= kstp
->sgl
.sge32
[1].length
;
4020 /* SJ! - ubuf needs to be virtual address. */
4021 fis_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge32
[0].phys_addr
;
4022 data_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge32
[1].phys_addr
;
4027 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_stp: DDI_MODEL_ILP32"));
4029 fis_xferlen
= kstp
->sgl
.sge32
[0].length
;
4030 data_xferlen
= kstp
->sgl
.sge32
[1].length
;
4032 /* SJ! - ubuf needs to be virtual address. */
4033 fis_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge32
[0].phys_addr
;
4034 data_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge32
[1].phys_addr
;
4036 con_log(CL_ANN1
, (CE_NOTE
, "issue_mfi_stp: DDI_MODEL_LP64"));
4038 fis_xferlen
= kstp
->sgl
.sge64
[0].length
;
4039 data_xferlen
= kstp
->sgl
.sge64
[1].length
;
4041 /* SJ! - ubuf needs to be virtual address. */
4042 fis_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge64
[0].phys_addr
;
4043 data_ubuf
= (void *)(ulong_t
)kstp
->sgl
.sge64
[1].phys_addr
;
4049 con_log(CL_ANN
, (CE_NOTE
, "issue_mfi_stp: "
4050 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf
, fis_xferlen
));
4052 /* means IOCTL requires DMA */
4053 /* allocate the data transfer buffer */
4054 fis_dma_obj
.size
= fis_xferlen
;
4055 fis_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
4056 fis_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
4057 fis_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
4058 fis_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
4059 fis_dma_obj
.dma_attr
.dma_attr_align
= 1;
4061 /* allocate kernel buffer for DMA */
4062 if (mega_alloc_dma_obj(instance
, &fis_dma_obj
) != 1) {
4063 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
4064 "could not data transfer buffer alloc."));
4065 return (DDI_FAILURE
);
4068 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4069 if (ddi_copyin(fis_ubuf
, (void *)fis_dma_obj
.buffer
,
4070 fis_xferlen
, mode
)) {
4071 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
4072 "copy from user space failed\n"));
4078 con_log(CL_ANN
, (CE_NOTE
, "issue_mfi_stp: data_ubuf = %p "
4079 "data_xferlen = %x", data_ubuf
, data_xferlen
));
4081 /* means IOCTL requires DMA */
4082 /* allocate the data transfer buffer */
4083 data_dma_obj
.size
= data_xferlen
;
4084 data_dma_obj
.dma_attr
= megasas_generic_dma_attr
;
4085 data_dma_obj
.dma_attr
.dma_attr_addr_hi
= 0xFFFFFFFFU
;
4086 data_dma_obj
.dma_attr
.dma_attr_count_max
= 0xFFFFFFFFU
;
4087 data_dma_obj
.dma_attr
.dma_attr_sgllen
= 1;
4088 data_dma_obj
.dma_attr
.dma_attr_align
= 1;
4090 /* allocate kernel buffer for DMA */
4091 if (mega_alloc_dma_obj(instance
, &data_dma_obj
) != 1) {
4092 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
4093 "could not data transfer buffer alloc."));
4094 return (DDI_FAILURE
);
4097 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4098 if (ddi_copyin(data_ubuf
, (void *) data_dma_obj
.buffer
,
4099 data_xferlen
, mode
)) {
4100 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
4101 "copy from user space failed\n"));
4106 stp
->cmd
= kstp
->cmd
;
4107 stp
->cmd_status
= kstp
->cmd_status
;
4108 stp
->connection_status
= kstp
->connection_status
;
4109 stp
->target_id
= kstp
->target_id
;
4110 stp
->sge_count
= kstp
->sge_count
;
4111 /* stp->context = kstp->context; */
4112 stp
->timeout
= kstp
->timeout
;
4113 stp
->data_xfer_len
= kstp
->data_xfer_len
;
4115 bcopy((void *)kstp
->fis
, (void *)stp
->fis
, 10);
4117 stp
->flags
= kstp
->flags
& ~MFI_FRAME_SGL64
;
4118 stp
->stp_flags
= kstp
->stp_flags
;
4119 stp
->sgl
.sge32
[0].length
= fis_xferlen
;
4120 stp
->sgl
.sge32
[0].phys_addr
= fis_dma_obj
.dma_cookie
[0].dmac_address
;
4121 stp
->sgl
.sge32
[1].length
= data_xferlen
;
4122 stp
->sgl
.sge32
[1].phys_addr
= data_dma_obj
.dma_cookie
[0].dmac_address
;
4124 cmd
->sync_cmd
= MEGASAS_TRUE
;
4125 cmd
->frame_count
= 1;
4127 if (instance
->func_ptr
->issue_cmd_in_sync_mode(instance
, cmd
)) {
4128 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: fw_ioctl failed\n"));
4132 if (ddi_copyout(fis_dma_obj
.buffer
, fis_ubuf
,
4133 fis_xferlen
, mode
)) {
4134 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
4135 "copy to user space failed\n"));
4141 if (ddi_copyout(data_dma_obj
.buffer
, data_ubuf
,
4142 data_xferlen
, mode
)) {
4143 con_log(CL_ANN
, (CE_WARN
, "issue_mfi_stp: "
4144 "copy to user space failed\n"));
4150 kstp
->cmd_status
= stp
->cmd_status
;
4153 /* free kernel buffer */
4154 if (mega_free_dma_obj(instance
, fis_dma_obj
) != DDI_SUCCESS
)
4159 /* free kernel buffer */
4160 if (mega_free_dma_obj(instance
, data_dma_obj
) != DDI_SUCCESS
)
4171 fill_up_drv_ver(struct megasas_drv_ver
*dv
)
4173 (void) memset(dv
, 0, sizeof (struct megasas_drv_ver
));
4175 (void) memcpy(dv
->signature
, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
4176 (void) memcpy(dv
->os_name
, "Solaris", strlen("Solaris"));
4177 (void) memcpy(dv
->drv_name
, "megaraid_sas", strlen("megaraid_sas"));
4178 (void) memcpy(dv
->drv_ver
, MEGASAS_VERSION
, strlen(MEGASAS_VERSION
));
4179 (void) memcpy(dv
->drv_rel_date
, MEGASAS_RELDATE
,
4180 strlen(MEGASAS_RELDATE
));
4187 handle_drv_ioctl(struct megasas_instance
*instance
, struct megasas_ioctl
*ioctl
,
4195 uint8_t *pci_conf_buf
;
4199 struct megasas_dcmd_frame
*kdcmd
;
4200 struct megasas_drv_ver dv
;
4201 struct megasas_pci_information pi
;
4203 kdcmd
= (struct megasas_dcmd_frame
*)&ioctl
->frame
[0];
4205 model
= ddi_model_convert_from(mode
& FMODELS
);
4206 if (model
== DDI_MODEL_ILP32
) {
4207 con_log(CL_ANN1
, (CE_NOTE
,
4208 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4210 xferlen
= kdcmd
->sgl
.sge32
[0].length
;
4212 /* SJ! - ubuf needs to be virtual address. */
4213 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge32
[0].phys_addr
;
4216 con_log(CL_ANN1
, (CE_NOTE
,
4217 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4218 xferlen
= kdcmd
->sgl
.sge32
[0].length
;
4219 /* SJ! - ubuf needs to be virtual address. */
4220 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge32
[0].phys_addr
;
4222 con_log(CL_ANN1
, (CE_NOTE
,
4223 "handle_drv_ioctl: DDI_MODEL_LP64"));
4224 xferlen
= kdcmd
->sgl
.sge64
[0].length
;
4225 /* SJ! - ubuf needs to be virtual address. */
4226 ubuf
= (void *)(ulong_t
)kdcmd
->sgl
.sge64
[0].phys_addr
;
4229 con_log(CL_ANN1
, (CE_NOTE
, "handle_drv_ioctl: "
4230 "dataBuf=%p size=%d bytes", ubuf
, xferlen
));
4232 switch (kdcmd
->opcode
) {
4233 case MR_DRIVER_IOCTL_DRIVER_VERSION
:
4234 con_log(CL_ANN1
, (CE_NOTE
, "handle_drv_ioctl: "
4235 "MR_DRIVER_IOCTL_DRIVER_VERSION"));
4237 fill_up_drv_ver(&dv
);
4239 if (ddi_copyout(&dv
, ubuf
, xferlen
, mode
)) {
4240 con_log(CL_ANN
, (CE_WARN
, "handle_drv_ioctl: "
4241 "MR_DRIVER_IOCTL_DRIVER_VERSION : "
4242 "copy to user space failed\n"));
4243 kdcmd
->cmd_status
= 1;
4246 kdcmd
->cmd_status
= 0;
4249 case MR_DRIVER_IOCTL_PCI_INFORMATION
:
4250 con_log(CL_ANN1
, (CE_NOTE
, "handle_drv_ioctl: "
4251 "MR_DRIVER_IOCTL_PCI_INFORMAITON"));
4253 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, instance
->dip
,
4254 0, "reg", &props
, &num_props
)) {
4255 con_log(CL_ANN
, (CE_WARN
, "handle_drv_ioctl: "
4256 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4257 "ddi_prop_look_int_array failed\n"));
4261 pi
.busNumber
= (props
[0] >> 16) & 0xFF;
4262 pi
.deviceNumber
= (props
[0] >> 11) & 0x1f;
4263 pi
.functionNumber
= (props
[0] >> 8) & 0x7;
4264 ddi_prop_free((void *)props
);
4267 pci_conf_buf
= (uint8_t *)&pi
.pciHeaderInfo
;
4269 for (i
= 0; i
< (sizeof (struct megasas_pci_information
) -
4270 offsetof(struct megasas_pci_information
, pciHeaderInfo
));
4273 pci_config_get8(instance
->pci_handle
, i
);
4276 if (ddi_copyout(&pi
, ubuf
, xferlen
, mode
)) {
4277 con_log(CL_ANN
, (CE_WARN
, "handle_drv_ioctl: "
4278 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4279 "copy to user space failed\n"));
4280 kdcmd
->cmd_status
= 1;
4283 kdcmd
->cmd_status
= 0;
4287 con_log(CL_ANN
, (CE_WARN
, "handle_drv_ioctl: "
4288 "invalid driver specific IOCTL opcode = 0x%x",
4290 kdcmd
->cmd_status
= 1;
4302 handle_mfi_ioctl(struct megasas_instance
*instance
, struct megasas_ioctl
*ioctl
,
4307 struct megasas_header
*hdr
;
4308 struct megasas_cmd
*cmd
;
4310 cmd
= get_mfi_pkt(instance
);
4313 con_log(CL_ANN
, (CE_WARN
, "megasas: "
4314 "failed to get a cmd packet\n"));
4318 hdr
= (struct megasas_header
*)&ioctl
->frame
[0];
4321 case MFI_CMD_OP_DCMD
:
4322 rval
= issue_mfi_dcmd(instance
, ioctl
, cmd
, mode
);
4324 case MFI_CMD_OP_SMP
:
4325 rval
= issue_mfi_smp(instance
, ioctl
, cmd
, mode
);
4327 case MFI_CMD_OP_STP
:
4328 rval
= issue_mfi_stp(instance
, ioctl
, cmd
, mode
);
4330 case MFI_CMD_OP_LD_SCSI
:
4331 case MFI_CMD_OP_PD_SCSI
:
4332 rval
= issue_mfi_pthru(instance
, ioctl
, cmd
, mode
);
4335 con_log(CL_ANN
, (CE_WARN
, "handle_mfi_ioctl: "
4336 "invalid mfi ioctl hdr->cmd = %d\n", hdr
->cmd
));
4342 return_mfi_pkt(instance
, cmd
);
4343 if (megasas_common_check(instance
, cmd
) != DDI_SUCCESS
)
4352 handle_mfi_aen(struct megasas_instance
*instance
, struct megasas_aen
*aen
)
4356 rval
= register_mfi_aen(instance
, instance
->aen_seq_num
,
4357 aen
->class_locale_word
);
4359 aen
->cmd_status
= (uint8_t)rval
;
4365 register_mfi_aen(struct megasas_instance
*instance
, uint32_t seq_num
,
4366 uint32_t class_locale_word
)
4370 struct megasas_cmd
*cmd
;
4371 struct megasas_dcmd_frame
*dcmd
;
4372 union megasas_evt_class_locale curr_aen
;
4373 union megasas_evt_class_locale prev_aen
;
4376 * If there an AEN pending already (aen_cmd), check if the
4377 * class_locale of that pending AEN is inclusive of the new
4378 * AEN request we currently have. If it is, then we don't have
4379 * to do anything. In other words, whichever events the current
4380 * AEN request is subscribing to, have already been subscribed
4383 * If the old_cmd is _not_ inclusive, then we have to abort
4384 * that command, form a class_locale that is superset of both
4385 * old and current and re-issue to the FW
4388 curr_aen
.word
= class_locale_word
;
4390 if (instance
->aen_cmd
) {
4391 prev_aen
.word
= instance
->aen_cmd
->frame
->dcmd
.mbox
.w
[1];
4394 * A class whose enum value is smaller is inclusive of all
4395 * higher values. If a PROGRESS (= -1) was previously
4396 * registered, then a new registration requests for higher
4397 * classes need not be sent to FW. They are automatically
4400 * Locale numbers don't have such hierarchy. They are bitmap
4403 if ((prev_aen
.members
.class <= curr_aen
.members
.class) &&
4404 !((prev_aen
.members
.locale
& curr_aen
.members
.locale
) ^
4405 curr_aen
.members
.locale
)) {
4407 * Previously issued event registration includes
4408 * current request. Nothing to do.
4413 curr_aen
.members
.locale
|= prev_aen
.members
.locale
;
4415 if (prev_aen
.members
.class < curr_aen
.members
.class)
4416 curr_aen
.members
.class = prev_aen
.members
.class;
4418 ret_val
= abort_aen_cmd(instance
, instance
->aen_cmd
);
4421 con_log(CL_ANN
, (CE_WARN
, "register_mfi_aen: "
4422 "failed to abort prevous AEN command\n"));
4428 curr_aen
.word
= class_locale_word
;
4431 cmd
= get_mfi_pkt(instance
);
4436 dcmd
= &cmd
->frame
->dcmd
;
4438 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
4439 (void) memset(dcmd
->mbox
.b
, 0, DCMD_MBOX_SZ
);
4441 (void) memset(instance
->mfi_evt_detail_obj
.buffer
, 0,
4442 sizeof (struct megasas_evt_detail
));
4444 /* Prepare DCMD for aen registration */
4445 dcmd
->cmd
= MFI_CMD_OP_DCMD
;
4446 dcmd
->cmd_status
= 0x0;
4447 dcmd
->sge_count
= 1;
4448 dcmd
->flags
= MFI_FRAME_DIR_READ
;
4450 dcmd
->data_xfer_len
= sizeof (struct megasas_evt_detail
);
4451 dcmd
->opcode
= MR_DCMD_CTRL_EVENT_WAIT
;
4452 dcmd
->mbox
.w
[0] = seq_num
;
4453 dcmd
->mbox
.w
[1] = curr_aen
.word
;
4454 dcmd
->sgl
.sge32
[0].phys_addr
=
4455 instance
->mfi_evt_detail_obj
.dma_cookie
[0].dmac_address
;
4456 dcmd
->sgl
.sge32
[0].length
= sizeof (struct megasas_evt_detail
);
4458 instance
->aen_seq_num
= seq_num
;
4461 * Store reference to the cmd used to register for AEN. When an
4462 * application wants us to register for AEN, we have to abort this
4463 * cmd and re-register with a new EVENT LOCALE supplied by that app
4465 instance
->aen_cmd
= cmd
;
4467 cmd
->frame_count
= 1;
4469 /* Issue the aen registration frame */
4470 /* atomic_add_16 (&instance->fw_outstanding, 1); */
4471 instance
->func_ptr
->issue_cmd(cmd
, instance
);
4477 display_scsi_inquiry(caddr_t scsi_inq
)
4479 #define MAX_SCSI_DEVICE_CODE 14
4481 char inquiry_buf
[256] = {0};
4483 const char *const scsi_device_types
[] = {
4485 "Sequential-Access",
4502 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " Vendor: ");
4503 for (i
= 8; i
< 16; i
++) {
4504 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "%c",
4508 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " Model: ");
4510 for (i
= 16; i
< 32; i
++) {
4511 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "%c",
4515 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " Rev: ");
4517 for (i
= 32; i
< 36; i
++) {
4518 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "%c",
4522 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "\n");
4525 i
= scsi_inq
[0] & 0x1f;
4528 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " Type: %s ",
4529 i
< MAX_SCSI_DEVICE_CODE
? scsi_device_types
[i
] :
4533 len
+= snprintf(inquiry_buf
+ len
, 265 - len
,
4534 " ANSI SCSI revision: %02x", scsi_inq
[2] & 0x07);
4536 if ((scsi_inq
[2] & 0x07) == 1 && (scsi_inq
[3] & 0x0f) == 1) {
4537 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, " CCS\n");
4539 len
+= snprintf(inquiry_buf
+ len
, 265 - len
, "\n");
4542 con_log(CL_ANN1
, (CE_CONT
, inquiry_buf
));
4546 read_fw_status_reg_xscale(struct megasas_instance
*instance
)
4548 return ((int)RD_OB_MSG_0(instance
));
4552 read_fw_status_reg_ppc(struct megasas_instance
*instance
)
4554 return ((int)RD_OB_SCRATCH_PAD_0(instance
));
4558 issue_cmd_xscale(struct megasas_cmd
*cmd
, struct megasas_instance
*instance
)
4560 atomic_inc_16(&instance
->fw_outstanding
);
4562 /* Issue the command to the FW */
4563 WR_IB_QPORT((host_to_le32(cmd
->frame_phys_addr
) >> 3) |
4564 (cmd
->frame_count
- 1), instance
);
4568 issue_cmd_ppc(struct megasas_cmd
*cmd
, struct megasas_instance
*instance
)
4570 atomic_inc_16(&instance
->fw_outstanding
);
4572 /* Issue the command to the FW */
4573 WR_IB_QPORT((host_to_le32(cmd
->frame_phys_addr
)) |
4574 (((cmd
->frame_count
- 1) << 1) | 1), instance
);
4578 * issue_cmd_in_sync_mode
4581 issue_cmd_in_sync_mode_xscale(struct megasas_instance
*instance
,
4582 struct megasas_cmd
*cmd
)
4585 uint32_t msecs
= MFI_POLL_TIMEOUT_SECS
* (10 * MILLISEC
);
4587 cmd
->cmd_status
= ENODATA
;
4589 WR_IB_QPORT((host_to_le32(cmd
->frame_phys_addr
) >> 3) |
4590 (cmd
->frame_count
- 1), instance
);
4592 mutex_enter(&instance
->int_cmd_mtx
);
4594 for (i
= 0; i
< msecs
&& (cmd
->cmd_status
== ENODATA
); i
++) {
4595 cv_wait(&instance
->int_cmd_cv
, &instance
->int_cmd_mtx
);
4598 mutex_exit(&instance
->int_cmd_mtx
);
4600 if (i
< (msecs
-1)) {
4608 issue_cmd_in_sync_mode_ppc(struct megasas_instance
*instance
,
4609 struct megasas_cmd
*cmd
)
4612 uint32_t msecs
= MFI_POLL_TIMEOUT_SECS
* (10 * MILLISEC
);
4614 con_log(CL_ANN1
, (CE_NOTE
, "issue_cmd_in_sync_mode_ppc: called\n"));
4616 cmd
->cmd_status
= ENODATA
;
4618 WR_IB_QPORT((host_to_le32(cmd
->frame_phys_addr
)) |
4619 (((cmd
->frame_count
- 1) << 1) | 1), instance
);
4621 mutex_enter(&instance
->int_cmd_mtx
);
4623 for (i
= 0; i
< msecs
&& (cmd
->cmd_status
== ENODATA
); i
++) {
4624 cv_wait(&instance
->int_cmd_cv
, &instance
->int_cmd_mtx
);
4627 mutex_exit(&instance
->int_cmd_mtx
);
4629 con_log(CL_ANN1
, (CE_NOTE
, "issue_cmd_in_sync_mode_ppc: done\n"));
4631 if (i
< (msecs
-1)) {
4639 * issue_cmd_in_poll_mode
4642 issue_cmd_in_poll_mode_xscale(struct megasas_instance
*instance
,
4643 struct megasas_cmd
*cmd
)
4646 uint32_t msecs
= MFI_POLL_TIMEOUT_SECS
* MILLISEC
;
4647 struct megasas_header
*frame_hdr
;
4649 frame_hdr
= (struct megasas_header
*)cmd
->frame
;
4650 frame_hdr
->cmd_status
= MFI_CMD_STATUS_POLL_MODE
;
4651 frame_hdr
->flags
|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE
;
4653 /* issue the frame using inbound queue port */
4654 WR_IB_QPORT((host_to_le32(cmd
->frame_phys_addr
) >> 3) |
4655 (cmd
->frame_count
- 1), instance
);
4657 /* wait for cmd_status to change from 0xFF */
4658 for (i
= 0; i
< msecs
&& (frame_hdr
->cmd_status
==
4659 MFI_CMD_STATUS_POLL_MODE
); i
++) {
4660 drv_usecwait(MILLISEC
); /* wait for 1000 usecs */
4663 if (frame_hdr
->cmd_status
== MFI_CMD_STATUS_POLL_MODE
) {
4664 con_log(CL_ANN
, (CE_NOTE
, "issue_cmd_in_poll_mode: "
4665 "cmd polling timed out"));
4666 return (DDI_FAILURE
);
4669 return (DDI_SUCCESS
);
4673 issue_cmd_in_poll_mode_ppc(struct megasas_instance
*instance
,
4674 struct megasas_cmd
*cmd
)
4677 uint32_t msecs
= MFI_POLL_TIMEOUT_SECS
* MILLISEC
;
4678 struct megasas_header
*frame_hdr
;
4680 con_log(CL_ANN1
, (CE_NOTE
, "issue_cmd_in_poll_mode_ppc: called\n"));
4682 frame_hdr
= (struct megasas_header
*)cmd
->frame
;
4683 frame_hdr
->cmd_status
= MFI_CMD_STATUS_POLL_MODE
;
4684 frame_hdr
->flags
|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE
;
4686 /* issue the frame using inbound queue port */
4687 WR_IB_QPORT((host_to_le32(cmd
->frame_phys_addr
)) |
4688 (((cmd
->frame_count
- 1) << 1) | 1), instance
);
4690 /* wait for cmd_status to change from 0xFF */
4691 for (i
= 0; i
< msecs
&& (frame_hdr
->cmd_status
==
4692 MFI_CMD_STATUS_POLL_MODE
); i
++) {
4693 drv_usecwait(MILLISEC
); /* wait for 1000 usecs */
4696 if (frame_hdr
->cmd_status
== MFI_CMD_STATUS_POLL_MODE
) {
4697 con_log(CL_ANN
, (CE_NOTE
, "issue_cmd_in_poll_mode: "
4698 "cmd polling timed out"));
4699 return (DDI_FAILURE
);
4702 return (DDI_SUCCESS
);
4706 enable_intr_xscale(struct megasas_instance
*instance
)
4708 MFI_ENABLE_INTR(instance
);
4712 enable_intr_ppc(struct megasas_instance
*instance
)
4716 con_log(CL_ANN1
, (CE_NOTE
, "enable_intr_ppc: called\n"));
4718 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
4719 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK
, instance
);
4722 * As 1078DE is same as 1078 chip, the interrupt mask
4725 /* WR_OB_INTR_MASK(~0x80000000, instance); */
4726 WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR
), instance
);
4728 /* dummy read to force PCI flush */
4729 mask
= RD_OB_INTR_MASK(instance
);
4731 con_log(CL_ANN1
, (CE_NOTE
, "enable_intr_ppc: "
4732 "outbound_intr_mask = 0x%x\n", mask
));
4736 disable_intr_xscale(struct megasas_instance
*instance
)
4738 MFI_DISABLE_INTR(instance
);
4742 disable_intr_ppc(struct megasas_instance
*instance
)
4744 con_log(CL_ANN1
, (CE_NOTE
, "disable_intr_ppc: called\n"));
4746 con_log(CL_ANN1
, (CE_NOTE
, "disable_intr_ppc: before : "
4747 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance
)));
4749 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
4750 WR_OB_INTR_MASK(OB_INTR_MASK
, instance
);
4752 con_log(CL_ANN1
, (CE_NOTE
, "disable_intr_ppc: after : "
4753 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance
)));
4755 /* dummy read to force PCI flush */
4756 (void) RD_OB_INTR_MASK(instance
);
4760 intr_ack_xscale(struct megasas_instance
*instance
)
4764 /* check if it is our interrupt */
4765 status
= RD_OB_INTR_STATUS(instance
);
4767 if (!(status
& MFI_OB_INTR_STATUS_MASK
)) {
4768 return (DDI_INTR_UNCLAIMED
);
4771 /* clear the interrupt by writing back the same value */
4772 WR_OB_INTR_STATUS(status
, instance
);
4774 return (DDI_INTR_CLAIMED
);
4778 intr_ack_ppc(struct megasas_instance
*instance
)
4782 con_log(CL_ANN1
, (CE_NOTE
, "intr_ack_ppc: called\n"));
4784 /* check if it is our interrupt */
4785 status
= RD_OB_INTR_STATUS(instance
);
4787 con_log(CL_ANN1
, (CE_NOTE
, "intr_ack_ppc: status = 0x%x\n", status
));
4790 * As 1078DE is same as 1078 chip, the status field
4793 if (!(status
& MFI_REPLY_1078_MESSAGE_INTR
)) {
4794 return (DDI_INTR_UNCLAIMED
);
4797 /* clear the interrupt by writing back the same value */
4798 WR_OB_DOORBELL_CLEAR(status
, instance
);
4801 status
= RD_OB_INTR_STATUS(instance
);
4803 con_log(CL_ANN1
, (CE_NOTE
, "intr_ack_ppc: interrupt cleared\n"));
4805 return (DDI_INTR_CLAIMED
);
4809 megasas_common_check(struct megasas_instance
*instance
,
4810 struct megasas_cmd
*cmd
)
4812 int ret
= DDI_SUCCESS
;
4814 if (megasas_check_dma_handle(cmd
->frame_dma_obj
.dma_handle
) !=
4816 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
4817 if (cmd
->pkt
!= NULL
) {
4818 cmd
->pkt
->pkt_reason
= CMD_TRAN_ERR
;
4819 cmd
->pkt
->pkt_statistics
= 0;
4823 if (megasas_check_dma_handle(instance
->mfi_internal_dma_obj
.dma_handle
)
4825 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
4826 if (cmd
->pkt
!= NULL
) {
4827 cmd
->pkt
->pkt_reason
= CMD_TRAN_ERR
;
4828 cmd
->pkt
->pkt_statistics
= 0;
4832 if (megasas_check_dma_handle(instance
->mfi_evt_detail_obj
.dma_handle
) !=
4834 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
4835 if (cmd
->pkt
!= NULL
) {
4836 cmd
->pkt
->pkt_reason
= CMD_TRAN_ERR
;
4837 cmd
->pkt
->pkt_statistics
= 0;
4841 if (megasas_check_acc_handle(instance
->regmap_handle
) != DDI_SUCCESS
) {
4842 ddi_fm_service_impact(instance
->dip
, DDI_SERVICE_UNAFFECTED
);
4843 ddi_fm_acc_err_clear(instance
->regmap_handle
, DDI_FME_VER0
);
4844 if (cmd
->pkt
!= NULL
) {
4845 cmd
->pkt
->pkt_reason
= CMD_TRAN_ERR
;
4846 cmd
->pkt
->pkt_statistics
= 0;
4856 megasas_fm_error_cb(dev_info_t
*dip
, ddi_fm_error_t
*err
, const void *impl_data
)
4859 * as the driver can always deal with an error in any dma or
4860 * access handle, we can just return the fme_status value.
4862 pci_ereport_post(dip
, err
, NULL
);
4863 return (err
->fme_status
);
4867 megasas_fm_init(struct megasas_instance
*instance
)
4869 /* Need to change iblock to priority for new MSI intr */
4870 ddi_iblock_cookie_t fm_ibc
;
4872 /* Only register with IO Fault Services if we have some capability */
4873 if (instance
->fm_capabilities
) {
4874 /* Adjust access and dma attributes for FMA */
4875 endian_attr
.devacc_attr_access
= DDI_FLAGERR_ACC
;
4876 megasas_generic_dma_attr
.dma_attr_flags
= DDI_DMA_FLAGERR
;
4879 * Register capabilities with IO Fault Services.
4880 * fm_capabilities will be updated to indicate
4881 * capabilities actually supported (not requested.)
4884 ddi_fm_init(instance
->dip
, &instance
->fm_capabilities
, &fm_ibc
);
4887 * Initialize pci ereport capabilities if ereport
4888 * capable (should always be.)
4891 if (DDI_FM_EREPORT_CAP(instance
->fm_capabilities
) ||
4892 DDI_FM_ERRCB_CAP(instance
->fm_capabilities
)) {
4893 pci_ereport_setup(instance
->dip
);
4897 * Register error callback if error callback capable.
4899 if (DDI_FM_ERRCB_CAP(instance
->fm_capabilities
)) {
4900 ddi_fm_handler_register(instance
->dip
,
4901 megasas_fm_error_cb
, (void*) instance
);
4904 endian_attr
.devacc_attr_access
= DDI_DEFAULT_ACC
;
4905 megasas_generic_dma_attr
.dma_attr_flags
= 0;
4910 megasas_fm_fini(struct megasas_instance
*instance
)
4912 /* Only unregister FMA capabilities if registered */
4913 if (instance
->fm_capabilities
) {
4915 * Un-register error callback if error callback capable.
4917 if (DDI_FM_ERRCB_CAP(instance
->fm_capabilities
)) {
4918 ddi_fm_handler_unregister(instance
->dip
);
4922 * Release any resources allocated by pci_ereport_setup()
4924 if (DDI_FM_EREPORT_CAP(instance
->fm_capabilities
) ||
4925 DDI_FM_ERRCB_CAP(instance
->fm_capabilities
)) {
4926 pci_ereport_teardown(instance
->dip
);
4929 /* Unregister from IO Fault Services */
4930 ddi_fm_fini(instance
->dip
);
4932 /* Adjust access and dma attributes for FMA */
4933 endian_attr
.devacc_attr_access
= DDI_DEFAULT_ACC
;
4934 megasas_generic_dma_attr
.dma_attr_flags
= 0;
4939 megasas_check_acc_handle(ddi_acc_handle_t handle
)
4943 if (handle
== NULL
) {
4944 return (DDI_FAILURE
);
4947 ddi_fm_acc_err_get(handle
, &de
, DDI_FME_VERSION
);
4949 return (de
.fme_status
);
4953 megasas_check_dma_handle(ddi_dma_handle_t handle
)
4957 if (handle
== NULL
) {
4958 return (DDI_FAILURE
);
4961 ddi_fm_dma_err_get(handle
, &de
, DDI_FME_VERSION
);
4963 return (de
.fme_status
);
4967 megasas_fm_ereport(struct megasas_instance
*instance
, char *detail
)
4970 char buf
[FM_MAX_CLASS
];
4972 (void) snprintf(buf
, FM_MAX_CLASS
, "%s.%s", DDI_FM_DEVICE
, detail
);
4973 ena
= fm_ena_generate(0, FM_ENA_FMT1
);
4974 if (DDI_FM_EREPORT_CAP(instance
->fm_capabilities
)) {
4975 ddi_fm_ereport_post(instance
->dip
, buf
, ena
, DDI_NOSLEEP
,
4976 FM_VERSION
, DATA_TYPE_UINT8
, FM_EREPORT_VERSION
, NULL
);