/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.84 2012/02/11 12:03:44 marius Exp $
 */

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_xpt.h>
#include <bus/cam/cam_xpt_periph.h>

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"

#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.ready		= mpt_cam_ready,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
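
/*
 * Usage sketch (an assumption about deployment, not from this file): since
 * this is a TUNABLE_INT, the SATA write-cache policy is normally set at
 * boot time from /boot/loader.conf, e.g.
 *
 *	hw.mpt.enable_sata_wc="1"	# force SATA write cache on
 *	hw.mpt.enable_sata_wc="0"	# force SATA write cache off
 *
 * The default of -1 means "leave the drive's current setting alone".
 */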

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
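
	/*
	 * In other words, maxq = min(GlobalCredits, MPT_MAX_REQUESTS(mpt)):
	 * never queue more requests than the IOC advertises credits for,
	 * nor more than the driver's own request pool can back.  For
	 * example, an IOC reporting 255 credits over a 128-entry pool
	 * still yields maxq = 128.
	 */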

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_devq_release(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->phydisk_sim, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);

	ctx = device_get_sysctl_ctx(mpt->dev);
	tree = device_get_sysctl_tree(mpt->dev);

	ksnprintf(mpt->scinfo.fc.wwnn,
	    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low);

	ksnprintf(mpt->scinfo.fc.wwpn,
	    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low);

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
	    "World Wide Node Name");

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
	    "World Wide Port Name");

	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		kfree(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}
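
/*
 * A note on the pattern above (repeated in the two functions that follow):
 * extended config pages are fetched in two steps.  First the header is
 * read, whose ExtPageLength field counts 32-bit words (hence the
 * len = ExtPageLength * 4 bytes conversion); only then can a correctly
 * sized buffer be allocated and the page body read into it.
 */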

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		kfree(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->identify.dev_handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error;
	uint16_t status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;
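
	/*
	 * Decoding the hand-built FIS above (gloss per the SATA spec, not
	 * from the original source): byte 0 = 27h marks a Host-to-Device
	 * Register FIS; byte 1 = 80h sets the C bit so the device treats
	 * this as a command; byte 2 = EFh is ATA SET FEATURES; byte 3 is
	 * the subcommand, 02h to enable the write cache or 82h to disable
	 * it.  Byte 7 (40h) is the Device register and byte 15 the Control
	 * register.
	 */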

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		kprintf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		kprintf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			device_printf(mpt->dev,
			    "%sabling SATA WC on phy %d\n",
			    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (reply_frame != NULL) {
		req->IOCStatus = le16toh(reply_frame->IOCStatus);
	}
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
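
	/*
	 * Worked example (a gloss assuming the response-ID shift is 16, as
	 * in the MPI headers): for an initiator ID of 7 this computes
	 * pp1val = ((1 << 7) << 16) | 7 = 0x00800007 - the low byte holds
	 * the port's own SCSI ID and the upper half is a one-hot bitmap of
	 * the IDs the port responds to.
	 */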
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

static void
mpt_cam_ready(struct mpt_softc *mpt)
{

	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		kfree(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim);
		mpt->phydisk_sim = NULL;
	}
	MPT_UNLOCK(mpt);
}

/* This routine is used after a system crash to dump core onto the swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}
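
/*
 * Note on the handoff above: the timeout handler itself only moves the
 * request from the pending list to the timeout list and flags it
 * REQ_STATE_TIMEDOUT; the actual abort/recovery work is done later, in
 * process context, by the recovery thread that
 * mpt_wakeup_recovery_thread() kicks.  This keeps the callout path itself
 * short.
 */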

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments and builds the SGL for SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	bus_addr_t chain_list_addr;
	int first_lim, seg, this_seg_lim;
	uint32_t addr, cur_off, flags, nxt_off, tf;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

	if (error != 0) {
bad:
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		tf = flags;
		memset(se, 0, sizeof (*se));
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
			/* SAS1078 36GB limitation WAR */
			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
				addr |= (1 << 31);
				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
			}
			se->Address.High = htole32(addr);
		}
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			tf = flags;
			memset(se, 0, sizeof (*se));
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
				/* SAS1078 36GB limitation WAR */
				if (mpt->is_1078 &&
				    (((uint64_t)dm_segs->ds_addr +
				    MPI_SGE_LENGTH(se->FlagsLength)) >>
				    32) == 9) {
					addr |= (1 << 31);
					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
				}
				se->Address.High = htole32(addr);
			}
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);
			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			kprintf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	mpt_send_cmd(mpt, req);
}
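
/*
 * mpt_execute_req() below is the 32-bit sibling of mpt_execute_req_a64():
 * the walk over the DMA segment list is the same, but it emits
 * SGE_SIMPLE32/SGE_CHAIN32 elements with single-word addresses instead of
 * the 64-bit forms, and so needs no high-address or SAS1078 handling.
 */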
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

	if (error != 0) {
bad:
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0,sizeof (*se));
		se->Address = htole32(dm_segs->ds_addr);

		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;

		ce->Address = htole32(chain_list_addr);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address = htole32(dm_segs->ds_addr);

			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);
			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			kprintf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	mpt_send_cmd(mpt, req);
}

static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	bus_dmamap_callback_t *cb;
	target_id_t tgt;
	int raid_passthru;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif

	if (sizeof (bus_addr_t) > 4) {
		cb = mpt_execute_req_a64;
	} else {
		cb = mpt_execute_req;
	}

	/*
	 * Link the ccb and the request structure so we can find
	 * the other knowing either the request or the ccb
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru) {
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		mpt_req->Bus = 0;	/* we never set bus here */
	} else {
		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}

	mpt_req->SenseBufferLength =
		(csio->sense_len < MPT_SENSE_SIZE) ?
		 csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when we
	 * Get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

	/* Which physical device to do the I/O on */
	mpt_req->TargetID = tgt;

	/* We assume a single level LUN type */
	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
	} else {
		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
	}
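
	/*
	 * Gloss on the encoding above (explanatory, per the SAM LUN
	 * addressing formats): LUNs below the threshold use the simple
	 * peripheral form, with the LUN in byte 1.  Larger LUNs use the
	 * "flat space" form: byte 0 gets address-method bits 01b (the
	 * 0x40) plus the high 6 LUN bits, and byte 1 the low 8 bits.
	 * E.g. LUN 300 (0x12c) becomes LUN[0] = 0x41, LUN[1] = 0x2c.
	 */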

	/* Set the direction of the transfer */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	} else {
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
	}

	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		switch(ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ACA_TASK:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
		case MSG_ORDERED_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;
		case MSG_SIMPLE_Q_TAG:
		default:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else {
		if (mpt->is_fc || mpt->is_sas) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
		} else {
			/* XXX No such thing for a target doing packetized. */
			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
		}
	}

	if (mpt->is_spi) {
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
		}
	}
	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the scsi command block into place */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
	} else {
		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
	}

	mpt_req->CDBLength = csio->cdb_len;
	mpt_req->DataLength = htole32(csio->dxfer_len);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
	 */
	if (mpt->verbose == MPT_PRT_DEBUG) {
		U32 df;
		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
			mpt_prtc(mpt, "(%s %u byte%s ",
			    (df == MPI_SCSIIO_CONTROL_READ)?
			    "read" : "write", csio->dxfer_len,
			    (csio->dxfer_len == 1)? ")" : "s)");
		}
		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
		    ccb->ccb_h.target_lun, req, req->serno);
	}

	/*
	 * If we have any data to send with this command map it into bus space.
	 */
	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
			/*
			 * We've been given a pointer to a single buffer.
			 */
			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
				/*
				 * Virtual address that needs to be translated
				 * into one or more physical address ranges.
				 */
				int error;
				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    cb, req, 0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(mpt->sim, 1);
					ccbh->status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				(*cb)(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could be easily supported but they are not
			 * currently generated by the CAM subsystem so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *segs;
			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
				(*cb)(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				(*cb)(req, segs, csio->sglist_cnt, 0);
			}
		}
	} else {
		(*cb)(req, NULL, 0, 0);
	}
}
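
/*
 * A sketch of the MsgContext convention noted in mpt_start() above
 * (assumptions, not the driver's actual macros): the request pool index
 * sits in the low bits of the context and the handler id is OR'd in above
 * it, so the interrupt path can recover the request_t from the completion
 * context alone. The 16-bit split below is illustrative.
 */
#if 0
static request_t *
context_to_request(struct mpt_softc *mpt, uint32_t ctx)
{
	/* low 16 bits assumed to carry the pool index */
	return (&mpt->request_pool[le32toh(ctx) & 0xffff]);
}
#endif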

static int
mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
    int sleep_ok)
{
	int error;
	uint16_t status;
	uint8_t response;

	error = mpt_scsi_send_tmf(mpt,
	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
	    0,	/* XXX How do I get the channel ID? */
	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
	    lun != CAM_LUN_WILDCARD ? lun : 0,
	    0, sleep_ok);

	if (error != 0) {
		/*
		 * mpt_scsi_send_tmf hard resets on failure, so no
		 * need to do so here.
		 */
		mpt_prt(mpt,
		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
		return (EIO);
	}

	/* Wait for bus reset to be processed by the IOC. */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
	    REQ_STATE_DONE, sleep_ok, 5000);

	status = le16toh(mpt->tmf_req->IOCStatus);
	response = mpt->tmf_req->ResponseCode;
	mpt->tmf_req->state = REQ_STATE_FREE;

	if (error) {
		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
		    "Resetting controller.\n");
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}

	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
		    "Resetting controller.\n", status);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}

	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
		    "Resetting controller.\n", response);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}
	return (0);
}

static int
mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
{
	int r = 0;
	request_t *req;
	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (ENOMEM);
	}
	fc = req->req_vbuf;
	memset(fc, 0, sizeof(*fc));
	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
	fc->MsgContext = htole32(req->index | fc_els_handler_id);
	mpt_send_cmd(mpt, req);
	if (dowait) {
		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
		    REQ_STATE_DONE, FALSE, 60 * 1000);
		if (r == 0) {
			mpt_free_request(mpt, req);
		}
	}
	return (r);
}

static void
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
	xpt_free_path(ccb->ccb_h.path);
	xpt_free_ccb(&ccb->ccb_h);
}
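
/*
 * This callback is the tail of the rescan cycle used by mpt_cam_event()
 * below; the CCB and path it releases are the ones allocated there:
 *
 *	ccb = xpt_alloc_ccb();
 *	xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 *	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5);
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
 *	xpt_action(ccb);
 */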

static int
mpt_cam_event(struct mpt_softc *mpt, request_t *req,
	      MSG_EVENT_NOTIFY_REPLY *msg)
{
	uint32_t data0, data1;

	data0 = le32toh(msg->Data[0]);
	data1 = le32toh(msg->Data[1]);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
		    (data0 >> 8) & 0xff, data0 & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset */
		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
		    (data0 >> 8) & 0xff);
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset */
		mpt_prt(mpt, "External Bus Reset Detected\n");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do
		 */
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_RESCAN:
	{
		union ccb *ccb;
		uint32_t pathid;

		/*
		 * In general this means a device has been added to the loop.
		 */
		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
		if (mpt->ready == 0) {
			break;
		}
		if (mpt->phydisk_sim) {
			pathid = cam_sim_path(mpt->phydisk_sim);
		} else {
			pathid = cam_sim_path(mpt->sim);
		}
		/*
		 * Allocate a CCB, create a wildcard path for this bus,
		 * and schedule a rescan.
		 */
		ccb = xpt_alloc_ccb();

		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			mpt_prt(mpt, "unable to create path for rescan\n");
			xpt_free_ccb(&ccb->ccb_h);
			break;
		}
		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*lowpri*/5);
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
		ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
		ccb->crcn.flags = CAM_FLAG_NONE;
		xpt_action(ccb);

		/* scan is now in progress */
		break;
	}
	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: LinkState: %s\n",
		    (data1 >> 8) & 0xff,
		    ((data0 & 0xff) == 0)? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((data0 >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
			    "(Loop Initialization)\n",
			    (data1 >> 8) & 0xff,
			    (data0 >> 8) & 0xff,
			    (data0     ) & 0xff);
			switch ((data0 >> 8) & 0xff) {
			case 0xF7:
				if ((data0 & 0xff) == 0xF7) {
					mpt_prt(mpt, "Device needs AL_PA\n");
				} else {
					mpt_prt(mpt, "Device %02x doesn't like "
					    "FC performance\n",
					    data0 & 0xFF);
				}
				break;
			case 0xF8:
				if ((data0 & 0xff) == 0xF7) {
					mpt_prt(mpt, "Device had loop failure "
					    "at its receiver prior to acquiring"
					    " AL_PA\n");
				} else {
					mpt_prt(mpt, "Device %02x detected loop"
					    " failure at its receiver\n",
					    data0 & 0xFF);
				}
				break;
			default:
				mpt_prt(mpt, "Device %02x requests that device "
				    "%02x reset itself\n",
				    data0 & 0xFF,
				    (data0 >> 8) & 0xFF);
				break;
			}
			break;
		case 0x02:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPE(%02x,%02x) (Loop Port Enable)\n",
			    (data1 >> 8) & 0xff, /* Port */
			    (data0 >> 8) & 0xff, /* Character 3 */
			    (data0     ) & 0xff  /* Character 4 */);
			break;
		case 0x03:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
			    (data1 >> 8) & 0xff, /* Port */
			    (data0 >> 8) & 0xff, /* Character 3 */
			    (data0     ) & 0xff  /* Character 4 */);
			break;
		default:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
			    "FC event (%02x %02x %02x)\n",
			    (data1 >> 8) & 0xff,  /* Port */
			    (data0 >> 16) & 0xff, /* Event */
			    (data0 >> 8) & 0xff,  /* Character 3 */
			    (data0     ) & 0xff   /* Character 4 */);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
		    (data1 >> 8) & 0xff, data0);
		break;
	case MPI_EVENT_QUEUE_FULL:
	{
		struct cam_sim *sim;
		struct cam_path *tmppath;
		struct ccb_relsim *crs;
		PTR_EVENT_DATA_QUEUE_FULL pqf;
		lun_id_t lun_id;

		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
		    pqf->TargetID) != 0) {
			sim = mpt->phydisk_sim;
		} else {
			sim = mpt->sim;
		}
		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
				mpt_prt(mpt, "unable to create a path to send "
				    "XPT_REL_SIMQ");
				break;
			}
			crs = &xpt_alloc_ccb()->crs;
			xpt_setup_ccb(&crs->ccb_h, tmppath, 5);
			crs->ccb_h.func_code = XPT_REL_SIMQ;
			crs->ccb_h.flags = CAM_DEV_QFREEZE;
			crs->release_flags = RELSIM_ADJUST_OPENINGS;
			crs->openings = pqf->CurrentDepth - 1;
			xpt_action((union ccb *)crs);
			if (crs->ccb_h.status != CAM_REQ_CMP) {
				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
			}
			xpt_free_path(tmppath);
			xpt_free_ccb(&crs->ccb_h);
		}
		break;
	}
	case MPI_EVENT_IR_RESYNC_UPDATE:
		mpt_prt(mpt, "IR resync update %d completed\n",
		    (data0 >> 16) & 0xff);
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	{
		union ccb *ccb;
		struct cam_sim *sim;
		struct cam_path *tmppath;
		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;

		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
		    psdsc->TargetID) != 0)
			sim = mpt->phydisk_sim;
		else
			sim = mpt->sim;
		switch(psdsc->ReasonCode) {
		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
			ccb = xpt_alloc_ccb();
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    cam_sim_path(sim), psdsc->TargetID,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
				mpt_prt(mpt,
				    "unable to create path for rescan\n");
				xpt_free_ccb(&ccb->ccb_h);
				break;
			}
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*lopri*/5);
			ccb->ccb_h.func_code = XPT_SCAN_BUS;
			ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
			ccb->crcn.flags = CAM_FLAG_NONE;
			xpt_action(ccb);
			/* scan now in progress */
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
			    CAM_REQ_CMP) {
				mpt_prt(mpt,
				    "unable to create path for async event");
				break;
			}
			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
			xpt_free_path(tmppath);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
			break;
		default:
			mpt_lprt(mpt, MPT_PRT_WARN,
			    "SAS device status change: Bus: 0x%02x TargetID: "
			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
			    psdsc->TargetID, psdsc->ReasonCode);
			break;
		}
		break;
	}
	case MPI_EVENT_SAS_DISCOVERY_ERROR:
	{
		PTR_EVENT_DATA_DISCOVERY_ERROR pde;

		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
		    pde->Port, pde->DiscoveryStatus);
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
	case MPI_EVENT_INTEGRATED_RAID:
	case MPI_EVENT_LOG_ENTRY_ADDED:
	case MPI_EVENT_SAS_DISCOVERY:
	case MPI_EVENT_SAS_PHY_LINK_STATUS:
	case MPI_EVENT_SAS_SES:
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
		    msg->Event & 0xFF);
		return (0);
	}
	return (1);
}

/*
 * Reply path for all SCSI I/O requests, called from our
 * interrupt handler by extracting our handler index from
 * the MsgContext field of the reply from the IOC.
 *
 * This routine is optimized for the common case of a
 * completion without error.  All exception handling is
 * offloaded to non-inlined helper routines to minimize
 * cache footprint.
 */
static int
mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_IO_REQUEST *scsi_req;
	union ccb *ccb;

	if (req->state == REQ_STATE_FREE) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
		return (TRUE);
	}

	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
	ccb = req->ccb;
	if (ccb == NULL) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
		    req, req->serno);
		return (TRUE);
	}

	mpt_req_untimeout(req, mpt_timeout, ccb);
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
	}

	if (reply_frame == NULL) {
		/*
		 * Context only reply, completion without error status.
		 */
		ccb->csio.resid = 0;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		ccb->csio.scsi_status = SCSI_STATUS_OK;
	} else {
		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
	}

	if (mpt->outofbeer) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		mpt->outofbeer = 0;
		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
	}
	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
		struct scsi_inquiry_data *iq =
		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
		if (scsi_req->Function ==
		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			/*
			 * Fake out the device type so that only the
			 * pass-thru device will attach.
			 */
			iq->device &= ~0x1F;
			iq->device |= T_NODEVICE;
		}
	}
	if (mpt->verbose == MPT_PRT_DEBUG) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
		    req, req->serno);
	}
	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
	xpt_done(ccb);
	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	} else {
		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
		    req, req->serno);
		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
	}
	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
	    ("CCB req needed wakeup"));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
#endif
	mpt_free_request(mpt, req);
	return (TRUE);
}

static int
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;

	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
#endif
	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
	/* Record IOC Status and Response Code of TMF for any waiters. */
	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
	req->ResponseCode = tmf_reply->ResponseCode;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
	    req, req->serno, le16toh(tmf_reply->IOCStatus));
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		req->state |= REQ_STATE_DONE;
		wakeup(req);
	} else {
		mpt->tmf_req->state = REQ_STATE_FREE;
	}
	return (TRUE);
}

/*
 * XXX: Move to definitions file
 */
#define	ELS	0x22
#define	FC4LS	0x32
#define	ABTS	0x81
#define	BA_ACC	0x84

#define	PRLI	0x20
#define	PRLO	0x21

static void
mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
{
	uint32_t fl;
	MSG_LINK_SERVICE_RSP_REQUEST tmp;
	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;

	/*
	 * We are going to reuse the ELS request to send this response back.
	 */
	rsp = &tmp;
	memset(rsp, 0, sizeof(*rsp));

#ifdef	USE_IMMEDIATE_LINK_DATA
	/*
	 * Apparently the IMMEDIATE stuff doesn't seem to work.
	 */
	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
#endif
	rsp->RspLength = length;
	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
	rsp->MsgContext = htole32(req->index | fc_els_handler_id);

	/*
	 * Copy over information from the original reply frame to
	 * its correct place in the response.
	 */
	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);

	/*
	 * And now copy back the temporary area to the original frame.
	 */
	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
	rsp = req->req_vbuf;

#ifdef	USE_IMMEDIATE_LINK_DATA
	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
#else
{
	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
	bus_addr_t paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	fl =
		MPI_SGE_FLAGS_HOST_TO_IOC	|
		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
		MPI_SGE_FLAGS_LAST_ELEMENT	|
		MPI_SGE_FLAGS_END_OF_LIST	|
		MPI_SGE_FLAGS_END_OF_BUFFER;
	fl <<= MPI_SGE_FLAGS_SHIFT;
	fl |= (length);
	se->FlagsLength = htole32(fl);
	se->Address = htole32((uint32_t) paddr);
}
#endif

	/*
	 * Send it on...
	 */
	mpt_send_cmd(mpt, req);
}
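
/*
 * A minimal sketch (not driver code) of the FlagsLength packing used for
 * the simple SGE above and in mpt_fc_post_els() below: the flag bits go
 * in the byte above MPI_SGE_FLAGS_SHIFT and the byte count lives in the
 * low bits. MPI_SGE_LENGTH_MASK is assumed from mpi.h.
 */
#if 0
static uint32_t
sge_flags_length(uint8_t flags, uint32_t length)
{
	return (((uint32_t)flags << MPI_SGE_FLAGS_SHIFT) |
	    (length & MPI_SGE_LENGTH_MASK));
}
#endif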

static int
mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
	U8 rctl;
	U8 type;
	U8 cmd;
	U16 status = le16toh(reply_frame->IOCStatus);
	U32 *elsbuf;
	int ioindex;
	int do_refresh = TRUE;

#ifdef	INVARIANTS
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
	    req, req->serno, rp->Function));
	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	} else {
		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	}
#endif
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
	    req, req->serno, reply_frame, reply_frame->Function);

	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
		    status, reply_frame->Function);
		if (status == MPI_IOCSTATUS_INVALID_STATE) {
			/*
			 * XXX: to get around shutdown issue
			 */
			mpt->disabled = 1;
			return (TRUE);
		}
		return (TRUE);
	}

	/*
	 * If the function is a link service response, we recycle the
	 * response to be a refresh for a new link service request.
	 *
	 * The request pointer is bogus in this case and we have to fetch
	 * it based upon the TransactionContext.
	 */
	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
		/* Freddie Uncle Charlie Katie */
		/* We don't get the IOINDEX as part of the Link Svc Rsp */
		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
			if (mpt->els_cmd_ptrs[ioindex] == req) {
				break;
			}

		KASSERT(ioindex < mpt->els_cmds_allocated,
		    ("can't find my mommie!"));

		/* remove from active list as we're going to re-post it */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
		return (TRUE);
	}

	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		if (req->state & REQ_STATE_TIMEDOUT) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Completed After Timeout\n");
			mpt_free_request(mpt, req);
		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Async Primitive Send Complete\n");
			mpt_free_request(mpt, req);
		} else {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Complete- Waking Waiter\n");
			wakeup(req);
		}
		return (TRUE);
	}

	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
		    rp->MsgLength, rp->MsgFlags);
		return (TRUE);
	}

	if (rp->MsgLength <= 5) {
		/*
		 * This is just an ack of an original ELS buffer post
		 */
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
		return (TRUE);
	}

	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;

	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
	cmd = be32toh(elsbuf[0]) >> 24;

	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
		return (TRUE);
	}

	ioindex = le32toh(rp->TransactionContext);
	req = mpt->els_cmd_ptrs[ioindex];

	if (rctl == ELS && type == 1) {
		switch (cmd) {
		case PRLI:
			/*
			 * Send back a PRLI ACC
			 */
			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] |= htobe32(0x00000100);
			elsbuf[4] = htobe32(0x00000002);
			if (mpt->role & MPT_ROLE_TARGET)
				elsbuf[4] |= htobe32(0x00000010);
			if (mpt->role & MPT_ROLE_INITIATOR)
				elsbuf[4] |= htobe32(0x00000020);
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		case PRLO:
			memset(elsbuf, 0, 5 * (sizeof (U32)));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] = htobe32(0x08000100);
			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		default:
			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
			break;
		}
	} else if (rctl == ABTS && type == 0) {
		uint16_t rx_id = le16toh(rp->Rxid);
		uint16_t ox_id = le16toh(rp->Oxid);
		request_t *tgt_req = NULL;

		mpt_prt(mpt,
		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
		    le32toh(rp->Wwn.PortNameLow));
		if (rx_id >= mpt->mpt_max_tgtcmds) {
			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
		} else if (mpt->tgt_cmd_ptrs == NULL) {
			mpt_prt(mpt, "No TGT CMD PTRS\n");
		} else {
			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
		}
		if (tgt_req) {
			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
			union ccb *ccb;
			uint32_t ct_id;

			/*
			 * Check to make sure we have the correct command.
			 * The reply descriptor in the target state should
			 * contain an IoIndex that should match the
			 * RX_ID.
			 *
			 * It'd be nice to have OX_ID to crosscheck with
			 * as well.
			 */
			ct_id = GET_IO_INDEX(tgt->reply_desc);

			if (ct_id != rx_id) {
				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
				    rx_id, ct_id);
				goto skip;
			}

			ccb = tgt->ccb;
			if (ccb) {
				mpt_prt(mpt,
				    "CCB (%p): lun %u flags %x status %x\n",
				    ccb, ccb->ccb_h.target_lun,
				    ccb->ccb_h.flags, ccb->ccb_h.status);
			}
			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
			    "%x nxfers %x\n", tgt->state,
			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
			    tgt->nxfers);
			if (mpt_abort_target_cmd(mpt, tgt_req)) {
				mpt_prt(mpt, "unable to start TargetAbort\n");
			}
		} else {
			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
		}
	skip:
		memset(elsbuf, 0, 5 * (sizeof (U32)));
		elsbuf[0] = htobe32(0);
		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x000ffff);
		/*
		 * Dork with the reply frame so that the response to it
		 * will be sent back as a BA_ACC.
		 */
		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_els_send_response(mpt, req, rp, 12);
		do_refresh = FALSE;
	} else {
		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
	}
	if (do_refresh == TRUE) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
	}
	return (TRUE);
}

/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{

	/*
	 * The pending list is already run down by
	 * the generic handler.  Perform the same
	 * operation on the timed out request list.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
	    MPI_IOCSTATUS_INVALID_STATE);

	/*
	 * XXX: We need to repost ELS and Target Command Buffers?
	 */

	/*
	 * Inform the XPT that a bus reset has occurred.
	 */
	xpt_async(AC_BUS_RESET, mpt->path, NULL);
}

/*
 * Parse additional completion information in the reply
 * frame for SCSI I/O requests.
 */
static void
mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
	union ccb *ccb;
	MSG_SCSI_IO_REPLY *scsi_io_reply;
	u_int ioc_status;
	u_int sstate;

	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
	    ("MPT SCSI I/O Handler called with incorrect reply type"));
	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
	    ("MPT SCSI I/O Handler called with continuation reply"));

	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
	ioc_status = le16toh(scsi_io_reply->IOCStatus);
	ioc_status &= MPI_IOCSTATUS_MASK;
	sstate = scsi_io_reply->SCSIState;

	ccb = req->ccb;
	ccb->csio.resid =
	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);

	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
		uint32_t sense_returned;

		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

		sense_returned = le32toh(scsi_io_reply->SenseCount);
		if (sense_returned < ccb->csio.sense_len)
			ccb->csio.sense_resid = ccb->csio.sense_len -
			    sense_returned;
		else
			ccb->csio.sense_resid = 0;

		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
		    min(ccb->csio.sense_len, sense_returned));
	}

	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
		/*
		 * Tag messages rejected, but non-tagged retry
		 * was successful.
XXXX
		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
		 */
	}

	switch(ioc_status) {
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		/*
		 * XXX
		 * Linux driver indicates that a zero
		 * transfer length with this error code
		 * indicates a CRC error.
		 *
		 * No need to swap the bytes for checking
		 * against zero.
		 */
		if (scsi_io_reply->TransferCount == 0) {
			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
			/*
			 * Status was never returned for this transaction.
			 */
			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
			/* XXX Handle SPI-Packet and FCP-2 response info. */
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		}
		break;
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
		break;
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * Since selection timeouts and "device really not
		 * there" are grouped into this error code, report
		 * selection timeout.  Selection timeouts are
		 * typically retried before giving up on the device
		 * whereas "device not there" errors are considered
		 * unretryable.
		 */
		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
		break;
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		ccb->ccb_h.status = CAM_UA_TERMIO;
		break;
	case MPI_IOCSTATUS_INVALID_STATE:
		/*
		 * The IOC has been reset.  Emulate a bus reset.
		 */
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/*
		 * Don't clobber any timeout status that has
		 * already been set for this transaction.  We
		 * want the SCSI layer to be able to differentiate
		 * between the command we aborted due to timeout
		 * and any innocent bystanders.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
			break;
		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
		break;

	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
		break;
	case MPI_IOCSTATUS_BUSY:
		mpt_set_ccb_status(ccb, CAM_BUSY);
		break;
	case MPI_IOCSTATUS_INVALID_FUNCTION:
	case MPI_IOCSTATUS_INVALID_SGL:
	case MPI_IOCSTATUS_INTERNAL_ERROR:
	case MPI_IOCSTATUS_INVALID_FIELD:
	default:
		/* XXX
		 * Some of the above may need to kick
		 * off a recovery action!!!!
		 */
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		mpt_freeze_ccb(ccb);
	}
}

static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpt_softc *mpt;
	struct ccb_trans_settings *cts;
	target_id_t tgt;
	lun_id_t lun;
	int raid_passthru;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	raid_passthru = (sim == mpt->phydisk_sim);
	MPT_LOCK_ASSERT(mpt);

	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	if (raid_passthru &&
	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
	}
	ccb->ccb_h.ccb_mpt_ptr = mpt;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
				break;
			}
		}
		/* Max supported CDB length is 16 bytes */
		/* XXX Unless we implement the new 32byte message type */
		if (ccb->csio.cdb_len >
		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
#ifdef	MPT_TEST_MULTIPATH
		if (mpt->failure_id == ccb->ccb_h.target_id) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
			break;
		}
#endif
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		mpt_start(sim, ccb);
		return;

	case XPT_RESET_BUS:
		if (raid_passthru) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
		/* FALLTHROUGH */
	case XPT_RESET_DEV:
		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "reset bus\n");
			}
		} else {
			xpt_print(ccb->ccb_h.path, "reset device\n");
		}
		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);

		/*
		 * mpt_bus_reset is always successful in that it
		 * will fall back to a hard reset should a bus
		 * reset attempt fail.
		 */
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;

	case XPT_ABORT:
	{
		union ccb *accb = ccb->cab.abort_ccb;
		switch (accb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		case XPT_SCSI_IO:
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		break;
	}

#define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)

#define	DP_DISC_ENABLE	0x1
#define	DP_DISC_DISABL	0x2
#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)

#define	DP_TQING_ENABLE	0x4
#define	DP_TQING_DISABL	0x8
#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)

#define	DP_WIDE		0x10
#define	DP_NARROW	0x20
#define	DP_WIDTH	(DP_WIDE|DP_NARROW)

#define	DP_SYNC		0x40

	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		uint8_t dval;
		u_int period;
		u_int offset;
		int i, j;

		cts = &ccb->cts;

		if (mpt->is_fc || mpt->is_sas) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		/*
		 * We can be called just to validate transport and proto
		 * versions.
		 */
		if (scsi->valid == 0 && spi->valid == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		/*
		 * Skip attempting settings on RAID volume disks.
		 * Other devices on the bus get the normal treatment.
		 */
		if (mpt->phydisk_sim && raid_passthru == 0 &&
		    mpt_is_raid_volume(mpt, tgt) != 0) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    "no transfer settings for RAID vols\n");
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		i = mpt->mpt_port_page2.PortSettings &
		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
		j = mpt->mpt_port_page2.PortFlags &
		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
			mpt_lprt(mpt, MPT_PRT_ALWAYS,
			    "honoring BIOS transfer negotiations\n");
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		dval = 0;
		period = 0;
		offset = 0;

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
			    DP_DISC_ENABLE : DP_DISC_DISABL;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
			    DP_TQING_ENABLE : DP_TQING_DISABL;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
			    DP_WIDE : DP_NARROW;
		}

		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
			dval |= DP_SYNC;
			offset = spi->sync_offset;
		} else {
			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
			    &mpt->mpt_dev_page1[tgt];
			offset = ptr->RequestedParameters;
			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
		}
		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
			dval |= DP_SYNC;
			period = spi->sync_period;
		} else {
			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
			    &mpt->mpt_dev_page1[tgt];
			period = ptr->RequestedParameters;
			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
		}

		if (dval & DP_DISC_ENABLE) {
			mpt->mpt_disc_enable |= (1 << tgt);
		} else if (dval & DP_DISC_DISABL) {
			mpt->mpt_disc_enable &= ~(1 << tgt);
		}
		if (dval & DP_TQING_ENABLE) {
			mpt->mpt_tag_enable |= (1 << tgt);
		} else if (dval & DP_TQING_DISABL) {
			mpt->mpt_tag_enable &= ~(1 << tgt);
		}
		if (dval & DP_WIDTH) {
			mpt_setwidth(mpt, tgt, 1);
		}
		if (dval & DP_SYNC) {
			mpt_setsync(mpt, tgt, period, offset);
		}
		if (dval == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "set [%d]: 0x%x period 0x%x offset %d\n",
		    tgt, dval, period, offset);
		if (mpt_update_spi_config(mpt, tgt)) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		}
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;

		cts = &ccb->cts;
		cts->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			struct ccb_trans_settings_fc *fc =
			    &cts->xport_specific.fc;
			cts->protocol_version = SCSI_REV_SPC;
			cts->transport = XPORT_FC;
			cts->transport_version = 0;
			fc->valid = CTS_FC_VALID_SPEED;
			fc->bitrate = 100000;
		} else if (mpt->is_sas) {
			struct ccb_trans_settings_sas *sas =
			    &cts->xport_specific.sas;
			cts->protocol_version = SCSI_REV_SPC2;
			cts->transport = XPORT_SAS;
			cts->transport_version = 0;
			sas->valid = CTS_SAS_VALID_SPEED;
			sas->bitrate = 300000;
		} else {
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;
			if (mpt_get_spi_settings(mpt, cts) != 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
				break;
			}
		}
		scsi = &cts->proto_specific.scsi;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
		cam_calc_geometry(ccg, /*extended*/1);
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		break;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
#if 0 /* XXX swildner */
		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
#endif
		/*
		 * FC cards report MAX_DEVICES of 512, but
		 * the MSG_SCSI_IO_REQUEST target id field
		 * is only 8 bits. Until we fix the driver
		 * to support 'channels' for bus overflow,
		 * limit it.
		 */
		if (cpi->max_target > 255) {
			cpi->max_target = 255;
		}

		/*
		 * VMware ESX reports > 16 devices and then dies when we probe.
		 */
		if (mpt->is_spi && cpi->max_target > 15) {
			cpi->max_target = 15;
		}
		cpi->max_lun = MPT_MAX_LUNS;
		cpi->initiator_id = mpt->mpt_ini_id;
		cpi->bus_id = cam_sim_bus(sim);

		/*
		 * The base speed is the speed of the underlying connection.
		 */
		cpi->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_FC;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC2;
		} else {
			cpi->hba_misc = PIM_SEQSCAN;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->transport = XPORT_SPI;
			cpi->transport_version = 2;
			cpi->protocol_version = SCSI_REV_2;
		}

		/*
		 * We give our fake RAID passthru bus a width that is MaxVolumes
		 * wide and restrict it to one lun.
		 */
		if (raid_passthru) {
			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
			cpi->initiator_id = cpi->max_target + 1;
			cpi->max_lun = 0;
		}

		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
			cpi->hba_misc |= PIM_NOINITIATOR;
		}
		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
			cpi->target_sprt =
			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
	{
		int result;

		if (ccb->cel.enable)
			result = mpt_enable_lun(mpt,
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
		else
			result = mpt_disable_lun(mpt,
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
		if (result == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		}
		break;
	}
	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tgt_resource_t *trtp;
		lun_id_t lun = ccb->ccb_h.target_lun;
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
		ccb->ccb_h.flags = 0;

		if (lun == CAM_LUN_WILDCARD) {
			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
				break;
			}
			trtp = &mpt->trt_wildcard;
		} else if (lun >= MPT_MAX_LUNS) {
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		} else {
			trtp = &mpt->trt[lun];
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			mpt_lprt(mpt, MPT_PRT_DEBUG1,
			    "Put FREE ATIO %p lun %d\n", ccb, lun);
			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
			    sim_links.stqe);
		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
			mpt_lprt(mpt, MPT_PRT_DEBUG1,
			    "Put FREE INOT lun %d\n", lun);
			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
			    sim_links.stqe);
		} else {
			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
		}
		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
		return;
	}
	case XPT_CONT_TARGET_IO:
		mpt_target_start_io(mpt, ccb);
		return;

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

static int
mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
	target_id_t tgt;
	uint32_t dval, pval, oval;
	int rv;

	if (IS_CURRENT_SETTINGS(cts) == 0) {
		tgt = cts->ccb_h.target_id;
	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
			return (-1);
		}
	} else {
		tgt = cts->ccb_h.target_id;
	}

	/*
	 * We aren't looking at Port Page 2 BIOS settings here-
	 * sometimes these have been known to be bogus XXX.
	 *
	 * For user settings, we pick the max from port page 0
	 *
	 * For current settings we read the current settings out from
	 * device page 0 for that target.
	 */
	if (IS_CURRENT_SETTINGS(cts)) {
		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
		dval = 0;

		tmp = mpt->mpt_dev_page0[tgt];
		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
		    sizeof(tmp), FALSE, 5000);
		if (rv) {
			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
			return (rv);
		}
		mpt2host_config_page_scsi_device_0(&tmp);

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
		    tmp.NegotiatedParameters, tmp.Information);
		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
		    DP_WIDE : DP_NARROW;
		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
		    DP_DISC_ENABLE : DP_DISC_DISABL;
		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
		    DP_TQING_ENABLE : DP_TQING_DISABL;
		oval = tmp.NegotiatedParameters;
		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
		pval = tmp.NegotiatedParameters;
		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
		mpt->mpt_dev_page0[tgt] = tmp;
	} else {
		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
		oval = mpt->mpt_port_page0.Capabilities;
		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
		pval = mpt->mpt_port_page0.Capabilities;
		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
	}

	spi->valid = 0;
	scsi->valid = 0;
	spi->flags = 0;
	scsi->flags = 0;
	spi->sync_offset = oval;
	spi->sync_period = pval;
	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
	if (dval & DP_WIDE) {
		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
	} else {
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	}
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		if (dval & DP_TQING_ENABLE) {
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}
		spi->valid |= CTS_SPI_VALID_DISC;
		if (dval & DP_DISC_ENABLE) {
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		}
	}
	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
	return (0);
}

static void
mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
{
	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;

	ptr = &mpt->mpt_dev_page1[tgt];
	if (onoff) {
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	} else {
		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
	}
}

static void
mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
{
	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;

	ptr = &mpt->mpt_dev_page1[tgt];
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
	if (period == 0) {
		return;
	}
	ptr->RequestedParameters |=
	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
	ptr->RequestedParameters |=
	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
	if (period < 0xa) {
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
	}
	if (period < 0x9) {
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
	}
}
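
/*
 * A sketch (not driver code) of the Device Page 1 packing performed by
 * mpt_setsync() above: the period factor and offset land at their MPI
 * shifts, and DT clocking rides along for period factors below 0xa, as
 * in the thresholds used above.
 */
#if 0
static uint32_t
devpage1_sync_bits(uint32_t period, uint32_t offset)
{
	uint32_t rp = 0;

	rp |= period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
	rp |= offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
	if (period < 0xa)
		rp |= MPI_SCSIDEVPAGE1_RP_DT;
	return (rp);
}
#endif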

static int
mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
{
	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
	int rv;

	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
	tmp = mpt->mpt_dev_page1[tgt];
	host2mpt_config_page_scsi_device_1(&tmp);
	rv = mpt_write_cur_cfg_page(mpt, tgt,
	    &tmp.Header, sizeof(tmp), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
		return (-1);
	}
	return (0);
}

/****************************** Timeout Recovery ******************************/
static int
mpt_spawn_recovery_thread(struct mpt_softc *mpt)
{
	int error;

	error = kthread_create(mpt_recovery_thread, mpt,
	    &mpt->recovery_thread, "mpt_recovery%d", mpt->unit);
	return (error);
}

static void
mpt_terminate_recovery_thread(struct mpt_softc *mpt)
{

	if (mpt->recovery_thread == NULL) {
		return;
	}
	mpt->shutdwn_recovery = 1;
	wakeup(mpt);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
}

static void
mpt_recovery_thread(void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)arg;
	MPT_LOCK(mpt);
	for (;;) {
		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
			if (mpt->shutdwn_recovery == 0) {
				mpt_sleep(mpt, mpt, 0, "idle", 0);
			}
		}
		if (mpt->shutdwn_recovery != 0) {
			break;
		}
		mpt_recover_commands(mpt);
	}
	mpt->recovery_thread = NULL;
	wakeup(&mpt->recovery_thread);
	MPT_UNLOCK(mpt);
	kthread_exit();
}

static int
mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
    u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
{
	MSG_SCSI_TASK_MGMT *tmf_req;
	int error;

	/*
	 * Wait for any current TMF request to complete.
	 * We're only allowed to issue one TMF at a time.
	 */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
	if (error != 0) {
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}

	mpt_assign_serno(mpt, mpt->tmf_req);
	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;

	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->TargetID = target;
	tmf_req->Bus = channel;
	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	tmf_req->TaskType = type;
	tmf_req->MsgFlags = flags;
	tmf_req->MsgContext =
	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
	if (lun > MPT_MAX_LUNS) {
		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		tmf_req->LUN[1] = lun & 0xff;
	} else {
		tmf_req->LUN[1] = lun;
	}
	tmf_req->TaskMsgContext = abort_ctx;

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
	    mpt->tmf_req->serno, tmf_req->MsgContext);
	if (mpt->verbose > MPT_PRT_DEBUG) {
		mpt_print_request(tmf_req);
	}

	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
	if (error != MPT_OK) {
		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
		mpt->tmf_req->state = REQ_STATE_FREE;
		mpt_reset(mpt, TRUE);
	}
	return (error);
}
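
/*
 * Usage sketch: mpt_bus_reset() above is the canonical caller of this
 * routine. A whole-bus reset, for example, is issued as
 *
 *	error = mpt_scsi_send_tmf(mpt,
 *	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
 *	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
 *	    0, 0, 0, 0, sleep_ok);
 *
 * followed by mpt_wait_req() on mpt->tmf_req, since the completion is
 * reported through mpt_scsi_tmf_reply_handler().
 */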

/*
 * When a command times out, it is placed on the request_timeout_list
 * and we wake our recovery thread.  The MPT-Fusion architecture supports
 * only a single TMF operation at a time, so we serially abort/bdr, etc,
 * the timedout transactions.  The next TMF is issued either by the
 * completion handler of the current TMF waking our recovery thread,
 * or the TMF timeout handler causing a hard reset sequence.
 */
static void
mpt_recover_commands(struct mpt_softc *mpt)
{
	request_t *req;
	union ccb *ccb;
	int error;

	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
		/*
		 * No work to do- leave.
		 */
		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
		return;
	}

	/*
	 * Flush any commands whose completion coincides with their timeout.
	 */
	mpt_intr(mpt);

	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
		/*
		 * The timedout commands have already
		 * completed.  This typically means
		 * that either the timeout value was on
		 * the hairy edge of what the device
		 * requires or - more likely - interrupts
		 * are not happening.
		 */
		mpt_prt(mpt, "Timedout requests already complete. "
		    "Interrupts may not be functioning.\n");
		mpt_enable_ints(mpt);
		return;
	}

	/*
	 * We have no visibility into the current state of the
	 * controller, so attempt to abort the commands in the
	 * order they timed-out. For initiator commands, we
	 * depend on the reply handler pulling requests off
	 * the timeout list.
	 */
	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
		uint16_t status;
		uint8_t response;
		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;

		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
		    req, req->serno, hdrp->Function);
		ccb = req->ccb;
		if (ccb == NULL) {
			mpt_prt(mpt, "null ccb in timed out request. "
			    "Resetting Controller.\n");
			mpt_reset(mpt, TRUE);
			continue;
		}
		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);

		/*
		 * Check to see if this is not an initiator command and
		 * deal with it differently if it is.
		 */
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			break;
		default:
			/*
			 * XXX: FIX ME: need to abort target assists...
			 */
			mpt_prt(mpt, "just putting it back on the pend q\n");
			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
			    links);
			continue;
		}

		error = mpt_scsi_send_tmf(mpt,
		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    htole32(req->index | scsi_io_handler_id), TRUE);

		if (error != 0) {
			/*
			 * mpt_scsi_send_tmf hard resets on failure, so no
			 * need to do so here.  Our queue should be emptied
			 * by the hard reset.
			 */
			continue;
		}

		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
		    REQ_STATE_DONE, TRUE, 500);

		status = le16toh(mpt->tmf_req->IOCStatus);
		response = mpt->tmf_req->ResponseCode;
		mpt->tmf_req->state = REQ_STATE_FREE;

		if (error != 0) {
			/*
			 * If we've errored out, reset the controller.
			 */
			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
			    "Resetting controller\n");
			mpt_reset(mpt, TRUE);
			continue;
		}

		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
			    "Resetting controller.\n", status);
			mpt_reset(mpt, TRUE);
			continue;
		}

		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
			    "Resetting controller.\n", response);
			mpt_reset(mpt, TRUE);
			continue;
		}
		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
	}
}
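
/*
 * A sketch of the producer side of the recovery handshake described above
 * (the timeout handler itself lives elsewhere in this file; the exact
 * statements below are illustrative rather than verbatim):
 *
 *	req->state |= REQ_STATE_TIMEDOUT;
 *	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
 *	TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
 *	wakeup(mpt);		-- mpt_recovery_thread() sleeps on mpt
 */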

/************************ Target Mode Support ****************************/
static void
mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
{
	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
	PTR_SGE_TRANSACTION32 tep;
	PTR_SGE_SIMPLE32 se;
	bus_addr_t paddr;
	uint32_t fl;

	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	fc = req->req_vbuf;
	memset(fc, 0, MPT_REQUEST_AREA);
	fc->BufferCount = 1;
	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
	fc->MsgContext = htole32(req->index | fc_els_handler_id);

	/*
	 * Okay, set up ELS buffer pointers. ELS buffer pointers
	 * consist of a TE SGL element (with details length of zero)
	 * followed by a SIMPLE SGL element which holds the address
	 * of the buffer.
	 */

	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
	tep->ContextSize = 4;
	tep->TransactionContext[0] = htole32(ioindex);

	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
	fl =
		MPI_SGE_FLAGS_HOST_TO_IOC	|
		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
		MPI_SGE_FLAGS_LAST_ELEMENT	|
		MPI_SGE_FLAGS_END_OF_LIST	|
		MPI_SGE_FLAGS_END_OF_BUFFER;
	fl <<= MPI_SGE_FLAGS_SHIFT;
	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
	se->FlagsLength = htole32(fl);
	se->Address = htole32((uint32_t) paddr);
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "add ELS index %d ioindex %d for %p:%u\n",
	    req->index, ioindex, req, req->serno);
	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
	    ("mpt_fc_post_els: request not locked"));
	mpt_send_cmd(mpt, req);
}
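
/*
 * Illustrative layout of what mpt_fc_post_els() builds (a sketch, not a
 * wire dump): the request area is split at MPT_RQSL(), the first half
 * carrying the BUF_POST message with its TE + SIMPLE SGEs and the second
 * half serving as the posted ELS buffer the SIMPLE element points at.
 *
 *	req->req_pbuf                     req->req_pbuf + MPT_RQSL(mpt)
 *	+---------------------------------+---------------------------+
 *	| BUF_POST msg | TE SGE  | SIMPLE |  ELS receive buffer       |
 *	|              | ioindex | SGE ---+-> (MPT_NRFM - MPT_RQSL)   |
 *	+---------------------------------+---------------------------+
 */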

static void
mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
{
	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
	PTR_CMD_BUFFER_DESCRIPTOR cb;
	bus_addr_t paddr;

	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);
	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;

	fc = req->req_vbuf;
	fc->BufferCount = 1;
	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

	cb = &fc->Buffer[0];
	cb->IoIndex = htole16(ioindex);
	cb->u.PhysicalAddress32 = htole32((U32) paddr);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
}

static int
mpt_add_els_buffers(struct mpt_softc *mpt)
{
	int i;

	if (mpt->is_fc == 0) {
		return (TRUE);
	}

	if (mpt->els_cmds_allocated) {
		return (TRUE);
	}

	mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
	    M_DEVBUF, M_NOWAIT | M_ZERO);

	if (mpt->els_cmd_ptrs == NULL) {
		return (FALSE);
	}

	/*
	 * Feed the chip some ELS buffer resources
	 */
	for (i = 0; i < MPT_MAX_ELS; i++) {
		request_t *req = mpt_get_request(mpt, FALSE);
		if (req == NULL) {
			break;
		}
		req->state |= REQ_STATE_LOCKED;
		mpt->els_cmd_ptrs[i] = req;
		mpt_fc_post_els(mpt, req, i);
	}

	if (i == 0) {
		mpt_prt(mpt, "unable to add ELS buffer resources\n");
		kfree(mpt->els_cmd_ptrs, M_DEVBUF);
		mpt->els_cmd_ptrs = NULL;
		return (FALSE);
	}
	if (i != MPT_MAX_ELS) {
		mpt_lprt(mpt, MPT_PRT_INFO,
		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
	}
	mpt->els_cmds_allocated = i;
	return (TRUE);
}

static int
mpt_add_target_commands(struct mpt_softc *mpt)
{
	int i, max;

	if (mpt->tgt_cmd_ptrs) {
		return (TRUE);
	}

	max = MPT_MAX_REQUESTS(mpt) >> 1;
	if (max > mpt->mpt_max_tgtcmds) {
		max = mpt->mpt_max_tgtcmds;
	}
	mpt->tgt_cmd_ptrs =
	    kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->tgt_cmd_ptrs == NULL) {
		mpt_prt(mpt,
		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
		return (FALSE);
	}

	for (i = 0; i < max; i++) {
		request_t *req;

		req = mpt_get_request(mpt, FALSE);
		if (req == NULL) {
			break;
		}
		req->state |= REQ_STATE_LOCKED;
		mpt->tgt_cmd_ptrs[i] = req;
		mpt_post_target_command(mpt, req, i);
	}

	if (i == 0) {
		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
		kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
		mpt->tgt_cmd_ptrs = NULL;
		return (FALSE);
	}

	mpt->tgt_cmds_allocated = i;

	if (i < max) {
		mpt_lprt(mpt, MPT_PRT_INFO,
		    "added %d of %d target bufs\n", i, max);
	}
	return (i);
}

static int
mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{

	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
		mpt->twildcard = 1;
	} else if (lun >= MPT_MAX_LUNS) {
		return (EINVAL);
	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
		return (EINVAL);
	}
	if (mpt->tenabled == 0) {
		if (mpt->is_fc) {
			(void) mpt_fc_reset_link(mpt, 0);
		}
		mpt->tenabled = 1;
	}
	if (lun == CAM_LUN_WILDCARD) {
		mpt->trt_wildcard.enabled = 1;
	} else {
		mpt->trt[lun].enabled = 1;
	}
	return (0);
}

static int
mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{
	int i;

	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
		mpt->twildcard = 0;
	} else if (lun >= MPT_MAX_LUNS) {
		return (EINVAL);
	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
		return (EINVAL);
	}
	if (lun == CAM_LUN_WILDCARD) {
		mpt->trt_wildcard.enabled = 0;
	} else {
		mpt->trt[lun].enabled = 0;
	}
	for (i = 0; i < MPT_MAX_LUNS; i++) {
		if (mpt->trt[i].enabled) {
			break;
		}
	}
	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
		if (mpt->is_fc) {
			(void) mpt_fc_reset_link(mpt, 0);
		}
		mpt->tenabled = 0;
	}
	return (0);
}
/*
 * Called with MPT lock held
 */
static void
mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
{
	struct ccb_scsiio *csio = &ccb->csio;
	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);

	switch (tgt->state) {
	case TGT_STATE_IN_CAM:
		break;
	case TGT_STATE_MOVING_DATA:
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		xpt_freeze_simq(mpt->sim, 1);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		xpt_done(ccb);
		return;
	default:
		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
		mpt_tgt_dump_req_state(mpt, cmd_req);
		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		xpt_done(ccb);
		return;
	}

	if (csio->dxfer_len) {
		bus_dmamap_callback_t *cb;
		PTR_MSG_TARGET_ASSIST_REQUEST ta;
		request_t *req;

		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));

		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
			if (mpt->outofbeer == 0) {
				mpt->outofbeer = 1;
				xpt_freeze_simq(mpt->sim, 1);
				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
			}
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		if (sizeof (bus_addr_t) > 4) {
			cb = mpt_execute_req_a64;
		} else {
			cb = mpt_execute_req;
		}

		req->ccb = ccb;
		ccb->ccb_h.ccb_req_ptr = req;

		/*
		 * Record the currently active ccb and the
		 * request for it in our target state area.
		 */
		tgt->ccb = ccb;
		tgt->req = req;

		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
		ta = req->req_vbuf;

		if (mpt->is_sas) {
			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
			    cmd_req->req_vbuf;
			ta->QueueTag = ssp->InitiatorTag;
		} else if (mpt->is_spi) {
			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
			    cmd_req->req_vbuf;
			ta->QueueTag = sp->Tag;
		}
		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
		ta->ReplyWord = htole32(tgt->reply_desc);
		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
			ta->LUN[0] =
			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
		} else {
			ta->LUN[1] = csio->ccb_h.target_lun;
		}

		ta->RelativeOffset = tgt->bytes_xfered;
		ta->DataLength = ccb->csio.dxfer_len;
		if (ta->DataLength > tgt->resid) {
			ta->DataLength = tgt->resid;
		}

		/*
		 * XXX Should be done after data transfer completes?
		 */
		tgt->resid -= csio->dxfer_len;
		tgt->bytes_xfered += csio->dxfer_len;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		}

#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
		}
#endif
		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);

		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
				int error;

				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    cb, req, 0);
				if (error == EINPROGRESS) {
					xpt_freeze_simq(mpt->sim, 1);
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr = (bus_addr_t)
				    (vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				(*cb)(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could be easily supported but they are not
			 * currently generated by the CAM subsystem so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *sgs;
			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
				(*cb)(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				sgs = (struct bus_dma_segment *)csio->data_ptr;
				(*cb)(req, sgs, csio->sglist_cnt, 0);
			}
		}
	} else {
		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

		/*
		 * XXX: I don't know why this seems to happen, but
		 * XXX: completing the CCB seems to make things happy.
		 * XXX: This seems to happen if the initiator requests
		 * XXX: enough data that we have to do multiple CTIOs.
		 */
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Meaningless STATUS CCB (%p): flags %x status %x "
			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			xpt_done(ccb);
			return;
		}
		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
			sp = sense;
			memcpy(sp, &csio->sense_data,
			    min(csio->sense_len, MPT_SENSE_SIZE));
		}
		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
	}
}
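
/*
 * Send a locally synthesized response (inquiry or sense data for an
 * unmanaged lun) back to the initiator. The payload is copied into
 * the spare half of the request area and described by a single
 * 32 bit simple SGE, so it is limited to MPT_RQSL(mpt) bytes.
 */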
static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    uint32_t lun, int send, uint8_t *data, size_t length)
{
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_ASSIST_REQUEST ta;
	SGE_SIMPLE32 *se;
	uint32_t flags;
	uint8_t *dptr;
	bus_addr_t pptr;
	request_t *req;

	/*
	 * We enter with resid set to the data load for the command.
	 */
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if (length == 0 || tgt->resid == 0) {
		tgt->resid = 0;
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
		return;
	}

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		mpt_prt(mpt, "out of resources- dropping local response\n");
		return;
	}
	tgt->is_local = 1;

	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
	ta = req->req_vbuf;

	if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
		ta->QueueTag = ssp->InitiatorTag;
	} else if (mpt->is_spi) {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
		ta->QueueTag = sp->Tag;
	}
	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	ta->ReplyWord = htole32(tgt->reply_desc);
	if (lun > MPT_MAX_LUNS) {
		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		ta->LUN[1] = lun & 0xff;
	} else {
		ta->LUN[1] = lun;
	}
	ta->RelativeOffset = 0;
	ta->DataLength = length;

	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));

	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	se->Address = pptr;
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);

	tgt->ccb = NULL;
	tgt->req = req;
	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}
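
/*
 * Only ATIOs and INOTs still sitting on their resource queues can be
 * aborted here; a command that is already active at the chip has to
 * be aborted with mpt_abort_target_cmd() below.
 */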
/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	int found = 0;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);

	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		trtp = &mpt->trt_wildcard;
	} else {
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	}

	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &trtp->inots;
	} else {
		return (CAM_REQ_INVALID);
	}

	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch == &accb->ccb_h) {
			found = 1;
			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
			break;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}
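
/*
 * Chip-level abort of an active target command. The abort request
 * built here completes through the normal target reply path, so the
 * request allocated below is freed by mpt_scsi_tgt_reply_handler()
 * when the TARGET_MODE_ABORT reply arrives.
 */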
/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);

	error = 0;
	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
	}
	return (error);
}
/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */
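
/*
 * Send status (and any sense data) for a target command back to the
 * initiator. For FC a full FC_RSP payload is built by hand in the
 * spare half of the request area unless the chip can synthesize the
 * response itself (OK status, no residual).
 */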
static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}

	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));

		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * as words which are swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));

		rsp[2] = status;
		if (tgt->resid) {
			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
			rsp[3] = htobe32(tgt->resid);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
		if (status == SCSI_STATUS_CHECK_COND) {
			int i;

			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
			MPI_SGE_FLAGS_HOST_TO_IOC	|
			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
			MPI_SGE_FLAGS_LAST_ELEMENT	|
			MPI_SGE_FLAGS_END_OF_LIST	|
			MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}
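
/*
 * Deliver a task management request to the peripheral driver as an
 * immediate notify CCB; if none are queued, answer the initiator
 * with BUSY status instead.
 */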
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immed_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
	inot->sense_len = 0;
	memset(inot->message_args, 0, sizeof (inot->message_args));
	inot->initiator_id = init_id;	/* XXX */

	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_ABORT_TASK_SET:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->message_args[0] = MSG_CLEAR_TASK_SET;
		break;
	case MPT_TARGET_RESET:
		inot->message_args[0] = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->message_args[0] = MSG_CLEAR_ACA;
		break;
	case MPT_TERMINATE_TASK:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	default:
		inot->message_args[0] = MSG_NOOP;
		break;
	}

	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
	xpt_done((union ccb *)inot);
}
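
/*
 * A new command has arrived in a posted command buffer. Decode the
 * task management bits, tag, lun and CDB from the transport-specific
 * buffer, then either hand the command to CAM as an ATIO, service it
 * locally for unmanaged luns, or route task management upstream.
 */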
static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
	     '0',  '0',  '0',  '1'
	};
	struct ccb_accept_tio *atiop;
	lun_id_t lun;
	int tag_action = 0;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	U8 *lunptr;
	U8 *vbuf;
	U16 itag;
	U16 ioindex;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
	uint8_t *cdbp;

	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			case 0x80:
				fct = MPT_TERMINATE_TASK;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, 0, req,
				    SCSI_STATUS_OK, 0);
				return;
			}
		}
		switch (fc->FcpCntl[1]) {
		case 0:
			tag_action = MSG_SIMPLE_Q_TAG;
			break;
		case 1:
			tag_action = MSG_HEAD_OF_Q_TAG;
			break;
		case 2:
			tag_action = MSG_ORDERED_Q_TAG;
			break;
		default:
			/*
			 * Bah. Ignore Untagged Queing and ACA
			 */
			tag_action = MSG_SIMPLE_Q_TAG;
			break;
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		itag = be16toh(fc->OptionalOxid);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		itag = sp->Tag;
	}

	/*
	 * Generate a simple lun
	 */
	switch (lunptr[0] & 0xc0) {
	case 0x40:
		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
		break;
	case 0:
		lun = lunptr[1];
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
		lun = 0xffff;
		break;
	}

	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
			size_t len;

			memset(buf, 0, MPT_SENSE_SIZE);
			cond = SCSI_STATUS_CHECK_COND;
			buf[0] = 0xf0;		/* current, fixed format */
			buf[2] = 0x5;		/* ILLEGAL REQUEST */
			buf[7] = 0x8;		/* additional sense length */
			sp = buf;
			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

			switch (cdbp[0]) {
			case INQUIRY:
				if (cdbp[1] != 0) {
					/* invalid field in cdb */
					buf[12] = 0x26;
					buf[13] = 0x01;
					break;
				}
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (null_iqd));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local inquiry %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    null_iqd, len);
				return;
			case REQUEST_SENSE:
				buf[2] = 0x0;	/* NO SENSE */
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (buf));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local reqsense %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    buf, len);
				return;
			case REPORT_LUNS:
				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
				buf[12] = 0x26;	/* illegal command */
				break;
			default:
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "CMD 0x%x to unmanaged lun %u\n",
				    cdbp[0], lun);
				buf[12] = 0x25;	/* no such device */
				break;
			}
			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}

	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener\n",
			    fct);
			mpt_scsi_tgt_status(mpt, 0, req,
			    SCSI_STATUS_OK, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}

	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %u- sending back %s\n", lun,
		    mpt->tenabled ? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled ? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);

	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
	if (tag_action) {
		atiop->tag_action = tag_action;
		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
	}
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;

		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1))? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}

	xpt_done((union ccb *)atiop);
}
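
/*
 * Debug helpers: dump the per-command target state and its owning
 * request, used by the bad-state panics and error paths above and
 * below to make failures diagnosable.
 */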
static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}
static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}
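
/*
 * The reply handler for all target mode requests. A NULL reply_frame
 * denotes a context (turbo) reply and is dispatched on the state of
 * the command buffer; full reply frames arrive for buffer posts,
 * target assists, status sends and aborts.
 */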
static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	U16 status;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch(tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);

	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);
		break;
	}
	return (TRUE);
}