/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.68 2009/07/02 00:43:10 delphij Exp $
 */
#include <sys/cdefs.h>

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

#if __FreeBSD_version >= 700025 || defined(__DragonFly__)
#ifndef	CAM_NEW_TRAN_CODE
#define	CAM_NEW_TRAN_CODE	1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
					MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.probe = mpt_cam_probe,
	.attach = mpt_cam_attach,
	.enable = mpt_cam_enable,
	.ready = mpt_cam_ready,
	.event = mpt_cam_event,
	.reset = mpt_cam_ioc_reset,
	.detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
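
/*
 * CAM personality glue: the IOC core calls mpt_cam_probe() to decide whether
 * this personality should attach to a given controller instance, based on
 * the configured (or firmware-reported) initiator/target role and on the
 * presence of RAID physical disks that need a pass-thru bus.
 */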
static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}
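
/*
 * Attach-time setup: register reply handlers for normal SCSI I/O, task
 * management (TMF), FC ELS and target-mode frames, reserve a dedicated TMF
 * request, start the recovery thread, and then build the CAM SIM(s), bus
 * registration and wildcard path(s).  A second SIM/bus is created only when
 * the controller reports RAID physical disks to expose for pass-thru.
 */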
static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock.
	 */
	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_devq_release(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}
/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    (unsigned)mpt->mpt_fcport_page0.WWNN.High,
	    (unsigned)mpt->mpt_fcport_page0.WWNN.Low,
	    (unsigned)mpt->mpt_fcport_page0.WWPN.High,
	    (unsigned)mpt->mpt_fcport_page0.WWPN.Low,
	    (unsigned)mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
	{
		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

		snprintf(mpt->scinfo.fc.wwnn,
		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWNN.High,
		    mpt->mpt_fcport_page0.WWNN.Low);

		snprintf(mpt->scinfo.fc.wwpn,
		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWPN.High,
		    mpt->mpt_fcport_page0.WWPN.Low);

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
		    "World Wide Node Name");

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
		    "World Wide Port Name");
	}
#endif
	return (0);
}
/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	uint32_t fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}
static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		kfree(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}
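
/*
 * The form/form_specific pair is folded into the page address of the
 * extended config request; for PHY pages the form selects "by phy number"
 * and form_specific carries the phy index.
 */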
static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}
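
/*
 * Walk every phy reported by IO Unit Page 0 and collect both the phy's own
 * identity and, when something is attached, the attached device's Device
 * Page 0 data; the result is cached in mpt->sas_portinfo for later use
 * (e.g. the SATA write-cache tuning below).
 */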
/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		kfree(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->identify.dev_handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}
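
/*
 * The write-cache toggle is sent as a raw SATA register FIS through the
 * IOC's SATA passthrough function: 0x27/0x80 mark a host-to-device command
 * FIS, 0xef is ATA SET FEATURES, and the features value 0x02/0x82 selects
 * enable/disable volatile write cache (the remaining bytes follow the
 * standard register-FIS layout).
 */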
static int
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return (ENOMEM);

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		kprintf("error %d sending passthrough\n", error);
		return (error);
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		kprintf("IOCSTATUS %d\n", status);
		return (EIO);
	}

	mpt_free_request(mpt, req);
	return (0);
}
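
/*
 * The hw.mpt.enable_sata_wc tunable is tri-state: -1 leaves the drive's
 * write cache alone, 0 disables it and any other value enables it on every
 * directly attached SATA device found during discovery.
 */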
/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			device_printf(mpt->dev,
			    "%sabling SATA WC on phy %d\n",
			    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}
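
/*
 * Completion side of the SATA passthrough: stash the IOCStatus in the
 * request, mark it done and either wake the sleeping issuer or, if the
 * request had already timed out, free it here (late completion).
 */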
static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	if (reply_frame != NULL) {
		req->IOCStatus = le16toh(reply_frame->IOCStatus);
	}
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
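
/*
 * Parallel SCSI (SPI) setup below mirrors the FC path: port pages 0-2 and
 * the 16 per-target device pages 0/1 are read and byte-swapped into host
 * format so later negotiation code can work from cached copies.
 */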
906 * Read SCSI configuration information
909 mpt_read_config_info_spi(struct mpt_softc
*mpt
)
913 rv
= mpt_read_cfg_header(mpt
, MPI_CONFIG_PAGETYPE_SCSI_PORT
, 0, 0,
914 &mpt
->mpt_port_page0
.Header
, FALSE
, 5000);
918 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "SPI Port Page 0 Header: %x %x %x %x\n",
919 mpt
->mpt_port_page0
.Header
.PageVersion
,
920 mpt
->mpt_port_page0
.Header
.PageLength
,
921 mpt
->mpt_port_page0
.Header
.PageNumber
,
922 mpt
->mpt_port_page0
.Header
.PageType
);
924 rv
= mpt_read_cfg_header(mpt
, MPI_CONFIG_PAGETYPE_SCSI_PORT
, 1, 0,
925 &mpt
->mpt_port_page1
.Header
, FALSE
, 5000);
929 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "SPI Port Page 1 Header: %x %x %x %x\n",
930 mpt
->mpt_port_page1
.Header
.PageVersion
,
931 mpt
->mpt_port_page1
.Header
.PageLength
,
932 mpt
->mpt_port_page1
.Header
.PageNumber
,
933 mpt
->mpt_port_page1
.Header
.PageType
);
935 rv
= mpt_read_cfg_header(mpt
, MPI_CONFIG_PAGETYPE_SCSI_PORT
, 2, 0,
936 &mpt
->mpt_port_page2
.Header
, FALSE
, 5000);
940 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "SPI Port Page 2 Header: %x %x %x %x\n",
941 mpt
->mpt_port_page2
.Header
.PageVersion
,
942 mpt
->mpt_port_page2
.Header
.PageLength
,
943 mpt
->mpt_port_page2
.Header
.PageNumber
,
944 mpt
->mpt_port_page2
.Header
.PageType
);
946 for (i
= 0; i
< 16; i
++) {
947 rv
= mpt_read_cfg_header(mpt
, MPI_CONFIG_PAGETYPE_SCSI_DEVICE
,
948 0, i
, &mpt
->mpt_dev_page0
[i
].Header
, FALSE
, 5000);
952 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
953 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i
,
954 mpt
->mpt_dev_page0
[i
].Header
.PageVersion
,
955 mpt
->mpt_dev_page0
[i
].Header
.PageLength
,
956 mpt
->mpt_dev_page0
[i
].Header
.PageNumber
,
957 mpt
->mpt_dev_page0
[i
].Header
.PageType
);
959 rv
= mpt_read_cfg_header(mpt
, MPI_CONFIG_PAGETYPE_SCSI_DEVICE
,
960 1, i
, &mpt
->mpt_dev_page1
[i
].Header
, FALSE
, 5000);
964 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
965 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i
,
966 mpt
->mpt_dev_page1
[i
].Header
.PageVersion
,
967 mpt
->mpt_dev_page1
[i
].Header
.PageLength
,
968 mpt
->mpt_dev_page1
[i
].Header
.PageNumber
,
969 mpt
->mpt_dev_page1
[i
].Header
.PageType
);
973 * At this point, we don't *have* to fail. As long as we have
974 * valid config header information, we can (barely) lurch
978 rv
= mpt_read_cur_cfg_page(mpt
, 0, &mpt
->mpt_port_page0
.Header
,
979 sizeof(mpt
->mpt_port_page0
), FALSE
, 5000);
981 mpt_prt(mpt
, "failed to read SPI Port Page 0\n");
983 mpt2host_config_page_scsi_port_0(&mpt
->mpt_port_page0
);
984 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
985 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
986 (unsigned)mpt
->mpt_port_page0
.Capabilities
,
987 (unsigned)mpt
->mpt_port_page0
.PhysicalInterface
);
990 rv
= mpt_read_cur_cfg_page(mpt
, 0, &mpt
->mpt_port_page1
.Header
,
991 sizeof(mpt
->mpt_port_page1
), FALSE
, 5000);
993 mpt_prt(mpt
, "failed to read SPI Port Page 1\n");
995 mpt2host_config_page_scsi_port_1(&mpt
->mpt_port_page1
);
996 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
997 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
998 (unsigned)mpt
->mpt_port_page1
.Configuration
,
999 (unsigned)mpt
->mpt_port_page1
.OnBusTimerValue
);
1002 rv
= mpt_read_cur_cfg_page(mpt
, 0, &mpt
->mpt_port_page2
.Header
,
1003 sizeof(mpt
->mpt_port_page2
), FALSE
, 5000);
1005 mpt_prt(mpt
, "failed to read SPI Port Page 2\n");
1007 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
1008 "Port Page 2: Flags %x Settings %x\n",
1009 (unsigned)mpt
->mpt_port_page2
.PortFlags
,
1010 (unsigned)mpt
->mpt_port_page2
.PortSettings
);
1011 mpt2host_config_page_scsi_port_2(&mpt
->mpt_port_page2
);
1012 for (i
= 0; i
< 16; i
++) {
1013 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
1014 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1015 i
, mpt
->mpt_port_page2
.DeviceSettings
[i
].Timeout
,
1016 mpt
->mpt_port_page2
.DeviceSettings
[i
].SyncFactor
,
1017 mpt
->mpt_port_page2
.DeviceSettings
[i
].DeviceFlags
);
1021 for (i
= 0; i
< 16; i
++) {
1022 rv
= mpt_read_cur_cfg_page(mpt
, i
,
1023 &mpt
->mpt_dev_page0
[i
].Header
, sizeof(*mpt
->mpt_dev_page0
),
1027 "cannot read SPI Target %d Device Page 0\n", i
);
1030 mpt2host_config_page_scsi_device_0(&mpt
->mpt_dev_page0
[i
]);
1031 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
1032 "target %d page 0: Negotiated Params %x Information %x\n",
1034 (unsigned)mpt
->mpt_dev_page0
[i
].NegotiatedParameters
,
1035 (unsigned)mpt
->mpt_dev_page0
[i
].Information
);
1037 rv
= mpt_read_cur_cfg_page(mpt
, i
,
1038 &mpt
->mpt_dev_page1
[i
].Header
, sizeof(*mpt
->mpt_dev_page1
),
1042 "cannot read SPI Target %d Device Page 1\n", i
);
1045 mpt2host_config_page_scsi_device_1(&mpt
->mpt_dev_page1
[i
]);
1046 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
1047 "target %d page 1: Requested Params %x Configuration %x\n",
1049 (unsigned)mpt
->mpt_dev_page1
[i
].RequestedParameters
,
1050 (unsigned)mpt
->mpt_dev_page1
[i
].Configuration
);
1056 * Validate SPI configuration information.
1058 * In particular, validate SPI Port Page 1.
1061 mpt_set_initial_config_spi(struct mpt_softc
*mpt
)
1063 int i
, pp1val
= ((1 << mpt
->mpt_ini_id
) << 16) | mpt
->mpt_ini_id
;
1066 mpt
->mpt_disc_enable
= 0xff;
1067 mpt
->mpt_tag_enable
= 0;
1069 if (mpt
->mpt_port_page1
.Configuration
!= pp1val
) {
1070 CONFIG_PAGE_SCSI_PORT_1 tmp
;
1072 mpt_prt(mpt
, "SPI Port Page 1 Config value bad (%x)- should "
1074 (unsigned)mpt
->mpt_port_page1
.Configuration
,
1076 tmp
= mpt
->mpt_port_page1
;
1077 tmp
.Configuration
= pp1val
;
1078 host2mpt_config_page_scsi_port_1(&tmp
);
1079 error
= mpt_write_cur_cfg_page(mpt
, 0,
1080 &tmp
.Header
, sizeof(tmp
), FALSE
, 5000);
1084 error
= mpt_read_cur_cfg_page(mpt
, 0,
1085 &tmp
.Header
, sizeof(tmp
), FALSE
, 5000);
1089 mpt2host_config_page_scsi_port_1(&tmp
);
1090 if (tmp
.Configuration
!= pp1val
) {
1092 "failed to reset SPI Port Page 1 Config value\n");
1095 mpt
->mpt_port_page1
= tmp
;
1099 * The purpose of this exercise is to get
1100 * all targets back to async/narrow.
1102 * We skip this step if the BIOS has already negotiated
1103 * speeds with the targets.
1105 i
= mpt
->mpt_port_page2
.PortSettings
&
1106 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS
;
1107 if (i
== MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS
) {
1108 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
1109 "honoring BIOS transfer negotiations\n");
1111 for (i
= 0; i
< 16; i
++) {
1112 mpt
->mpt_dev_page1
[i
].RequestedParameters
= 0;
1113 mpt
->mpt_dev_page1
[i
].Configuration
= 0;
1114 (void) mpt_update_spi_config(mpt
, i
);
static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	return (error);
}

static void
mpt_cam_ready(struct mpt_softc *mpt)
{
	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
	}
	mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		kfree(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim);
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim);
		mpt->phydisk_sim = NULL;
	}
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
}
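
/*
 * The two mpt_execute_req variants below build the hardware SG list for a
 * mapped CAM I/O: up to MPT_NSGL_FIRST simple elements live in the request
 * frame itself, and anything longer is continued through chain elements
 * that point at further simple/chain lists in additional request frames
 * (64-bit and 32-bit SGE flavors respectively).
 */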
1267 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
1269 * Takes a list of physical segments and builds the SGL for SCSI IO command
1270 * and forwards the commard to the IOC after one last check that CAM has not
1271 * aborted the transaction.
1274 mpt_execute_req_a64(void *arg
, bus_dma_segment_t
*dm_segs
, int nseg
, int error
)
1276 request_t
*req
, *trq
;
1279 struct mpt_softc
*mpt
;
1281 uint32_t flags
, nxt_off
;
1283 MSG_REQUEST_HEADER
*hdrp
;
1288 req
= (request_t
*)arg
;
1291 mpt
= ccb
->ccb_h
.ccb_mpt_ptr
;
1292 req
= ccb
->ccb_h
.ccb_req_ptr
;
1294 hdrp
= req
->req_vbuf
;
1295 mpt_off
= req
->req_vbuf
;
1297 if (error
== 0 && ((uint32_t)nseg
) >= mpt
->max_seg_cnt
) {
1302 switch (hdrp
->Function
) {
1303 case MPI_FUNCTION_SCSI_IO_REQUEST
:
1304 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
:
1306 sglp
= &((PTR_MSG_SCSI_IO_REQUEST
)hdrp
)->SGL
;
1308 case MPI_FUNCTION_TARGET_ASSIST
:
1310 sglp
= &((PTR_MSG_TARGET_ASSIST_REQUEST
)hdrp
)->SGL
;
1313 mpt_prt(mpt
, "bad fct 0x%x in mpt_execute_req_a64\n",
1320 if (error
== 0 && ((uint32_t)nseg
) >= mpt
->max_seg_cnt
) {
1322 mpt_prt(mpt
, "segment count %d too large (max %u)\n",
1323 nseg
, mpt
->max_seg_cnt
);
1328 if (error
!= EFBIG
&& error
!= ENOMEM
) {
1329 mpt_prt(mpt
, "mpt_execute_req_a64: err %d\n", error
);
1331 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) == CAM_REQ_INPROG
) {
1333 mpt_freeze_ccb(ccb
);
1334 if (error
== EFBIG
) {
1335 status
= CAM_REQ_TOO_BIG
;
1336 } else if (error
== ENOMEM
) {
1337 if (mpt
->outofbeer
== 0) {
1339 xpt_freeze_simq(mpt
->sim
, 1);
1340 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
1343 status
= CAM_REQUEUE_REQ
;
1345 status
= CAM_REQ_CMP_ERR
;
1347 mpt_set_ccb_status(ccb
, status
);
1349 if (hdrp
->Function
== MPI_FUNCTION_TARGET_ASSIST
) {
1350 request_t
*cmd_req
=
1351 MPT_TAG_2_REQ(mpt
, ccb
->csio
.tag_id
);
1352 MPT_TGT_STATE(mpt
, cmd_req
)->state
= TGT_STATE_IN_CAM
;
1353 MPT_TGT_STATE(mpt
, cmd_req
)->ccb
= NULL
;
1354 MPT_TGT_STATE(mpt
, cmd_req
)->req
= NULL
;
1356 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
1357 KASSERT(ccb
->ccb_h
.status
, ("zero ccb sts at %d\n", __LINE__
));
1359 CAMLOCK_2_MPTLOCK(mpt
);
1360 mpt_free_request(mpt
, req
);
1361 MPTLOCK_2_CAMLOCK(mpt
);
1366 * No data to transfer?
1367 * Just make a single simple SGL with zero length.
1370 if (mpt
->verbose
>= MPT_PRT_DEBUG
) {
1371 int tidx
= ((char *)sglp
) - mpt_off
;
1372 memset(&mpt_off
[tidx
], 0xff, MPT_REQUEST_AREA
- tidx
);
1376 SGE_SIMPLE32
*se1
= (SGE_SIMPLE32
*) sglp
;
1377 MPI_pSGE_SET_FLAGS(se1
,
1378 (MPI_SGE_FLAGS_LAST_ELEMENT
| MPI_SGE_FLAGS_END_OF_BUFFER
|
1379 MPI_SGE_FLAGS_SIMPLE_ELEMENT
| MPI_SGE_FLAGS_END_OF_LIST
));
1380 se1
->FlagsLength
= htole32(se1
->FlagsLength
);
1385 flags
= MPI_SGE_FLAGS_SIMPLE_ELEMENT
| MPI_SGE_FLAGS_64_BIT_ADDRESSING
;
1387 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
) {
1388 flags
|= MPI_SGE_FLAGS_HOST_TO_IOC
;
1391 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1392 flags
|= MPI_SGE_FLAGS_HOST_TO_IOC
;
1396 if (!(ccb
->ccb_h
.flags
& (CAM_SG_LIST_PHYS
|CAM_DATA_PHYS
))) {
1397 bus_dmasync_op_t op
;
1399 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1400 op
= BUS_DMASYNC_PREREAD
;
1402 op
= BUS_DMASYNC_PREWRITE
;
1405 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1406 op
= BUS_DMASYNC_PREWRITE
;
1408 op
= BUS_DMASYNC_PREREAD
;
1411 bus_dmamap_sync(mpt
->buffer_dmat
, req
->dmap
, op
);
1415 * Okay, fill in what we can at the end of the command frame.
1416 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1417 * the command frame.
1419 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1420 * SIMPLE64 pointers and start doing CHAIN64 entries after
1424 if (nseg
< MPT_NSGL_FIRST(mpt
)) {
1428 * Leave room for CHAIN element
1430 first_lim
= MPT_NSGL_FIRST(mpt
) - 1;
1433 se
= (SGE_SIMPLE64
*) sglp
;
1434 for (seg
= 0; seg
< first_lim
; seg
++, se
++, dm_segs
++) {
1437 memset(se
, 0, sizeof (*se
));
1438 se
->Address
.Low
= htole32(dm_segs
->ds_addr
& 0xffffffff);
1439 if (sizeof(bus_addr_t
) > 4) {
1441 htole32(((uint64_t)dm_segs
->ds_addr
) >> 32);
1443 MPI_pSGE_SET_LENGTH(se
, dm_segs
->ds_len
);
1445 if (seg
== first_lim
- 1) {
1446 tf
|= MPI_SGE_FLAGS_LAST_ELEMENT
;
1448 if (seg
== nseg
- 1) {
1449 tf
|= MPI_SGE_FLAGS_END_OF_LIST
|
1450 MPI_SGE_FLAGS_END_OF_BUFFER
;
1452 MPI_pSGE_SET_FLAGS(se
, tf
);
1453 se
->FlagsLength
= htole32(se
->FlagsLength
);
1461 * Tell the IOC where to find the first chain element.
1463 hdrp
->ChainOffset
= ((char *)se
- (char *)hdrp
) >> 2;
1464 nxt_off
= MPT_RQSL(mpt
);
1468 * Make up the rest of the data segments out of a chain element
1469 * (contiained in the current request frame) which points to
1470 * SIMPLE64 elements in the next request frame, possibly ending
1471 * with *another* chain element (if there's more).
1473 while (seg
< nseg
) {
1475 uint32_t tf
, cur_off
;
1476 bus_addr_t chain_list_addr
;
1479 * Point to the chain descriptor. Note that the chain
1480 * descriptor is at the end of the *previous* list (whether
1483 ce
= (SGE_CHAIN64
*) se
;
1486 * Before we change our current pointer, make sure we won't
1487 * overflow the request area with this frame. Note that we
1488 * test against 'greater than' here as it's okay in this case
1489 * to have next offset be just outside the request area.
1491 if ((nxt_off
+ MPT_RQSL(mpt
)) > MPT_REQUEST_AREA
) {
1492 nxt_off
= MPT_REQUEST_AREA
;
1497 * Set our SGE element pointer to the beginning of the chain
1498 * list and update our next chain list offset.
1500 se
= (SGE_SIMPLE64
*) &mpt_off
[nxt_off
];
1502 nxt_off
+= MPT_RQSL(mpt
);
1505 * Now initialized the chain descriptor.
1507 memset(ce
, 0, sizeof (*ce
));
1510 * Get the physical address of the chain list.
1512 chain_list_addr
= trq
->req_pbuf
;
1513 chain_list_addr
+= cur_off
;
1514 if (sizeof (bus_addr_t
) > 4) {
1516 htole32(((uint64_t)chain_list_addr
) >> 32);
1518 ce
->Address
.Low
= htole32(chain_list_addr
& 0xffffffff);
1519 ce
->Flags
= MPI_SGE_FLAGS_CHAIN_ELEMENT
|
1520 MPI_SGE_FLAGS_64_BIT_ADDRESSING
;
1523 * If we have more than a frame's worth of segments left,
1524 * set up the chain list to have the last element be another
1527 if ((nseg
- seg
) > MPT_NSGL(mpt
)) {
1528 this_seg_lim
= seg
+ MPT_NSGL(mpt
) - 1;
1530 * The length of the chain is the length in bytes of the
1531 * number of segments plus the next chain element.
1533 * The next chain descriptor offset is the length,
1534 * in words, of the number of segments.
1536 ce
->Length
= (this_seg_lim
- seg
) *
1537 sizeof (SGE_SIMPLE64
);
1538 ce
->NextChainOffset
= ce
->Length
>> 2;
1539 ce
->Length
+= sizeof (SGE_CHAIN64
);
1541 this_seg_lim
= nseg
;
1542 ce
->Length
= (this_seg_lim
- seg
) *
1543 sizeof (SGE_SIMPLE64
);
1545 ce
->Length
= htole16(ce
->Length
);
1548 * Fill in the chain list SGE elements with our segment data.
1550 * If we're the last element in this chain list, set the last
1551 * element flag. If we're the completely last element period,
1552 * set the end of list and end of buffer flags.
1554 while (seg
< this_seg_lim
) {
1555 memset(se
, 0, sizeof (*se
));
1556 se
->Address
.Low
= htole32(dm_segs
->ds_addr
&
1558 if (sizeof (bus_addr_t
) > 4) {
1560 htole32(((uint64_t)dm_segs
->ds_addr
) >> 32);
1562 MPI_pSGE_SET_LENGTH(se
, dm_segs
->ds_len
);
1564 if (seg
== this_seg_lim
- 1) {
1565 tf
|= MPI_SGE_FLAGS_LAST_ELEMENT
;
1567 if (seg
== nseg
- 1) {
1568 tf
|= MPI_SGE_FLAGS_END_OF_LIST
|
1569 MPI_SGE_FLAGS_END_OF_BUFFER
;
1571 MPI_pSGE_SET_FLAGS(se
, tf
);
1572 se
->FlagsLength
= htole32(se
->FlagsLength
);
1580 * If we have more segments to do and we've used up all of
1581 * the space in a request area, go allocate another one
1582 * and chain to that.
1584 if (seg
< nseg
&& nxt_off
>= MPT_REQUEST_AREA
) {
1587 CAMLOCK_2_MPTLOCK(mpt
);
1588 nrq
= mpt_get_request(mpt
, FALSE
);
1589 MPTLOCK_2_CAMLOCK(mpt
);
1597 * Append the new request area on the tail of our list.
1599 if ((trq
= req
->chain
) == NULL
) {
1602 while (trq
->chain
!= NULL
) {
1608 mpt_off
= trq
->req_vbuf
;
1609 if (mpt
->verbose
>= MPT_PRT_DEBUG
) {
1610 memset(mpt_off
, 0xff, MPT_REQUEST_AREA
);
1618 * Last time we need to check if this CCB needs to be aborted.
1620 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) != CAM_REQ_INPROG
) {
1621 if (hdrp
->Function
== MPI_FUNCTION_TARGET_ASSIST
) {
1622 request_t
*cmd_req
=
1623 MPT_TAG_2_REQ(mpt
, ccb
->csio
.tag_id
);
1624 MPT_TGT_STATE(mpt
, cmd_req
)->state
= TGT_STATE_IN_CAM
;
1625 MPT_TGT_STATE(mpt
, cmd_req
)->ccb
= NULL
;
1626 MPT_TGT_STATE(mpt
, cmd_req
)->req
= NULL
;
1629 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1630 ccb
->ccb_h
.status
& CAM_STATUS_MASK
);
1631 if (nseg
&& (ccb
->ccb_h
.flags
& CAM_SG_LIST_PHYS
) == 0) {
1632 bus_dmamap_unload(mpt
->buffer_dmat
, req
->dmap
);
1634 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
1635 KASSERT(ccb
->ccb_h
.status
, ("zero ccb sts at %d\n", __LINE__
));
1637 CAMLOCK_2_MPTLOCK(mpt
);
1638 mpt_free_request(mpt
, req
);
1639 MPTLOCK_2_CAMLOCK(mpt
);
1643 ccb
->ccb_h
.status
|= CAM_SIM_QUEUED
;
1644 if (ccb
->ccb_h
.timeout
!= CAM_TIME_INFINITY
) {
1645 mpt_req_timeout(req
, (ccb
->ccb_h
.timeout
* hz
) / 1000,
1648 if (mpt
->verbose
> MPT_PRT_DEBUG
) {
1650 mpt_print_request(req
->req_vbuf
);
1651 for (trq
= req
->chain
; trq
; trq
= trq
->chain
) {
1652 kprintf(" Additional Chain Area %d\n", nc
++);
1653 mpt_dump_sgl(trq
->req_vbuf
, 0);
1657 if (hdrp
->Function
== MPI_FUNCTION_TARGET_ASSIST
) {
1658 request_t
*cmd_req
= MPT_TAG_2_REQ(mpt
, ccb
->csio
.tag_id
);
1659 mpt_tgt_state_t
*tgt
= MPT_TGT_STATE(mpt
, cmd_req
);
1660 #ifdef WE_TRUST_AUTO_GOOD_STATUS
1661 if ((ccb
->ccb_h
.flags
& CAM_SEND_STATUS
) &&
1662 csio
->scsi_status
== SCSI_STATUS_OK
&& tgt
->resid
== 0) {
1663 tgt
->state
= TGT_STATE_MOVING_DATA_AND_STATUS
;
1665 tgt
->state
= TGT_STATE_MOVING_DATA
;
1668 tgt
->state
= TGT_STATE_MOVING_DATA
;
1671 CAMLOCK_2_MPTLOCK(mpt
);
1672 mpt_send_cmd(mpt
, req
);
1673 MPTLOCK_2_CAMLOCK(mpt
);
1677 mpt_execute_req(void *arg
, bus_dma_segment_t
*dm_segs
, int nseg
, int error
)
1679 request_t
*req
, *trq
;
1682 struct mpt_softc
*mpt
;
1684 uint32_t flags
, nxt_off
;
1686 MSG_REQUEST_HEADER
*hdrp
;
1691 req
= (request_t
*)arg
;
1694 mpt
= ccb
->ccb_h
.ccb_mpt_ptr
;
1695 req
= ccb
->ccb_h
.ccb_req_ptr
;
1697 hdrp
= req
->req_vbuf
;
1698 mpt_off
= req
->req_vbuf
;
1701 if (error
== 0 && ((uint32_t)nseg
) >= mpt
->max_seg_cnt
) {
1706 switch (hdrp
->Function
) {
1707 case MPI_FUNCTION_SCSI_IO_REQUEST
:
1708 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
:
1709 sglp
= &((PTR_MSG_SCSI_IO_REQUEST
)hdrp
)->SGL
;
1711 case MPI_FUNCTION_TARGET_ASSIST
:
1713 sglp
= &((PTR_MSG_TARGET_ASSIST_REQUEST
)hdrp
)->SGL
;
1716 mpt_prt(mpt
, "bad fct 0x%x in mpt_execute_req\n",
1723 if (error
== 0 && ((uint32_t)nseg
) >= mpt
->max_seg_cnt
) {
1725 mpt_prt(mpt
, "segment count %d too large (max %u)\n",
1726 nseg
, mpt
->max_seg_cnt
);
1731 if (error
!= EFBIG
&& error
!= ENOMEM
) {
1732 mpt_prt(mpt
, "mpt_execute_req: err %d\n", error
);
1734 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) == CAM_REQ_INPROG
) {
1736 mpt_freeze_ccb(ccb
);
1737 if (error
== EFBIG
) {
1738 status
= CAM_REQ_TOO_BIG
;
1739 } else if (error
== ENOMEM
) {
1740 if (mpt
->outofbeer
== 0) {
1742 xpt_freeze_simq(mpt
->sim
, 1);
1743 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
1746 status
= CAM_REQUEUE_REQ
;
1748 status
= CAM_REQ_CMP_ERR
;
1750 mpt_set_ccb_status(ccb
, status
);
1752 if (hdrp
->Function
== MPI_FUNCTION_TARGET_ASSIST
) {
1753 request_t
*cmd_req
=
1754 MPT_TAG_2_REQ(mpt
, ccb
->csio
.tag_id
);
1755 MPT_TGT_STATE(mpt
, cmd_req
)->state
= TGT_STATE_IN_CAM
;
1756 MPT_TGT_STATE(mpt
, cmd_req
)->ccb
= NULL
;
1757 MPT_TGT_STATE(mpt
, cmd_req
)->req
= NULL
;
1759 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
1760 KASSERT(ccb
->ccb_h
.status
, ("zero ccb sts at %d\n", __LINE__
));
1762 CAMLOCK_2_MPTLOCK(mpt
);
1763 mpt_free_request(mpt
, req
);
1764 MPTLOCK_2_CAMLOCK(mpt
);
1769 * No data to transfer?
1770 * Just make a single simple SGL with zero length.
1773 if (mpt
->verbose
>= MPT_PRT_DEBUG
) {
1774 int tidx
= ((char *)sglp
) - mpt_off
;
1775 memset(&mpt_off
[tidx
], 0xff, MPT_REQUEST_AREA
- tidx
);
1779 SGE_SIMPLE32
*se1
= (SGE_SIMPLE32
*) sglp
;
1780 MPI_pSGE_SET_FLAGS(se1
,
1781 (MPI_SGE_FLAGS_LAST_ELEMENT
| MPI_SGE_FLAGS_END_OF_BUFFER
|
1782 MPI_SGE_FLAGS_SIMPLE_ELEMENT
| MPI_SGE_FLAGS_END_OF_LIST
));
1783 se1
->FlagsLength
= htole32(se1
->FlagsLength
);
1788 flags
= MPI_SGE_FLAGS_SIMPLE_ELEMENT
;
1790 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
) {
1791 flags
|= MPI_SGE_FLAGS_HOST_TO_IOC
;
1794 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1795 flags
|= MPI_SGE_FLAGS_HOST_TO_IOC
;
1799 if (!(ccb
->ccb_h
.flags
& (CAM_SG_LIST_PHYS
|CAM_DATA_PHYS
))) {
1800 bus_dmasync_op_t op
;
1802 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1803 op
= BUS_DMASYNC_PREREAD
;
1805 op
= BUS_DMASYNC_PREWRITE
;
1808 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1809 op
= BUS_DMASYNC_PREWRITE
;
1811 op
= BUS_DMASYNC_PREREAD
;
1814 bus_dmamap_sync(mpt
->buffer_dmat
, req
->dmap
, op
);
1818 * Okay, fill in what we can at the end of the command frame.
1819 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1820 * the command frame.
1822 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1823 * SIMPLE32 pointers and start doing CHAIN32 entries after
1827 if (nseg
< MPT_NSGL_FIRST(mpt
)) {
1831 * Leave room for CHAIN element
1833 first_lim
= MPT_NSGL_FIRST(mpt
) - 1;
1836 se
= (SGE_SIMPLE32
*) sglp
;
1837 for (seg
= 0; seg
< first_lim
; seg
++, se
++, dm_segs
++) {
1840 memset(se
, 0,sizeof (*se
));
1841 se
->Address
= htole32(dm_segs
->ds_addr
);
1845 MPI_pSGE_SET_LENGTH(se
, dm_segs
->ds_len
);
1847 if (seg
== first_lim
- 1) {
1848 tf
|= MPI_SGE_FLAGS_LAST_ELEMENT
;
1850 if (seg
== nseg
- 1) {
1851 tf
|= MPI_SGE_FLAGS_END_OF_LIST
|
1852 MPI_SGE_FLAGS_END_OF_BUFFER
;
1854 MPI_pSGE_SET_FLAGS(se
, tf
);
1855 se
->FlagsLength
= htole32(se
->FlagsLength
);
1863 * Tell the IOC where to find the first chain element.
1865 hdrp
->ChainOffset
= ((char *)se
- (char *)hdrp
) >> 2;
1866 nxt_off
= MPT_RQSL(mpt
);
1870 * Make up the rest of the data segments out of a chain element
1871 * (contiained in the current request frame) which points to
1872 * SIMPLE32 elements in the next request frame, possibly ending
1873 * with *another* chain element (if there's more).
1875 while (seg
< nseg
) {
1877 uint32_t tf
, cur_off
;
1878 bus_addr_t chain_list_addr
;
1881 * Point to the chain descriptor. Note that the chain
1882 * descriptor is at the end of the *previous* list (whether
1885 ce
= (SGE_CHAIN32
*) se
;
1888 * Before we change our current pointer, make sure we won't
1889 * overflow the request area with this frame. Note that we
1890 * test against 'greater than' here as it's okay in this case
1891 * to have next offset be just outside the request area.
1893 if ((nxt_off
+ MPT_RQSL(mpt
)) > MPT_REQUEST_AREA
) {
1894 nxt_off
= MPT_REQUEST_AREA
;
1899 * Set our SGE element pointer to the beginning of the chain
1900 * list and update our next chain list offset.
1902 se
= (SGE_SIMPLE32
*) &mpt_off
[nxt_off
];
1904 nxt_off
+= MPT_RQSL(mpt
);
1907 * Now initialized the chain descriptor.
1909 memset(ce
, 0, sizeof (*ce
));
1912 * Get the physical address of the chain list.
1914 chain_list_addr
= trq
->req_pbuf
;
1915 chain_list_addr
+= cur_off
;
1919 ce
->Address
= htole32(chain_list_addr
);
1920 ce
->Flags
= MPI_SGE_FLAGS_CHAIN_ELEMENT
;
1924 * If we have more than a frame's worth of segments left,
1925 * set up the chain list to have the last element be another
1928 if ((nseg
- seg
) > MPT_NSGL(mpt
)) {
1929 this_seg_lim
= seg
+ MPT_NSGL(mpt
) - 1;
1931 * The length of the chain is the length in bytes of the
1932 * number of segments plus the next chain element.
1934 * The next chain descriptor offset is the length,
1935 * in words, of the number of segments.
1937 ce
->Length
= (this_seg_lim
- seg
) *
1938 sizeof (SGE_SIMPLE32
);
1939 ce
->NextChainOffset
= ce
->Length
>> 2;
1940 ce
->Length
+= sizeof (SGE_CHAIN32
);
1942 this_seg_lim
= nseg
;
1943 ce
->Length
= (this_seg_lim
- seg
) *
1944 sizeof (SGE_SIMPLE32
);
1946 ce
->Length
= htole16(ce
->Length
);
1949 * Fill in the chain list SGE elements with our segment data.
1951 * If we're the last element in this chain list, set the last
1952 * element flag. If we're the completely last element period,
1953 * set the end of list and end of buffer flags.
1955 while (seg
< this_seg_lim
) {
1956 memset(se
, 0, sizeof (*se
));
1957 se
->Address
= htole32(dm_segs
->ds_addr
);
1962 MPI_pSGE_SET_LENGTH(se
, dm_segs
->ds_len
);
1964 if (seg
== this_seg_lim
- 1) {
1965 tf
|= MPI_SGE_FLAGS_LAST_ELEMENT
;
1967 if (seg
== nseg
- 1) {
1968 tf
|= MPI_SGE_FLAGS_END_OF_LIST
|
1969 MPI_SGE_FLAGS_END_OF_BUFFER
;
1971 MPI_pSGE_SET_FLAGS(se
, tf
);
1972 se
->FlagsLength
= htole32(se
->FlagsLength
);
1980 * If we have more segments to do and we've used up all of
1981 * the space in a request area, go allocate another one
1982 * and chain to that.
1984 if (seg
< nseg
&& nxt_off
>= MPT_REQUEST_AREA
) {
1987 CAMLOCK_2_MPTLOCK(mpt
);
1988 nrq
= mpt_get_request(mpt
, FALSE
);
1989 MPTLOCK_2_CAMLOCK(mpt
);
1997 * Append the new request area on the tail of our list.
1999 if ((trq
= req
->chain
) == NULL
) {
2002 while (trq
->chain
!= NULL
) {
2008 mpt_off
= trq
->req_vbuf
;
2009 if (mpt
->verbose
>= MPT_PRT_DEBUG
) {
2010 memset(mpt_off
, 0xff, MPT_REQUEST_AREA
);
2018 * Last time we need to check if this CCB needs to be aborted.
2020 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) != CAM_REQ_INPROG
) {
2021 if (hdrp
->Function
== MPI_FUNCTION_TARGET_ASSIST
) {
2022 request_t
*cmd_req
=
2023 MPT_TAG_2_REQ(mpt
, ccb
->csio
.tag_id
);
2024 MPT_TGT_STATE(mpt
, cmd_req
)->state
= TGT_STATE_IN_CAM
;
2025 MPT_TGT_STATE(mpt
, cmd_req
)->ccb
= NULL
;
2026 MPT_TGT_STATE(mpt
, cmd_req
)->req
= NULL
;
2029 "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2030 ccb
->ccb_h
.status
& CAM_STATUS_MASK
);
2031 if (nseg
&& (ccb
->ccb_h
.flags
& CAM_SG_LIST_PHYS
) == 0) {
2032 bus_dmamap_unload(mpt
->buffer_dmat
, req
->dmap
);
2034 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
2035 KASSERT(ccb
->ccb_h
.status
, ("zero ccb sts at %d\n", __LINE__
));
2037 CAMLOCK_2_MPTLOCK(mpt
);
2038 mpt_free_request(mpt
, req
);
2039 MPTLOCK_2_CAMLOCK(mpt
);
2043 ccb
->ccb_h
.status
|= CAM_SIM_QUEUED
;
2044 if (ccb
->ccb_h
.timeout
!= CAM_TIME_INFINITY
) {
2045 mpt_req_timeout(req
, (ccb
->ccb_h
.timeout
* hz
) / 1000,
2048 if (mpt
->verbose
> MPT_PRT_DEBUG
) {
2050 mpt_print_request(req
->req_vbuf
);
2051 for (trq
= req
->chain
; trq
; trq
= trq
->chain
) {
2052 kprintf(" Additional Chain Area %d\n", nc
++);
2053 mpt_dump_sgl(trq
->req_vbuf
, 0);
2057 if (hdrp
->Function
== MPI_FUNCTION_TARGET_ASSIST
) {
2058 request_t
*cmd_req
= MPT_TAG_2_REQ(mpt
, ccb
->csio
.tag_id
);
2059 mpt_tgt_state_t
*tgt
= MPT_TGT_STATE(mpt
, cmd_req
);
2060 #ifdef WE_TRUST_AUTO_GOOD_STATUS
2061 if ((ccb
->ccb_h
.flags
& CAM_SEND_STATUS
) &&
2062 csio
->scsi_status
== SCSI_STATUS_OK
&& tgt
->resid
== 0) {
2063 tgt
->state
= TGT_STATE_MOVING_DATA_AND_STATUS
;
2065 tgt
->state
= TGT_STATE_MOVING_DATA
;
2068 tgt
->state
= TGT_STATE_MOVING_DATA
;
2071 CAMLOCK_2_MPTLOCK(mpt
);
2072 mpt_send_cmd(mpt
, req
);
2073 MPTLOCK_2_CAMLOCK(mpt
);
2077 mpt_start(struct cam_sim
*sim
, union ccb
*ccb
)
2080 struct mpt_softc
*mpt
;
2081 MSG_SCSI_IO_REQUEST
*mpt_req
;
2082 struct ccb_scsiio
*csio
= &ccb
->csio
;
2083 struct ccb_hdr
*ccbh
= &ccb
->ccb_h
;
2084 bus_dmamap_callback_t
*cb
;
2088 /* Get the pointer for the physical addapter */
2089 mpt
= ccb
->ccb_h
.ccb_mpt_ptr
;
2090 raid_passthru
= (sim
== mpt
->phydisk_sim
);
2092 CAMLOCK_2_MPTLOCK(mpt
);
2093 if ((req
= mpt_get_request(mpt
, FALSE
)) == NULL
) {
2094 if (mpt
->outofbeer
== 0) {
2096 xpt_freeze_simq(mpt
->sim
, 1);
2097 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "FREEZEQ\n");
2099 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
2100 mpt_set_ccb_status(ccb
, CAM_REQUEUE_REQ
);
2101 MPTLOCK_2_CAMLOCK(mpt
);
2106 mpt_req_not_spcl(mpt
, req
, "mpt_start", __LINE__
);
2108 MPTLOCK_2_CAMLOCK(mpt
);
2110 if (sizeof (bus_addr_t
) > 4) {
2111 cb
= mpt_execute_req_a64
;
2113 cb
= mpt_execute_req
;
2117 * Link the ccb and the request structure so we can find
2118 * the other knowing either the request or the ccb
2121 ccb
->ccb_h
.ccb_req_ptr
= req
;
2123 /* Now we build the command for the IOC */
2124 mpt_req
= req
->req_vbuf
;
2125 memset(mpt_req
, 0, sizeof (MSG_SCSI_IO_REQUEST
));
2127 mpt_req
->Function
= MPI_FUNCTION_SCSI_IO_REQUEST
;
2128 if (raid_passthru
) {
2129 mpt_req
->Function
= MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
;
2130 CAMLOCK_2_MPTLOCK(mpt
);
2131 if (mpt_map_physdisk(mpt
, ccb
, &tgt
) != 0) {
2132 MPTLOCK_2_CAMLOCK(mpt
);
2133 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
2134 mpt_set_ccb_status(ccb
, CAM_DEV_NOT_THERE
);
2138 MPTLOCK_2_CAMLOCK(mpt
);
2139 mpt_req
->Bus
= 0; /* we never set bus here */
2141 tgt
= ccb
->ccb_h
.target_id
;
2142 mpt_req
->Bus
= 0; /* XXX */
2145 mpt_req
->SenseBufferLength
=
2146 (csio
->sense_len
< MPT_SENSE_SIZE
) ?
2147 csio
->sense_len
: MPT_SENSE_SIZE
;
2150 * We use the message context to find the request structure when we
2151 * Get the command completion interrupt from the IOC.
2153 mpt_req
->MsgContext
= htole32(req
->index
| scsi_io_handler_id
);
2155 /* Which physical device to do the I/O on */
2156 mpt_req
->TargetID
= tgt
;
2158 /* We assume a single level LUN type */
2159 if (ccb
->ccb_h
.target_lun
>= MPT_MAX_LUNS
) {
2160 mpt_req
->LUN
[0] = 0x40 | ((ccb
->ccb_h
.target_lun
>> 8) & 0x3f);
2161 mpt_req
->LUN
[1] = ccb
->ccb_h
.target_lun
& 0xff;
2163 mpt_req
->LUN
[1] = ccb
->ccb_h
.target_lun
;
2166 /* Set the direction of the transfer */
2167 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
2168 mpt_req
->Control
= MPI_SCSIIO_CONTROL_READ
;
2169 } else if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
) {
2170 mpt_req
->Control
= MPI_SCSIIO_CONTROL_WRITE
;
2172 mpt_req
->Control
= MPI_SCSIIO_CONTROL_NODATATRANSFER
;
2175 if ((ccb
->ccb_h
.flags
& CAM_TAG_ACTION_VALID
) != 0) {
2176 switch(ccb
->csio
.tag_action
) {
2177 case MSG_HEAD_OF_Q_TAG
:
2178 mpt_req
->Control
|= MPI_SCSIIO_CONTROL_HEADOFQ
;
2181 mpt_req
->Control
|= MPI_SCSIIO_CONTROL_ACAQ
;
2183 case MSG_ORDERED_Q_TAG
:
2184 mpt_req
->Control
|= MPI_SCSIIO_CONTROL_ORDEREDQ
;
2186 case MSG_SIMPLE_Q_TAG
:
2188 mpt_req
->Control
|= MPI_SCSIIO_CONTROL_SIMPLEQ
;
2192 if (mpt
->is_fc
|| mpt
->is_sas
) {
2193 mpt_req
->Control
|= MPI_SCSIIO_CONTROL_SIMPLEQ
;
2195 /* XXX No such thing for a target doing packetized. */
2196 mpt_req
->Control
|= MPI_SCSIIO_CONTROL_UNTAGGED
;
2201 if (ccb
->ccb_h
.flags
& CAM_DIS_DISCONNECT
) {
2202 mpt_req
->Control
|= MPI_SCSIIO_CONTROL_NO_DISCONNECT
;
2205 mpt_req
->Control
= htole32(mpt_req
->Control
);
2207 /* Copy the scsi command block into place */
2208 if ((ccb
->ccb_h
.flags
& CAM_CDB_POINTER
) != 0) {
2209 bcopy(csio
->cdb_io
.cdb_ptr
, mpt_req
->CDB
, csio
->cdb_len
);
2211 bcopy(csio
->cdb_io
.cdb_bytes
, mpt_req
->CDB
, csio
->cdb_len
);
2214 mpt_req
->CDBLength
= csio
->cdb_len
;
2215 mpt_req
->DataLength
= htole32(csio
->dxfer_len
);
2216 mpt_req
->SenseBufferLowAddr
= htole32(req
->sense_pbuf
);
2219 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2221 if (mpt
->verbose
== MPT_PRT_DEBUG
) {
2223 mpt_prt(mpt
, "mpt_start: %s op 0x%x ",
2224 (mpt_req
->Function
== MPI_FUNCTION_SCSI_IO_REQUEST
)?
2225 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req
->CDB
[0]);
2226 df
= mpt_req
->Control
& MPI_SCSIIO_CONTROL_DATADIRECTION_MASK
;
2227 if (df
!= MPI_SCSIIO_CONTROL_NODATATRANSFER
) {
2228 mpt_prtc(mpt
, "(%s %u byte%s ",
2229 (df
== MPI_SCSIIO_CONTROL_READ
)?
2230 "read" : "write", csio
->dxfer_len
,
2231 (csio
->dxfer_len
== 1)? ")" : "s)");
2233 mpt_prtc(mpt
, "tgt %u lun %u req %p:%u\n", tgt
,
2234 ccb
->ccb_h
.target_lun
, req
, req
->serno
);
2238 * If we have any data to send with this command, map it into bus space.
2240 if ((ccbh
->flags
& CAM_DIR_MASK
) != CAM_DIR_NONE
) {
2241 if ((ccbh
->flags
& CAM_SCATTER_VALID
) == 0) {
2243 * We've been given a pointer to a single buffer.
2245 if ((ccbh
->flags
& CAM_DATA_PHYS
) == 0) {
2247 * Virtual address that needs to be translated into
2248 * one or more physical address ranges.
2251 error
= bus_dmamap_load(mpt
->buffer_dmat
,
2252 req
->dmap
, csio
->data_ptr
, csio
->dxfer_len
,
2254 if (error
== EINPROGRESS
) {
2256 * So as to maintain ordering,
2257 * freeze the controller queue
2258 * until our mapping is
2261 xpt_freeze_simq(mpt
->sim
, 1);
2262 ccbh
->status
|= CAM_RELEASE_SIMQ
;
2266 * We have been given a pointer to single
2269 struct bus_dma_segment seg
;
2271 (bus_addr_t
)(vm_offset_t
)csio
->data_ptr
;
2272 seg
.ds_len
= csio
->dxfer_len
;
2273 (*cb
)(req
, &seg
, 1, 0);
2277 * We have been given a list of addresses.
2278 * This case could be easily supported but they are not
2279 * currently generated by the CAM subsystem so there
2280 * is no point in wasting the time right now.
2282 struct bus_dma_segment
*segs
;
2283 if ((ccbh
->flags
& CAM_SG_LIST_PHYS
) == 0) {
2284 (*cb
)(req
, NULL
, 0, EFAULT
);
2286 /* Just use the segments provided */
2287 segs
= (struct bus_dma_segment
*)csio
->data_ptr
;
2288 (*cb
)(req
, segs
, csio
->sglist_cnt
, 0);
2292 (*cb
)(req
, NULL
, 0, 0);
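/*
 * Reset a target/lun nexus or the whole bus via a SCSI task management
 * function and wait for the IOC to process it; a timeout, bad IOC status,
 * or bad TMF response escalates to a hard controller reset.
 */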
mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
    int sleep_ok)
{
    error = mpt_scsi_send_tmf(mpt,
        (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
        MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
        MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
        mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
        0,    /* XXX How do I get the channel ID? */
        tgt != CAM_TARGET_WILDCARD ? tgt : 0,
        lun != CAM_LUN_WILDCARD ? lun : 0,
        0, sleep_ok);
    /*
     * mpt_scsi_send_tmf hard resets on failure, so no
     * need to do so here.
     */
        "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);

    /* Wait for bus reset to be processed by the IOC. */
    error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
        REQ_STATE_DONE, sleep_ok, 5000);

    status = le16toh(mpt->tmf_req->IOCStatus);
    response = mpt->tmf_req->ResponseCode;
    mpt->tmf_req->state = REQ_STATE_FREE;

    mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
        "Resetting controller.\n");
    mpt_reset(mpt, TRUE);

    if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
        mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
            "Resetting controller.\n", status);
        mpt_reset(mpt, TRUE);
    }

    if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
        response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
        mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
            "Resetting controller.\n", response);
        mpt_reset(mpt, TRUE);
    }
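/*
 * Ask the IOC to reset the Fibre Channel link by sending an FC
 * primitive-send request with the RESET_LINK flag set, optionally
 * waiting for the request to complete.
 */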
mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
{
    PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;

    req = mpt_get_request(mpt, FALSE);
    memset(fc, 0, sizeof(*fc));
    fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
    fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
    fc->MsgContext = htole32(req->index | fc_els_handler_id);
    mpt_send_cmd(mpt, req);
    r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
        REQ_STATE_DONE, FALSE, 60 * 1000);
    mpt_free_request(mpt, req);
2384 mpt_cam_event(struct mpt_softc
*mpt
, request_t
*req
,
2385 MSG_EVENT_NOTIFY_REPLY
*msg
)
2387 uint32_t data0
, data1
;
2389 data0
= le32toh(msg
->Data
[0]);
2390 data1
= le32toh(msg
->Data
[1]);
2391 switch(msg
->Event
& 0xFF) {
2392 case MPI_EVENT_UNIT_ATTENTION
:
2393 mpt_prt(mpt
, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2394 (data0
>> 8) & 0xff, data0
& 0xff);
2397 case MPI_EVENT_IOC_BUS_RESET
:
2398 /* We generated a bus reset */
2399 mpt_prt(mpt
, "IOC Generated Bus Reset Port: %d\n",
2400 (data0
>> 8) & 0xff);
2401 xpt_async(AC_BUS_RESET
, mpt
->path
, NULL
);
2404 case MPI_EVENT_EXT_BUS_RESET
:
2405 /* Someone else generated a bus reset */
2406 mpt_prt(mpt
, "External Bus Reset Detected\n");
2408 * These replies don't return EventData like the MPI
2411 xpt_async(AC_BUS_RESET
, mpt
->path
, NULL
);
2414 case MPI_EVENT_RESCAN
:
2415 #if __FreeBSD_version >= 600000
2420 * In general this means a device has been added to the loop.
2422 mpt_prt(mpt
, "Rescan Port: %d\n", (data0
>> 8) & 0xff);
2423 if (mpt
->ready
== 0) {
2426 if (mpt
->phydisk_sim
) {
2427 pathid
= cam_sim_path(mpt
->phydisk_sim
);
2429 pathid
= cam_sim_path(mpt
->sim
);
2431 MPTLOCK_2_CAMLOCK(mpt
);
2433 * Allocate a CCB, create a wildcard path for this bus,
2434 * and schedule a rescan.
2436 ccb
= xpt_alloc_ccb_nowait();
2438 mpt_prt(mpt
, "unable to alloc CCB for rescan\n");
2439 CAMLOCK_2_MPTLOCK(mpt
);
2443 if (xpt_create_path(&ccb
->ccb_h
.path
, xpt_periph
, pathid
,
2444 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
2445 CAMLOCK_2_MPTLOCK(mpt
);
2446 mpt_prt(mpt
, "unable to create path for rescan\n");
2451 CAMLOCK_2_MPTLOCK(mpt
);
2455 mpt_prt(mpt
, "Rescan Port: %d\n", (data0
>> 8) & 0xff);
2458 case MPI_EVENT_LINK_STATUS_CHANGE
:
2459 mpt_prt(mpt
, "Port %d: LinkState: %s\n",
2460 (data1
>> 8) & 0xff,
2461 ((data0
& 0xff) == 0)? "Failed" : "Active");
2464 case MPI_EVENT_LOOP_STATE_CHANGE
:
2465 switch ((data0
>> 16) & 0xff) {
2468 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2469 "(Loop Initialization)\n",
2470 (data1
>> 8) & 0xff,
2471 (data0
>> 8) & 0xff,
2473 switch ((data0
>> 8) & 0xff) {
2475 if ((data0
& 0xff) == 0xF7) {
2476 mpt_prt(mpt
, "Device needs AL_PA\n");
2478 mpt_prt(mpt
, "Device %02x doesn't like "
2484 if ((data0
& 0xff) == 0xF7) {
2485 mpt_prt(mpt
, "Device had loop failure "
2486 "at its receiver prior to acquiring"
2489 mpt_prt(mpt
, "Device %02x detected loop"
2490 " failure at its receiver\n",
2495 mpt_prt(mpt
, "Device %02x requests that device "
2496 "%02x reset itself\n",
2498 (data0
>> 8) & 0xFF);
2503 mpt_prt(mpt
, "Port 0x%x: FC LinkEvent: "
2504 "LPE(%02x,%02x) (Loop Port Enable)\n",
2505 (data1
>> 8) & 0xff, /* Port */
2506 (data0
>> 8) & 0xff, /* Character 3 */
2507 (data0
) & 0xff /* Character 4 */);
2510 mpt_prt(mpt
, "Port 0x%x: FC LinkEvent: "
2511 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2512 (data1
>> 8) & 0xff, /* Port */
2513 (data0
>> 8) & 0xff, /* Character 3 */
2514 (data0
) & 0xff /* Character 4 */);
2517 mpt_prt(mpt
, "Port 0x%x: FC LinkEvent: Unknown "
2518 "FC event (%02x %02x %02x)\n",
2519 (data1
>> 8) & 0xff, /* Port */
2520 (data0
>> 16) & 0xff, /* Event */
2521 (data0
>> 8) & 0xff, /* Character 3 */
2522 (data0
) & 0xff /* Character 4 */);
2526 case MPI_EVENT_LOGOUT
:
2527 mpt_prt(mpt
, "FC Logout Port: %d N_PortID: %02x\n",
2528 (data1
>> 8) & 0xff, data0
);
2530 case MPI_EVENT_QUEUE_FULL
:
2532 struct cam_sim
*sim
;
2533 struct cam_path
*tmppath
;
2534 struct ccb_relsim crs
;
2535 PTR_EVENT_DATA_QUEUE_FULL pqf
;
2538 pqf
= (PTR_EVENT_DATA_QUEUE_FULL
)msg
->Data
;
2539 pqf
->CurrentDepth
= le16toh(pqf
->CurrentDepth
);
2540 mpt_prt(mpt
, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2541 "%d\n", pqf
->Bus
, pqf
->TargetID
, pqf
->CurrentDepth
);
2542 if (mpt
->phydisk_sim
) {
2543 sim
= mpt
->phydisk_sim
;
2547 MPTLOCK_2_CAMLOCK(mpt
);
2548 for (lun_id
= 0; lun_id
< MPT_MAX_LUNS
; lun_id
++) {
2549 if (xpt_create_path(&tmppath
, NULL
, cam_sim_path(sim
),
2550 pqf
->TargetID
, lun_id
) != CAM_REQ_CMP
) {
2551 mpt_prt(mpt
, "unable to create a path to send "
2553 CAMLOCK_2_MPTLOCK(mpt
);
2556 xpt_setup_ccb(&crs
.ccb_h
, tmppath
, 5);
2557 crs
.ccb_h
.func_code
= XPT_REL_SIMQ
;
2558 crs
.release_flags
= RELSIM_ADJUST_OPENINGS
;
2559 crs
.openings
= pqf
->CurrentDepth
- 1;
2560 xpt_action((union ccb
*)&crs
);
2561 if (crs
.ccb_h
.status
!= CAM_REQ_CMP
) {
2562 mpt_prt(mpt
, "XPT_REL_SIMQ failed\n");
2564 xpt_free_path(tmppath
);
2566 CAMLOCK_2_MPTLOCK(mpt
);
2569 case MPI_EVENT_EVENT_CHANGE
:
2570 case MPI_EVENT_INTEGRATED_RAID
:
2571 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE
:
2572 case MPI_EVENT_SAS_SES
:
2575 mpt_lprt(mpt
, MPT_PRT_WARN
, "mpt_cam_event: 0x%x\n",
2576 (unsigned)msg
->Event
& 0xFF);
2583 * Reply path for all SCSI I/O requests, called from our
2584 * interrupt handler by extracting our handler index from
2585 * the MsgContext field of the reply from the IOC.
2587 * This routine is optimized for the common case of a
2588 * completion without error. All exception handling is
2589 * offloaded to non-inlined helper routines to minimize
2593 mpt_scsi_reply_handler(struct mpt_softc
*mpt
, request_t
*req
,
2594 uint32_t reply_desc
, MSG_DEFAULT_REPLY
*reply_frame
)
2596 MSG_SCSI_IO_REQUEST
*scsi_req
;
2599 if (req
->state
== REQ_STATE_FREE
) {
2600 mpt_prt(mpt
, "mpt_scsi_reply_handler: req already free\n");
2604 scsi_req
= (MSG_SCSI_IO_REQUEST
*)req
->req_vbuf
;
2607 mpt_prt(mpt
, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2612 mpt_req_untimeout(req
, mpt_timeout
, ccb
);
2613 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
2615 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) != CAM_DIR_NONE
) {
2616 bus_dmasync_op_t op
;
2618 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
)
2619 op
= BUS_DMASYNC_POSTREAD
;
2621 op
= BUS_DMASYNC_POSTWRITE
;
2622 bus_dmamap_sync(mpt
->buffer_dmat
, req
->dmap
, op
);
2623 bus_dmamap_unload(mpt
->buffer_dmat
, req
->dmap
);
2626 if (reply_frame
== NULL
) {
2628 * Context only reply, completion without error status.
2630 ccb
->csio
.resid
= 0;
2631 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
2632 ccb
->csio
.scsi_status
= SCSI_STATUS_OK
;
2634 mpt_scsi_reply_frame_handler(mpt
, req
, reply_frame
);
2637 if (mpt
->outofbeer
) {
2638 ccb
->ccb_h
.status
|= CAM_RELEASE_SIMQ
;
2640 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "THAWQ\n");
2642 if (scsi_req
->CDB
[0] == INQUIRY
&& (scsi_req
->CDB
[1] & SI_EVPD
) == 0) {
2643 struct scsi_inquiry_data
*iq
=
2644 (struct scsi_inquiry_data
*)ccb
->csio
.data_ptr
;
2645 if (scsi_req
->Function
==
2646 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
) {
2648 * Fake out the device type so that only the
2649 * pass-thru device will attach.
2651 iq
->device
&= ~0x1F;
2652 iq
->device
|= T_NODEVICE
;
2655 if (mpt
->verbose
== MPT_PRT_DEBUG
) {
2656 mpt_prt(mpt
, "mpt_scsi_reply_handler: %p:%u complete\n",
2659 KASSERT(ccb
->ccb_h
.status
, ("zero ccb sts at %d\n", __LINE__
));
2660 MPTLOCK_2_CAMLOCK(mpt
);
2662 CAMLOCK_2_MPTLOCK(mpt
);
2663 if ((req
->state
& REQ_STATE_TIMEDOUT
) == 0) {
2664 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
2666 mpt_prt(mpt
, "completing timedout/aborted req %p:%u\n",
2668 TAILQ_REMOVE(&mpt
->request_timeout_list
, req
, links
);
2670 KASSERT((req
->state
& REQ_STATE_NEED_WAKEUP
) == 0,
2671 ("CCB req needed wakeup"));
2673 mpt_req_not_spcl(mpt
, req
, "mpt_scsi_reply_handler", __LINE__
);
2675 mpt_free_request(mpt
, req
);
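/*
 * Completion handler for SCSI task management functions: record the IOC
 * status and response code of the TMF in the request so that waiters can
 * examine them, then either wake the waiter or free the shared tmf_req.
 */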
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;

    KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
    mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
    tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;

    /* Record IOC Status and Response Code of TMF for any waiters. */
    req->IOCStatus = le16toh(tmf_reply->IOCStatus);
    req->ResponseCode = tmf_reply->ResponseCode;

    mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
        req, req->serno, le16toh(tmf_reply->IOCStatus));
    TAILQ_REMOVE(&mpt->request_pending_list, req, links);
    if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
        req->state |= REQ_STATE_DONE;
    } else {
        mpt->tmf_req->state = REQ_STATE_FREE;
    }
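/*
 * Build and send a link service response, reusing the ELS request that
 * carried the original link service buffer post reply.
 */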
 * XXX: Move to definitions file
 */

mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
{
    MSG_LINK_SERVICE_RSP_REQUEST tmp;
    PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;

    /*
     * We are going to reuse the ELS request to send this response back.
     */
    memset(rsp, 0, sizeof(*rsp));

#ifdef USE_IMMEDIATE_LINK_DATA
    /*
     * Apparently the IMMEDIATE stuff doesn't seem to work.
     */
    rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
#endif
    rsp->RspLength = length;
    rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
    rsp->MsgContext = htole32(req->index | fc_els_handler_id);

    /*
     * Copy over information from the original reply frame to
     * its correct place in the response.
     */
    memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);

    /*
     * And now copy back the temporary area to the original frame.
     */
    memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
    rsp = req->req_vbuf;

#ifdef USE_IMMEDIATE_LINK_DATA
    memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
#else
    {
        PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
        bus_addr_t paddr = req->req_pbuf;
        paddr += MPT_RQSL(mpt);

        fl = MPI_SGE_FLAGS_HOST_TO_IOC |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI_SGE_FLAGS_LAST_ELEMENT |
            MPI_SGE_FLAGS_END_OF_LIST |
            MPI_SGE_FLAGS_END_OF_BUFFER;
        fl <<= MPI_SGE_FLAGS_SHIFT;
        se->FlagsLength = htole32(fl);
        se->Address = htole32((uint32_t) paddr);
    }
#endif

    mpt_send_cmd(mpt, req);
2788 mpt_fc_els_reply_handler(struct mpt_softc
*mpt
, request_t
*req
,
2789 uint32_t reply_desc
, MSG_DEFAULT_REPLY
*reply_frame
)
2791 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp
=
2792 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY
) reply_frame
;
2796 U16 status
= le16toh(reply_frame
->IOCStatus
);
2799 int do_refresh
= TRUE
;
2802 KASSERT(mpt_req_on_free_list(mpt
, req
) == 0,
2803 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2804 req
, req
->serno
, rp
->Function
));
2805 if (rp
->Function
!= MPI_FUNCTION_FC_PRIMITIVE_SEND
) {
2806 mpt_req_spcl(mpt
, req
, "fc_els_reply_handler", __LINE__
);
2808 mpt_req_not_spcl(mpt
, req
, "fc_els_reply_handler", __LINE__
);
2811 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
2812 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2813 req
, req
->serno
, reply_frame
, reply_frame
->Function
);
2815 if (status
!= MPI_IOCSTATUS_SUCCESS
) {
2816 mpt_prt(mpt
, "ELS REPLY STATUS 0x%x for Function %x\n",
2817 status
, reply_frame
->Function
);
2818 if (status
== MPI_IOCSTATUS_INVALID_STATE
) {
2820 * XXX: to get around shutdown issue
2829 * If the function was a link service response, we recycle the
2830 * response to be a refresh for a new link service request.
2832 * The request pointer is bogus in this case and we have to fetch
2833 * it based upon the TransactionContext.
2835 if (rp
->Function
== MPI_FUNCTION_FC_LINK_SRVC_RSP
) {
2836 /* Freddie Uncle Charlie Katie */
2837 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2838 for (ioindex
= 0; ioindex
< mpt
->els_cmds_allocated
; ioindex
++)
2839 if (mpt
->els_cmd_ptrs
[ioindex
] == req
) {
2843 KASSERT(ioindex
< mpt
->els_cmds_allocated
,
2844 ("can't find my mommie!"));
2846 /* remove from active list as we're going to re-post it */
2847 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
2848 req
->state
&= ~REQ_STATE_QUEUED
;
2849 req
->state
|= REQ_STATE_DONE
;
2850 mpt_fc_post_els(mpt
, req
, ioindex
);
2854 if (rp
->Function
== MPI_FUNCTION_FC_PRIMITIVE_SEND
) {
2855 /* remove from active list as we're done */
2856 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
2857 req
->state
&= ~REQ_STATE_QUEUED
;
2858 req
->state
|= REQ_STATE_DONE
;
2859 if (req
->state
& REQ_STATE_TIMEDOUT
) {
2860 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
2861 "Sync Primitive Send Completed After Timeout\n");
2862 mpt_free_request(mpt
, req
);
2863 } else if ((req
->state
& REQ_STATE_NEED_WAKEUP
) == 0) {
2864 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
2865 "Async Primitive Send Complete\n");
2866 mpt_free_request(mpt
, req
);
2868 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
2869 "Sync Primitive Send Complete- Waking Waiter\n");
2875 if (rp
->Function
!= MPI_FUNCTION_FC_LINK_SRVC_BUF_POST
) {
2876 mpt_prt(mpt
, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2877 "Length %d Message Flags %x\n", rp
->Function
, rp
->Flags
,
2878 rp
->MsgLength
, rp
->MsgFlags
);
2882 if (rp
->MsgLength
<= 5) {
2884 * This is just a ack of an original ELS buffer post
2886 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
2887 "RECV'd ACK of FC_ELS buf post %p:%u\n", req
, req
->serno
);
2892 rctl
= (le32toh(rp
->Rctl_Did
) & MPI_FC_RCTL_MASK
) >> MPI_FC_RCTL_SHIFT
;
2893 type
= (le32toh(rp
->Type_Fctl
) & MPI_FC_TYPE_MASK
) >> MPI_FC_TYPE_SHIFT
;
2895 elsbuf
= &((U32
*)req
->req_vbuf
)[MPT_RQSL(mpt
)/sizeof (U32
)];
2896 cmd
= be32toh(elsbuf
[0]) >> 24;
2898 if (rp
->Flags
& MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED
) {
2899 mpt_lprt(mpt
, MPT_PRT_ALWAYS
, "ELS_REPLY: response unneeded\n");
2903 ioindex
= le32toh(rp
->TransactionContext
);
2904 req
= mpt
->els_cmd_ptrs
[ioindex
];
2906 if (rctl
== ELS
&& type
== 1) {
2910 * Send back a PRLI ACC
2912 mpt_prt(mpt
, "PRLI from 0x%08x%08x\n",
2913 le32toh(rp
->Wwn
.PortNameHigh
),
2914 le32toh(rp
->Wwn
.PortNameLow
));
2915 elsbuf
[0] = htobe32(0x02100014);
2916 elsbuf
[1] |= htobe32(0x00000100);
2917 elsbuf
[4] = htobe32(0x00000002);
2918 if (mpt
->role
& MPT_ROLE_TARGET
)
2919 elsbuf
[4] |= htobe32(0x00000010);
2920 if (mpt
->role
& MPT_ROLE_INITIATOR
)
2921 elsbuf
[4] |= htobe32(0x00000020);
2922 /* remove from active list as we're done */
2923 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
2924 req
->state
&= ~REQ_STATE_QUEUED
;
2925 req
->state
|= REQ_STATE_DONE
;
2926 mpt_fc_els_send_response(mpt
, req
, rp
, 20);
2930 memset(elsbuf
, 0, 5 * (sizeof (U32
)));
2931 elsbuf
[0] = htobe32(0x02100014);
2932 elsbuf
[1] = htobe32(0x08000100);
2933 mpt_prt(mpt
, "PRLO from 0x%08x%08x\n",
2934 le32toh(rp
->Wwn
.PortNameHigh
),
2935 le32toh(rp
->Wwn
.PortNameLow
));
2936 /* remove from active list as we're done */
2937 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
2938 req
->state
&= ~REQ_STATE_QUEUED
;
2939 req
->state
|= REQ_STATE_DONE
;
2940 mpt_fc_els_send_response(mpt
, req
, rp
, 20);
2944 mpt_prt(mpt
, "ELS TYPE 1 COMMAND: %x\n", cmd
);
2947 } else if (rctl
== ABTS
&& type
== 0) {
2948 uint16_t rx_id
= le16toh(rp
->Rxid
);
2949 uint16_t ox_id
= le16toh(rp
->Oxid
);
2950 request_t
*tgt_req
= NULL
;
2953 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2954 ox_id
, rx_id
, le32toh(rp
->Wwn
.PortNameHigh
),
2955 le32toh(rp
->Wwn
.PortNameLow
));
2956 if (rx_id
>= mpt
->mpt_max_tgtcmds
) {
2957 mpt_prt(mpt
, "Bad RX_ID 0x%x\n", rx_id
);
2958 } else if (mpt
->tgt_cmd_ptrs
== NULL
) {
2959 mpt_prt(mpt
, "No TGT CMD PTRS\n");
2961 tgt_req
= mpt
->tgt_cmd_ptrs
[rx_id
];
2964 mpt_tgt_state_t
*tgt
= MPT_TGT_STATE(mpt
, tgt_req
);
2965 union ccb
*ccb
= tgt
->ccb
;
2969 * Check to make sure we have the correct command
2970 * The reply descriptor in the target state should
2971 * contain an IoIndex that should match the
2974 * It'd be nice to have OX_ID to crosscheck with
2977 ct_id
= GET_IO_INDEX(tgt
->reply_desc
);
2979 if (ct_id
!= rx_id
) {
2980 mpt_lprt(mpt
, MPT_PRT_ERROR
, "ABORT Mismatch: "
2981 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2989 "CCB (%p): lun %u flags %x status %x\n",
2990 ccb
, ccb
->ccb_h
.target_lun
,
2991 ccb
->ccb_h
.flags
, ccb
->ccb_h
.status
);
2993 mpt_prt(mpt
, "target state 0x%x resid %u xfrd %u rpwrd "
2994 "%x nxfers %x\n", tgt
->state
,
2995 tgt
->resid
, tgt
->bytes_xfered
, tgt
->reply_desc
,
2998 if (mpt_abort_target_cmd(mpt
, tgt_req
)) {
2999 mpt_prt(mpt
, "unable to start TargetAbort\n");
3002 mpt_prt(mpt
, "no back pointer for RX_ID 0x%x\n", rx_id
);
3004 memset(elsbuf
, 0, 5 * (sizeof (U32
)));
3005 elsbuf
[0] = htobe32(0);
3006 elsbuf
[1] = htobe32((ox_id
<< 16) | rx_id
);
3007 elsbuf
[2] = htobe32(0x000ffff);
3009 * Dork with the reply frame so that the response to it
3012 rp
->Rctl_Did
+= ((BA_ACC
- ABTS
) << MPI_FC_RCTL_SHIFT
);
3013 /* remove from active list as we're done */
3014 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
3015 req
->state
&= ~REQ_STATE_QUEUED
;
3016 req
->state
|= REQ_STATE_DONE
;
3017 mpt_fc_els_send_response(mpt
, req
, rp
, 12);
3020 mpt_prt(mpt
, "ELS: RCTL %x TYPE %x CMD %x\n", rctl
, type
, cmd
);
3022 if (do_refresh
== TRUE
) {
3023 /* remove from active list as we're done */
3024 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
3025 req
->state
&= ~REQ_STATE_QUEUED
;
3026 req
->state
|= REQ_STATE_DONE
;
3027 mpt_fc_post_els(mpt
, req
, ioindex
);
/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{
    /*
     * The pending list is already run down by
     * the generic handler. Perform the same
     * operation on the timed out request list.
     */
    mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
        MPI_IOCSTATUS_INVALID_STATE);

    /*
     * XXX: We need to repost ELS and Target Command Buffers?
     */

    /*
     * Inform the XPT that a bus reset has occurred.
     */
    xpt_async(AC_BUS_RESET, mpt->path, NULL);
3058 * Parse additional completion information in the reply
3059 * frame for SCSI I/O requests.
3062 mpt_scsi_reply_frame_handler(struct mpt_softc
*mpt
, request_t
*req
,
3063 MSG_DEFAULT_REPLY
*reply_frame
)
3066 MSG_SCSI_IO_REPLY
*scsi_io_reply
;
3070 MPT_DUMP_REPLY_FRAME(mpt
, reply_frame
);
3071 KASSERT(reply_frame
->Function
== MPI_FUNCTION_SCSI_IO_REQUEST
3072 || reply_frame
->Function
== MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
,
3073 ("MPT SCSI I/O Handler called with incorrect reply type"));
3074 KASSERT((reply_frame
->MsgFlags
& MPI_MSGFLAGS_CONTINUATION_REPLY
) == 0,
3075 ("MPT SCSI I/O Handler called with continuation reply"));
3077 scsi_io_reply
= (MSG_SCSI_IO_REPLY
*)reply_frame
;
3078 ioc_status
= le16toh(scsi_io_reply
->IOCStatus
);
3079 ioc_status
&= MPI_IOCSTATUS_MASK
;
3080 sstate
= scsi_io_reply
->SCSIState
;
3084 ccb
->csio
.dxfer_len
- le32toh(scsi_io_reply
->TransferCount
);
3086 if ((sstate
& MPI_SCSI_STATE_AUTOSENSE_VALID
) != 0
3087 && (ccb
->ccb_h
.flags
& (CAM_SENSE_PHYS
| CAM_SENSE_PTR
)) == 0) {
3088 ccb
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
3089 ccb
->csio
.sense_resid
=
3090 ccb
->csio
.sense_len
- le32toh(scsi_io_reply
->SenseCount
);
3091 bcopy(req
->sense_vbuf
, &ccb
->csio
.sense_data
,
3092 min(ccb
->csio
.sense_len
,
3093 le32toh(scsi_io_reply
->SenseCount
)));
3096 if ((sstate
& MPI_SCSI_STATE_QUEUE_TAG_REJECTED
) != 0) {
3098 * Tag messages rejected, but non-tagged retry
3101 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3105 switch(ioc_status
) {
3106 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH
:
3109 * Linux driver indicates that a zero
3110 * transfer length with this error code
3111 * indicates a CRC error.
3113 * No need to swap the bytes for checking
3116 if (scsi_io_reply
->TransferCount
== 0) {
3117 mpt_set_ccb_status(ccb
, CAM_UNCOR_PARITY
);
3121 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN
:
3122 case MPI_IOCSTATUS_SUCCESS
:
3123 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR
:
3124 if ((sstate
& MPI_SCSI_STATE_NO_SCSI_STATUS
) != 0) {
3126 * Status was never returned for this transaction.
3128 mpt_set_ccb_status(ccb
, CAM_UNEXP_BUSFREE
);
3129 } else if (scsi_io_reply
->SCSIStatus
!= SCSI_STATUS_OK
) {
3130 ccb
->csio
.scsi_status
= scsi_io_reply
->SCSIStatus
;
3131 mpt_set_ccb_status(ccb
, CAM_SCSI_STATUS_ERROR
);
3132 if ((sstate
& MPI_SCSI_STATE_AUTOSENSE_FAILED
) != 0)
3133 mpt_set_ccb_status(ccb
, CAM_AUTOSENSE_FAIL
);
3134 } else if ((sstate
& MPI_SCSI_STATE_RESPONSE_INFO_VALID
) != 0) {
3136 /* XXX Handle SPI-Packet and FCP-2 response info. */
3137 mpt_set_ccb_status(ccb
, CAM_REQ_CMP_ERR
);
3139 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3141 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN
:
3142 mpt_set_ccb_status(ccb
, CAM_DATA_RUN_ERR
);
3144 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR
:
3145 mpt_set_ccb_status(ccb
, CAM_UNCOR_PARITY
);
3147 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE
:
3149 * Since selection timeouts and "device really not
3150 * there" are grouped into this error code, report
3151 * selection timeout. Selection timeouts are
3152 * typically retried before giving up on the device
3153 * whereas "device not there" errors are considered
3156 mpt_set_ccb_status(ccb
, CAM_SEL_TIMEOUT
);
3158 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR
:
3159 mpt_set_ccb_status(ccb
, CAM_SEQUENCE_FAIL
);
3161 case MPI_IOCSTATUS_SCSI_INVALID_BUS
:
3162 mpt_set_ccb_status(ccb
, CAM_PATH_INVALID
);
3164 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID
:
3165 mpt_set_ccb_status(ccb
, CAM_TID_INVALID
);
3167 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED
:
3168 ccb
->ccb_h
.status
= CAM_UA_TERMIO
;
3170 case MPI_IOCSTATUS_INVALID_STATE
:
3172 * The IOC has been reset. Emulate a bus reset.
3175 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED
:
3176 ccb
->ccb_h
.status
= CAM_SCSI_BUS_RESET
;
3178 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED
:
3179 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED
:
3181 * Don't clobber any timeout status that has
3182 * already been set for this transaction. We
3183 * want the SCSI layer to be able to differentiate
3184 * between the command we aborted due to timeout
3185 * and any innocent bystanders.
3187 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) != CAM_REQ_INPROG
)
3189 mpt_set_ccb_status(ccb
, CAM_REQ_TERMIO
);
3192 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES
:
3193 mpt_set_ccb_status(ccb
, CAM_RESRC_UNAVAIL
);
3195 case MPI_IOCSTATUS_BUSY
:
3196 mpt_set_ccb_status(ccb
, CAM_BUSY
);
3198 case MPI_IOCSTATUS_INVALID_FUNCTION
:
3199 case MPI_IOCSTATUS_INVALID_SGL
:
3200 case MPI_IOCSTATUS_INTERNAL_ERROR
:
3201 case MPI_IOCSTATUS_INVALID_FIELD
:
3204 * Some of the above may need to kick
3205 * off a recovery action!!!!
3207 ccb
->ccb_h
.status
= CAM_UNREC_HBA_ERROR
;
3211 if ((ccb
->ccb_h
.status
& CAM_STATUS_MASK
) != CAM_REQ_CMP
) {
3212 mpt_freeze_ccb(ccb
);
3219 mpt_action(struct cam_sim
*sim
, union ccb
*ccb
)
3221 struct mpt_softc
*mpt
;
3222 struct ccb_trans_settings
*cts
;
3227 CAM_DEBUG(ccb
->ccb_h
.path
, CAM_DEBUG_TRACE
, ("mpt_action\n"));
3229 mpt
= (struct mpt_softc
*)cam_sim_softc(sim
);
3230 raid_passthru
= (sim
== mpt
->phydisk_sim
);
3231 MPT_LOCK_ASSERT(mpt
);
3233 tgt
= ccb
->ccb_h
.target_id
;
3234 lun
= ccb
->ccb_h
.target_lun
;
3235 if (raid_passthru
&&
3236 ccb
->ccb_h
.func_code
!= XPT_PATH_INQ
&&
3237 ccb
->ccb_h
.func_code
!= XPT_RESET_BUS
&&
3238 ccb
->ccb_h
.func_code
!= XPT_RESET_DEV
) {
3239 CAMLOCK_2_MPTLOCK(mpt
);
3240 if (mpt_map_physdisk(mpt
, ccb
, &tgt
) != 0) {
3241 MPTLOCK_2_CAMLOCK(mpt
);
3242 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
3243 mpt_set_ccb_status(ccb
, CAM_DEV_NOT_THERE
);
3247 MPTLOCK_2_CAMLOCK(mpt
);
3249 ccb
->ccb_h
.ccb_mpt_ptr
= mpt
;
3251 switch (ccb
->ccb_h
.func_code
) {
3252 case XPT_SCSI_IO
: /* Execute the requested I/O operation */
3254 * Do a couple of preliminary checks...
3256 if ((ccb
->ccb_h
.flags
& CAM_CDB_POINTER
) != 0) {
3257 if ((ccb
->ccb_h
.flags
& CAM_CDB_PHYS
) != 0) {
3258 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
3259 mpt_set_ccb_status(ccb
, CAM_REQ_INVALID
);
3263 /* Max supported CDB length is 16 bytes */
3264 /* XXX Unless we implement the new 32byte message type */
3265 if (ccb
->csio
.cdb_len
>
3266 sizeof (((PTR_MSG_SCSI_IO_REQUEST
)0)->CDB
)) {
3267 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
3268 mpt_set_ccb_status(ccb
, CAM_REQ_INVALID
);
3271 #ifdef MPT_TEST_MULTIPATH
3272 if (mpt
->failure_id
== ccb
->ccb_h
.target_id
) {
3273 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
3274 mpt_set_ccb_status(ccb
, CAM_SEL_TIMEOUT
);
3278 ccb
->csio
.scsi_status
= SCSI_STATUS_OK
;
3279 mpt_start(sim
, ccb
);
3283 if (raid_passthru
) {
3284 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
3285 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3289 if (ccb
->ccb_h
.func_code
== XPT_RESET_BUS
) {
3291 xpt_print(ccb
->ccb_h
.path
, "reset bus\n");
3294 xpt_print(ccb
->ccb_h
.path
, "reset device\n");
3296 CAMLOCK_2_MPTLOCK(mpt
);
3297 (void) mpt_bus_reset(mpt
, tgt
, lun
, FALSE
);
3298 MPTLOCK_2_CAMLOCK(mpt
);
3301 * mpt_bus_reset is always successful in that it
3302 * will fall back to a hard reset should a bus
3303 * reset attempt fail.
3305 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
3306 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3311 union ccb
*accb
= ccb
->cab
.abort_ccb
;
3312 CAMLOCK_2_MPTLOCK(mpt
);
3313 switch (accb
->ccb_h
.func_code
) {
3314 case XPT_ACCEPT_TARGET_IO
:
3315 case XPT_IMMED_NOTIFY
:
3316 ccb
->ccb_h
.status
= mpt_abort_target_ccb(mpt
, ccb
);
3318 case XPT_CONT_TARGET_IO
:
3319 mpt_prt(mpt
, "cannot abort active CTIOs yet\n");
3320 ccb
->ccb_h
.status
= CAM_UA_ABORT
;
3323 ccb
->ccb_h
.status
= CAM_UA_ABORT
;
3326 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
3329 MPTLOCK_2_CAMLOCK(mpt
);
3333 #ifdef CAM_NEW_TRAN_CODE
3334 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3336 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3338 #define DP_DISC_ENABLE 0x1
3339 #define DP_DISC_DISABL 0x2
3340 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3342 #define DP_TQING_ENABLE 0x4
3343 #define DP_TQING_DISABL 0x8
3344 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3346 #define DP_WIDE 0x10
3347 #define DP_NARROW 0x20
3348 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3350 #define DP_SYNC 0x40
3352 case XPT_SET_TRAN_SETTINGS
: /* Nexus Settings */
3354 #ifdef CAM_NEW_TRAN_CODE
3355 struct ccb_trans_settings_scsi
*scsi
;
3356 struct ccb_trans_settings_spi
*spi
;
3365 if (mpt
->is_fc
|| mpt
->is_sas
) {
3366 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3370 #ifdef CAM_NEW_TRAN_CODE
3371 scsi
= &cts
->proto_specific
.scsi
;
3372 spi
= &cts
->xport_specific
.spi
;
3375 * We can be called just to validate transport and proto versions
3377 if (scsi
->valid
== 0 && spi
->valid
== 0) {
3378 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3384 * Skip attempting settings on RAID volume disks.
3385 * Other devices on the bus get the normal treatment.
3387 if (mpt
->phydisk_sim
&& raid_passthru
== 0 &&
3388 mpt_is_raid_volume(mpt
, tgt
) != 0) {
3389 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
3390 "no transfer settings for RAID vols\n");
3391 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3395 i
= mpt
->mpt_port_page2
.PortSettings
&
3396 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS
;
3397 j
= mpt
->mpt_port_page2
.PortFlags
&
3398 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK
;
3399 if (i
== MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS
&&
3400 j
== MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV
) {
3401 mpt_lprt(mpt
, MPT_PRT_ALWAYS
,
3402 "honoring BIOS transfer negotiations\n");
3403 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3411 #ifndef CAM_NEW_TRAN_CODE
3412 if ((cts
->valid
& CCB_TRANS_DISC_VALID
) != 0) {
3413 dval
|= (cts
->flags
& CCB_TRANS_DISC_ENB
) ?
3414 DP_DISC_ENABLE
: DP_DISC_DISABL
;
3417 if ((cts
->valid
& CCB_TRANS_TQ_VALID
) != 0) {
3418 dval
|= (cts
->flags
& CCB_TRANS_TAG_ENB
) ?
3419 DP_TQING_ENABLE
: DP_TQING_DISABL
;
3422 if ((cts
->valid
& CCB_TRANS_BUS_WIDTH_VALID
) != 0) {
3423 dval
|= cts
->bus_width
? DP_WIDE
: DP_NARROW
;
3426 if ((cts
->valid
& CCB_TRANS_SYNC_RATE_VALID
) &&
3427 (cts
->valid
& CCB_TRANS_SYNC_OFFSET_VALID
)) {
3429 period
= cts
->sync_period
;
3430 offset
= cts
->sync_offset
;
3433 if ((spi
->valid
& CTS_SPI_VALID_DISC
) != 0) {
3434 dval
|= ((spi
->flags
& CTS_SPI_FLAGS_DISC_ENB
) != 0) ?
3435 DP_DISC_ENABLE
: DP_DISC_DISABL
;
3438 if ((scsi
->valid
& CTS_SCSI_VALID_TQ
) != 0) {
3439 dval
|= ((scsi
->flags
& CTS_SCSI_FLAGS_TAG_ENB
) != 0) ?
3440 DP_TQING_ENABLE
: DP_TQING_DISABL
;
3443 if ((spi
->valid
& CTS_SPI_VALID_BUS_WIDTH
) != 0) {
3444 dval
|= (spi
->bus_width
== MSG_EXT_WDTR_BUS_16_BIT
) ?
3445 DP_WIDE
: DP_NARROW
;
3448 if (spi
->valid
& CTS_SPI_VALID_SYNC_OFFSET
) {
3450 offset
= spi
->sync_offset
;
3452 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr
=
3453 &mpt
->mpt_dev_page1
[tgt
];
3454 offset
= ptr
->RequestedParameters
;
3455 offset
&= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK
;
3456 offset
>>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET
;
3458 if (spi
->valid
& CTS_SPI_VALID_SYNC_RATE
) {
3460 period
= spi
->sync_period
;
3462 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr
=
3463 &mpt
->mpt_dev_page1
[tgt
];
3464 period
= ptr
->RequestedParameters
;
3465 period
&= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK
;
3466 period
>>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD
;
3469 CAMLOCK_2_MPTLOCK(mpt
);
3470 if (dval
& DP_DISC_ENABLE
) {
3471 mpt
->mpt_disc_enable
|= (1 << tgt
);
3472 } else if (dval
& DP_DISC_DISABL
) {
3473 mpt
->mpt_disc_enable
&= ~(1 << tgt
);
3475 if (dval
& DP_TQING_ENABLE
) {
3476 mpt
->mpt_tag_enable
|= (1 << tgt
);
3477 } else if (dval
& DP_TQING_DISABL
) {
3478 mpt
->mpt_tag_enable
&= ~(1 << tgt
);
3480 if (dval
& DP_WIDTH
) {
3481 mpt_setwidth(mpt
, tgt
, 1);
3483 if (dval
& DP_SYNC
) {
3484 mpt_setsync(mpt
, tgt
, period
, offset
);
3487 MPTLOCK_2_CAMLOCK(mpt
);
3488 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3491 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
3492 "set [%d]: 0x%x period 0x%x offset %d\n",
3493 tgt
, dval
, period
, offset
);
3494 if (mpt_update_spi_config(mpt
, tgt
)) {
3495 mpt_set_ccb_status(ccb
, CAM_REQ_CMP_ERR
);
3497 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3499 MPTLOCK_2_CAMLOCK(mpt
);
3502 case XPT_GET_TRAN_SETTINGS
:
3504 #ifdef CAM_NEW_TRAN_CODE
3505 struct ccb_trans_settings_scsi
*scsi
;
3507 cts
->protocol
= PROTO_SCSI
;
3509 struct ccb_trans_settings_fc
*fc
=
3510 &cts
->xport_specific
.fc
;
3511 cts
->protocol_version
= SCSI_REV_SPC
;
3512 cts
->transport
= XPORT_FC
;
3513 cts
->transport_version
= 0;
3514 fc
->valid
= CTS_FC_VALID_SPEED
;
3515 fc
->bitrate
= 100000;
3516 } else if (mpt
->is_sas
) {
3517 struct ccb_trans_settings_sas
*sas
=
3518 &cts
->xport_specific
.sas
;
3519 cts
->protocol_version
= SCSI_REV_SPC2
;
3520 cts
->transport
= XPORT_SAS
;
3521 cts
->transport_version
= 0;
3522 sas
->valid
= CTS_SAS_VALID_SPEED
;
3523 sas
->bitrate
= 300000;
3525 cts
->protocol_version
= SCSI_REV_2
;
3526 cts
->transport
= XPORT_SPI
;
3527 cts
->transport_version
= 2;
3528 if (mpt_get_spi_settings(mpt
, cts
) != 0) {
3529 mpt_set_ccb_status(ccb
, CAM_REQ_CMP_ERR
);
3533 scsi
= &cts
->proto_specific
.scsi
;
3534 scsi
->valid
= CTS_SCSI_VALID_TQ
;
3535 scsi
->flags
= CTS_SCSI_FLAGS_TAG_ENB
;
3539 cts
->flags
= CCB_TRANS_TAG_ENB
| CCB_TRANS_DISC_ENB
;
3540 cts
->valid
= CCB_TRANS_DISC_VALID
| CCB_TRANS_TQ_VALID
;
3541 cts
->bus_width
= MSG_EXT_WDTR_BUS_8_BIT
;
3542 } else if (mpt
->is_sas
) {
3543 cts
->flags
= CCB_TRANS_TAG_ENB
| CCB_TRANS_DISC_ENB
;
3544 cts
->valid
= CCB_TRANS_DISC_VALID
| CCB_TRANS_TQ_VALID
;
3545 cts
->bus_width
= MSG_EXT_WDTR_BUS_8_BIT
;
3546 } else if (mpt_get_spi_settings(mpt
, cts
) != 0) {
3547 mpt_set_ccb_status(ccb
, CAM_REQ_CMP_ERR
);
3551 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3554 case XPT_CALC_GEOMETRY
:
3556 struct ccb_calc_geometry
*ccg
;
3559 if (ccg
->block_size
== 0) {
3560 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
3561 mpt_set_ccb_status(ccb
, CAM_REQ_INVALID
);
3564 mpt_calc_geometry(ccg
, /*extended*/1);
3565 KASSERT(ccb
->ccb_h
.status
, ("zero ccb sts at %d\n", __LINE__
));
3568 case XPT_PATH_INQ
: /* Path routing inquiry */
3570 struct ccb_pathinq
*cpi
= &ccb
->cpi
;
3572 cpi
->version_num
= 1;
3573 cpi
->target_sprt
= 0;
3574 cpi
->hba_eng_cnt
= 0;
3575 cpi
->max_target
= mpt
->port_facts
[0].MaxDevices
- 1;
3577 * FC cards report MAX_DEVICES of 512, but
3578 * the MSG_SCSI_IO_REQUEST target id field
3579 * is only 8 bits. Until we fix the driver
3580 * to support 'channels' for bus overflow,
3583 if (cpi
->max_target
> 255) {
3584 cpi
->max_target
= 255;
3588 * VMware ESX reports > 16 devices and then dies when we probe.
3590 if (mpt
->is_spi
&& cpi
->max_target
> 15) {
3591 cpi
->max_target
= 15;
3596 cpi
->max_lun
= MPT_MAX_LUNS
;
3597 cpi
->initiator_id
= mpt
->mpt_ini_id
;
3598 cpi
->bus_id
= cam_sim_bus(sim
);
3601 * The base speed is the speed of the underlying connection.
3603 #ifdef CAM_NEW_TRAN_CODE
3604 cpi
->protocol
= PROTO_SCSI
;
3606 cpi
->hba_misc
= PIM_NOBUSRESET
;
3607 cpi
->base_transfer_speed
= 100000;
3608 cpi
->hba_inquiry
= PI_TAG_ABLE
;
3609 cpi
->transport
= XPORT_FC
;
3610 cpi
->transport_version
= 0;
3611 cpi
->protocol_version
= SCSI_REV_SPC
;
3612 } else if (mpt
->is_sas
) {
3613 cpi
->hba_misc
= PIM_NOBUSRESET
;
3614 cpi
->base_transfer_speed
= 300000;
3615 cpi
->hba_inquiry
= PI_TAG_ABLE
;
3616 cpi
->transport
= XPORT_SAS
;
3617 cpi
->transport_version
= 0;
3618 cpi
->protocol_version
= SCSI_REV_SPC2
;
3620 cpi
->hba_misc
= PIM_SEQSCAN
;
3621 cpi
->base_transfer_speed
= 3300;
3622 cpi
->hba_inquiry
= PI_SDTR_ABLE
|PI_TAG_ABLE
|PI_WIDE_16
;
3623 cpi
->transport
= XPORT_SPI
;
3624 cpi
->transport_version
= 2;
3625 cpi
->protocol_version
= SCSI_REV_2
;
3629 cpi
->hba_misc
= PIM_NOBUSRESET
;
3630 cpi
->base_transfer_speed
= 100000;
3631 cpi
->hba_inquiry
= PI_TAG_ABLE
;
3632 } else if (mpt
->is_sas
) {
3633 cpi
->hba_misc
= PIM_NOBUSRESET
;
3634 cpi
->base_transfer_speed
= 300000;
3635 cpi
->hba_inquiry
= PI_TAG_ABLE
;
3637 cpi
->hba_misc
= PIM_SEQSCAN
;
3638 cpi
->base_transfer_speed
= 3300;
3639 cpi
->hba_inquiry
= PI_SDTR_ABLE
|PI_TAG_ABLE
|PI_WIDE_16
;
3644 * We give our fake RAID passthru bus a width that is MaxVolumes
3645 * wide and restrict it to one lun.
3647 if (raid_passthru
) {
3648 cpi
->max_target
= mpt
->ioc_page2
->MaxPhysDisks
- 1;
3649 cpi
->initiator_id
= cpi
->max_target
+ 1;
3653 if ((mpt
->role
& MPT_ROLE_INITIATOR
) == 0) {
3654 cpi
->hba_misc
|= PIM_NOINITIATOR
;
3656 if (mpt
->is_fc
&& (mpt
->role
& MPT_ROLE_TARGET
)) {
3658 PIT_PROCESSOR
| PIT_DISCONNECT
| PIT_TERM_IO
;
3660 cpi
->target_sprt
= 0;
3662 strncpy(cpi
->sim_vid
, "FreeBSD", SIM_IDLEN
);
3663 strncpy(cpi
->hba_vid
, "LSI", HBA_IDLEN
);
3664 strncpy(cpi
->dev_name
, cam_sim_name(sim
), DEV_IDLEN
);
3665 cpi
->unit_number
= cam_sim_unit(sim
);
3666 cpi
->ccb_h
.status
= CAM_REQ_CMP
;
3669 case XPT_EN_LUN
: /* Enable LUN as a target */
3673 CAMLOCK_2_MPTLOCK(mpt
);
3674 if (ccb
->cel
.enable
)
3675 result
= mpt_enable_lun(mpt
,
3676 ccb
->ccb_h
.target_id
, ccb
->ccb_h
.target_lun
);
3678 result
= mpt_disable_lun(mpt
,
3679 ccb
->ccb_h
.target_id
, ccb
->ccb_h
.target_lun
);
3680 MPTLOCK_2_CAMLOCK(mpt
);
3682 mpt_set_ccb_status(ccb
, CAM_REQ_CMP
);
3684 mpt_set_ccb_status(ccb
, CAM_REQ_CMP_ERR
);
3688 case XPT_NOTIFY_ACK
: /* recycle notify ack */
3689 case XPT_IMMED_NOTIFY
: /* Add Immediate Notify Resource */
3690 case XPT_ACCEPT_TARGET_IO
: /* Add Accept Target IO Resource */
3692 tgt_resource_t
*trtp
;
3693 lun_id_t lun
= ccb
->ccb_h
.target_lun
;
3694 ccb
->ccb_h
.sim_priv
.entries
[0].field
= 0;
3695 ccb
->ccb_h
.sim_priv
.entries
[1].ptr
= mpt
;
3696 ccb
->ccb_h
.flags
= 0;
3698 if (lun
== CAM_LUN_WILDCARD
) {
3699 if (ccb
->ccb_h
.target_id
!= CAM_TARGET_WILDCARD
) {
3700 mpt_set_ccb_status(ccb
, CAM_REQ_INVALID
);
3703 trtp
= &mpt
->trt_wildcard
;
3704 } else if (lun
>= MPT_MAX_LUNS
) {
3705 mpt_set_ccb_status(ccb
, CAM_REQ_INVALID
);
3708 trtp
= &mpt
->trt
[lun
];
3710 CAMLOCK_2_MPTLOCK(mpt
);
3711 if (ccb
->ccb_h
.func_code
== XPT_ACCEPT_TARGET_IO
) {
3712 mpt_lprt(mpt
, MPT_PRT_DEBUG1
,
3713 "Put FREE ATIO %p lun %d\n", ccb
, lun
);
3714 STAILQ_INSERT_TAIL(&trtp
->atios
, &ccb
->ccb_h
,
3716 } else if (ccb
->ccb_h
.func_code
== XPT_IMMED_NOTIFY
) {
3717 mpt_lprt(mpt
, MPT_PRT_DEBUG1
,
3718 "Put FREE INOT lun %d\n", lun
);
3719 STAILQ_INSERT_TAIL(&trtp
->inots
, &ccb
->ccb_h
,
3722 mpt_lprt(mpt
, MPT_PRT_ALWAYS
, "Got Notify ACK\n");
3724 mpt_set_ccb_status(ccb
, CAM_REQ_INPROG
);
3725 MPTLOCK_2_CAMLOCK(mpt
);
3728 case XPT_CONT_TARGET_IO
:
3729 CAMLOCK_2_MPTLOCK(mpt
);
3730 mpt_target_start_io(mpt
, ccb
);
3731 MPTLOCK_2_CAMLOCK(mpt
);
3735 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
3742 mpt_get_spi_settings(struct mpt_softc
*mpt
, struct ccb_trans_settings
*cts
)
3744 #ifdef CAM_NEW_TRAN_CODE
3745 struct ccb_trans_settings_scsi
*scsi
= &cts
->proto_specific
.scsi
;
3746 struct ccb_trans_settings_spi
*spi
= &cts
->xport_specific
.spi
;
3749 uint32_t dval
, pval
, oval
;
3752 if (IS_CURRENT_SETTINGS(cts
) == 0) {
3753 tgt
= cts
->ccb_h
.target_id
;
3754 } else if (xpt_path_sim(cts
->ccb_h
.path
) == mpt
->phydisk_sim
) {
3755 if (mpt_map_physdisk(mpt
, (union ccb
*)cts
, &tgt
)) {
3759 tgt
= cts
->ccb_h
.target_id
;
3763 * We aren't looking at Port Page 2 BIOS settings here-
3764 * sometimes these have been known to be bogus XXX.
3766 * For user settings, we pick the max from port page 0
3768 * For current settings we read the current settings out from
3769 * device page 0 for that target.
3771 if (IS_CURRENT_SETTINGS(cts
)) {
3772 CONFIG_PAGE_SCSI_DEVICE_0 tmp
;
3775 CAMLOCK_2_MPTLOCK(mpt
);
3776 tmp
= mpt
->mpt_dev_page0
[tgt
];
3777 rv
= mpt_read_cur_cfg_page(mpt
, tgt
, &tmp
.Header
,
3778 sizeof(tmp
), FALSE
, 5000);
3780 MPTLOCK_2_CAMLOCK(mpt
);
3781 mpt_prt(mpt
, "can't get tgt %d config page 0\n", tgt
);
3784 mpt2host_config_page_scsi_device_0(&tmp
);
3786 MPTLOCK_2_CAMLOCK(mpt
);
3787 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
3788 "mpt_get_spi_settings[%d]: current NP %x Info %x\n",
3790 (unsigned)tmp
.NegotiatedParameters
,
3791 (unsigned)tmp
.Information
);
3792 dval
|= (tmp
.NegotiatedParameters
& MPI_SCSIDEVPAGE0_NP_WIDE
) ?
3793 DP_WIDE
: DP_NARROW
;
3794 dval
|= (mpt
->mpt_disc_enable
& (1 << tgt
)) ?
3795 DP_DISC_ENABLE
: DP_DISC_DISABL
;
3796 dval
|= (mpt
->mpt_tag_enable
& (1 << tgt
)) ?
3797 DP_TQING_ENABLE
: DP_TQING_DISABL
;
3798 oval
= tmp
.NegotiatedParameters
;
3799 oval
&= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK
;
3800 oval
>>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET
;
3801 pval
= tmp
.NegotiatedParameters
;
3802 pval
&= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK
;
3803 pval
>>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD
;
3804 mpt
->mpt_dev_page0
[tgt
] = tmp
;
3806 dval
= DP_WIDE
|DP_DISC_ENABLE
|DP_TQING_ENABLE
|DP_SYNC
;
3807 oval
= mpt
->mpt_port_page0
.Capabilities
;
3808 oval
= MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval
);
3809 pval
= mpt
->mpt_port_page0
.Capabilities
;
3810 pval
= MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval
);
3813 #ifndef CAM_NEW_TRAN_CODE
3814 cts
->flags
&= ~(CCB_TRANS_DISC_ENB
|CCB_TRANS_TAG_ENB
);
3816 cts
->sync_period
= pval
;
3817 cts
->sync_offset
= oval
;
3818 cts
->valid
|= CCB_TRANS_SYNC_RATE_VALID
;
3819 cts
->valid
|= CCB_TRANS_SYNC_OFFSET_VALID
;
3820 cts
->valid
|= CCB_TRANS_BUS_WIDTH_VALID
;
3821 if (dval
& DP_WIDE
) {
3822 cts
->bus_width
= MSG_EXT_WDTR_BUS_16_BIT
;
3824 cts
->bus_width
= MSG_EXT_WDTR_BUS_8_BIT
;
3826 if (cts
->ccb_h
.target_lun
!= CAM_LUN_WILDCARD
) {
3827 cts
->valid
|= CCB_TRANS_DISC_VALID
| CCB_TRANS_TQ_VALID
;
3828 if (dval
& DP_DISC_ENABLE
) {
3829 cts
->flags
|= CCB_TRANS_DISC_ENB
;
3831 if (dval
& DP_TQING_ENABLE
) {
3832 cts
->flags
|= CCB_TRANS_TAG_ENB
;
3840 spi
->sync_offset
= oval
;
3841 spi
->sync_period
= pval
;
3842 spi
->valid
|= CTS_SPI_VALID_SYNC_OFFSET
;
3843 spi
->valid
|= CTS_SPI_VALID_SYNC_RATE
;
3844 spi
->valid
|= CTS_SPI_VALID_BUS_WIDTH
;
3845 if (dval
& DP_WIDE
) {
3846 spi
->bus_width
= MSG_EXT_WDTR_BUS_16_BIT
;
3848 spi
->bus_width
= MSG_EXT_WDTR_BUS_8_BIT
;
3850 if (cts
->ccb_h
.target_lun
!= CAM_LUN_WILDCARD
) {
3851 scsi
->valid
= CTS_SCSI_VALID_TQ
;
3852 if (dval
& DP_TQING_ENABLE
) {
3853 scsi
->flags
|= CTS_SCSI_FLAGS_TAG_ENB
;
3855 spi
->valid
|= CTS_SPI_VALID_DISC
;
3856 if (dval
& DP_DISC_ENABLE
) {
3857 spi
->flags
|= CTS_SPI_FLAGS_DISC_ENB
;
3861 mpt_lprt(mpt
, MPT_PRT_NEGOTIATION
,
3862 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt
,
3863 IS_CURRENT_SETTINGS(cts
)? "ACTIVE" : "NVRAM ", dval
, pval
, oval
);
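/*
 * Update the cached SCSI device page 1 to request (or drop) wide
 * transfers for a target; mpt_update_spi_config() writes the page out.
 */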
mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
{
    PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;

    ptr = &mpt->mpt_dev_page1[tgt];
    if (onoff) {
        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
    } else {
        ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
    }
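/*
 * Update the cached device page 1 sync period/offset (and the DT/QAS/IU
 * bits) for a target; the page is pushed to the IOC by
 * mpt_update_spi_config().
 */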
mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
{
    PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;

    ptr = &mpt->mpt_dev_page1[tgt];
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
    ptr->RequestedParameters |=
        period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
    ptr->RequestedParameters |=
        offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
    ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
    ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
    ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
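/*
 * Write the cached device page 1 for this target back to the IOC so the
 * new negotiation parameters take effect.
 */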
mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
{
    CONFIG_PAGE_SCSI_DEVICE_1 tmp;

    mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
        "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
        (unsigned)mpt->mpt_dev_page1[tgt].RequestedParameters);
    tmp = mpt->mpt_dev_page1[tgt];
    host2mpt_config_page_scsi_device_1(&tmp);
    rv = mpt_write_cur_cfg_page(mpt, tgt,
        &tmp.Header, sizeof(tmp), FALSE, 5000);
    mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
{
#if __FreeBSD_version >= 500000
    cam_calc_geometry(ccg, extended);
#else
    uint32_t secs_per_cylinder;

    if (ccg->block_size == 0) {
        ccg->ccb_h.status = CAM_REQ_INVALID;
        return;
    }
    size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
    if (size_mb > 1024 && extended) {
        ccg->secs_per_track = 63;
    } else {
        ccg->secs_per_track = 32;
    }
    secs_per_cylinder = ccg->heads * ccg->secs_per_track;
    ccg->cylinders = ccg->volume_size / secs_per_cylinder;
    ccg->ccb_h.status = CAM_REQ_CMP;
#endif
/****************************** Timeout Recovery ******************************/

mpt_spawn_recovery_thread(struct mpt_softc *mpt)
{
    error = mpt_kthread_create(mpt_recovery_thread, mpt,
        &mpt->recovery_thread, /*flags*/0,
        /*altstack*/0, "mpt_recovery%d", mpt->unit);
}

mpt_terminate_recovery_thread(struct mpt_softc *mpt)
{
    if (mpt->recovery_thread == NULL) {
        return;
    }
    mpt->shutdwn_recovery = 1;
    /*
     * Sleep on a slightly different location
     * for this interlock just for added safety.
     */
    mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
}

mpt_recovery_thread(void *arg)
{
    struct mpt_softc *mpt;

    mpt = (struct mpt_softc *)arg;
    for (;;) {
        if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
            if (mpt->shutdwn_recovery == 0) {
                mpt_sleep(mpt, mpt, PUSER, "idle", 0);
            }
        }
        if (mpt->shutdwn_recovery != 0) {
            break;
        }
        mpt_recover_commands(mpt);
    }
    mpt->recovery_thread = NULL;
    wakeup(&mpt->recovery_thread);
    mpt_kthread_exit(0);
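/*
 * Issue a SCSI task management function through the doorbell handshake.
 * Only one TMF may be outstanding at a time, so the shared tmf_req is
 * reused and guarded by its request state.
 */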
mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
    u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
{
    MSG_SCSI_TASK_MGMT *tmf_req;

    /*
     * Wait for any current TMF request to complete.
     * We're only allowed to issue one TMF at a time.
     */
    error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
        sleep_ok, MPT_TMF_MAX_TIMEOUT);
    if (error != 0) {
        mpt_reset(mpt, TRUE);
    }
    mpt_assign_serno(mpt, mpt->tmf_req);
    mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;

    tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
    memset(tmf_req, 0, sizeof(*tmf_req));
    tmf_req->TargetID = target;
    tmf_req->Bus = channel;
    tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
    tmf_req->TaskType = type;
    tmf_req->MsgFlags = flags;
    tmf_req->MsgContext =
        htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
    if (lun > MPT_MAX_LUNS) {
        tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
        tmf_req->LUN[1] = lun & 0xff;
    } else {
        tmf_req->LUN[1] = lun;
    }
    tmf_req->TaskMsgContext = abort_ctx;

    mpt_lprt(mpt, MPT_PRT_DEBUG,
        "Issuing TMF %p:%u with MsgContext of 0x%x\n",
        mpt->tmf_req,
        (unsigned)mpt->tmf_req->serno,
        (unsigned)tmf_req->MsgContext);
    if (mpt->verbose > MPT_PRT_DEBUG) {
        mpt_print_request(tmf_req);
    }

    KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
        ("mpt_scsi_send_tmf: tmf_req already on pending list"));
    TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
    error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
    if (error != MPT_OK) {
        TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
        mpt->tmf_req->state = REQ_STATE_FREE;
        mpt_reset(mpt, TRUE);
    }
4066 * When a command times out, it is placed on the request_timeout_list
4067 * and we wake our recovery thread. The MPT-Fusion architecture supports
4068 * only a single TMF operation at a time, so we serially abort/bdr, etc,
4069 * the timedout transactions. The next TMF is issued either by the
4070 * completion handler of the current TMF waking our recovery thread,
4071 * or the TMF timeout handler causing a hard reset sequence.
4074 mpt_recover_commands(struct mpt_softc
*mpt
)
4080 if (TAILQ_EMPTY(&mpt
->request_timeout_list
) != 0) {
4082 * No work to do- leave.
4084 mpt_prt(mpt
, "mpt_recover_commands: no requests.\n");
4089 * Flush any commands whose completion coincides with their timeout.
4093 if (TAILQ_EMPTY(&mpt
->request_timeout_list
) != 0) {
4095 * The timedout commands have already
4096 * completed. This typically means
4097 * that either the timeout value was on
4098 * the hairy edge of what the device
4099 * requires or - more likely - interrupts
4100 * are not happening.
4102 mpt_prt(mpt
, "Timedout requests already complete. "
4103 "Interrupts may not be functioning.\n");
4104 mpt_enable_ints(mpt
);
4109 * We have no visibility into the current state of the
4110 * controller, so attempt to abort the commands in the
4111 * order they timed-out. For initiator commands, we
4112 * depend on the reply handler pulling requests off
4115 while ((req
= TAILQ_FIRST(&mpt
->request_timeout_list
)) != NULL
) {
4118 MSG_REQUEST_HEADER
*hdrp
= req
->req_vbuf
;
4120 mpt_prt(mpt
, "attempting to abort req %p:%u function %x\n",
4121 req
, req
->serno
, hdrp
->Function
);
4124 mpt_prt(mpt
, "null ccb in timed out request. "
4125 "Resetting Controller.\n");
4126 mpt_reset(mpt
, TRUE
);
4129 mpt_set_ccb_status(ccb
, CAM_CMD_TIMEOUT
);
4132 * Check to see if this is not an initiator command and
4133 * deal with it differently if it is.
4135 switch (hdrp
->Function
) {
4136 case MPI_FUNCTION_SCSI_IO_REQUEST
:
4137 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
:
4141 * XXX: FIX ME: need to abort target assists...
4143 mpt_prt(mpt
, "just putting it back on the pend q\n");
4144 TAILQ_REMOVE(&mpt
->request_timeout_list
, req
, links
);
4145 TAILQ_INSERT_HEAD(&mpt
->request_pending_list
, req
,
4150 error
= mpt_scsi_send_tmf(mpt
,
4151 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK
,
4152 0, 0, ccb
->ccb_h
.target_id
, ccb
->ccb_h
.target_lun
,
4153 htole32(req
->index
| scsi_io_handler_id
), TRUE
);
4157 * mpt_scsi_send_tmf hard resets on failure, so no
4158 * need to do so here. Our queue should be emptied
4159 * by the hard reset.
4164 error
= mpt_wait_req(mpt
, mpt
->tmf_req
, REQ_STATE_DONE
,
4165 REQ_STATE_DONE
, TRUE
, 500);
4167 status
= le16toh(mpt
->tmf_req
->IOCStatus
);
4168 response
= mpt
->tmf_req
->ResponseCode
;
4169 mpt
->tmf_req
->state
= REQ_STATE_FREE
;
4173 * If we've errored out, reset the controller.
4175 mpt_prt(mpt
, "mpt_recover_commands: abort timed-out. "
4176 "Resetting controller\n");
4177 mpt_reset(mpt
, TRUE
);
4181 if ((status
& MPI_IOCSTATUS_MASK
) != MPI_IOCSTATUS_SUCCESS
) {
4182 mpt_prt(mpt
, "mpt_recover_commands: IOC Status 0x%x. "
4183 "Resetting controller.\n", status
);
4184 mpt_reset(mpt
, TRUE
);
4188 if (response
!= MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED
&&
4189 response
!= MPI_SCSITASKMGMT_RSP_TM_COMPLETE
) {
4190 mpt_prt(mpt
, "mpt_recover_commands: TMF Response 0x%x. "
4191 "Resetting controller.\n", response
);
4192 mpt_reset(mpt
, TRUE
);
4195 mpt_prt(mpt
, "abort of req %p:%u completed\n", req
, req
->serno
);
/************************ Target Mode Support ****************************/

mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
{
    MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
    PTR_SGE_TRANSACTION32 tep;
    PTR_SGE_SIMPLE32 se;

    paddr = req->req_pbuf;
    paddr += MPT_RQSL(mpt);
    memset(fc, 0, MPT_REQUEST_AREA);
    fc->BufferCount = 1;
    fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
    fc->MsgContext = htole32(req->index | fc_els_handler_id);

    /*
     * Okay, set up ELS buffer pointers. ELS buffer pointers
     * consist of a TE SGL element (with details length of zero)
     * followed by a SIMPLE SGL element which holds the address
     */
    tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
    tep->ContextSize = 4;
    tep->TransactionContext[0] = htole32(ioindex);

    se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
    fl = MPI_SGE_FLAGS_HOST_TO_IOC |
        MPI_SGE_FLAGS_SIMPLE_ELEMENT |
        MPI_SGE_FLAGS_LAST_ELEMENT |
        MPI_SGE_FLAGS_END_OF_LIST |
        MPI_SGE_FLAGS_END_OF_BUFFER;
    fl <<= MPI_SGE_FLAGS_SHIFT;
    fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
    se->FlagsLength = htole32(fl);
    se->Address = htole32((uint32_t) paddr);
    mpt_lprt(mpt, MPT_PRT_DEBUG,
        "add ELS index %d ioindex %d for %p:%u\n",
        req->index, ioindex, req, req->serno);
    KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
        ("mpt_fc_post_els: request not locked"));
    mpt_send_cmd(mpt, req);
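/*
 * Post a target command buffer to the IOC so it can receive a new
 * incoming SCSI command while operating in target mode.
 */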
mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
{
    PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
    PTR_CMD_BUFFER_DESCRIPTOR cb;

    paddr = req->req_pbuf;
    paddr += MPT_RQSL(mpt);
    memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
    MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;

    fc->BufferCount = 1;
    fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
    fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

    cb = &fc->Buffer[0];
    cb->IoIndex = htole16(ioindex);
    cb->u.PhysicalAddress32 = htole32((U32) paddr);

    mpt_check_doorbell(mpt);
    mpt_send_cmd(mpt, req);
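/*
 * Allocate and post the fixed pool of ELS buffers used on Fibre Channel
 * controllers.
 */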
mpt_add_els_buffers(struct mpt_softc *mpt)
{
    if (mpt->is_fc == 0) {
        return (TRUE);
    }
    if (mpt->els_cmds_allocated) {
        return (TRUE);
    }
    mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
        M_DEVBUF, M_NOWAIT | M_ZERO);
    if (mpt->els_cmd_ptrs == NULL) {
        return (FALSE);
    }

    /*
     * Feed the chip some ELS buffer resources
     */
    for (i = 0; i < MPT_MAX_ELS; i++) {
        request_t *req = mpt_get_request(mpt, FALSE);
        req->state |= REQ_STATE_LOCKED;
        mpt->els_cmd_ptrs[i] = req;
        mpt_fc_post_els(mpt, req, i);
    }

    if (i == 0) {
        mpt_prt(mpt, "unable to add ELS buffer resources\n");
        kfree(mpt->els_cmd_ptrs, M_DEVBUF);
        mpt->els_cmd_ptrs = NULL;
        return (FALSE);
    }
    if (i != MPT_MAX_ELS) {
        mpt_lprt(mpt, MPT_PRT_INFO,
            "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
    }
    mpt->els_cmds_allocated = i;
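/*
 * Allocate and post target command buffers (up to half of the request
 * pool, bounded by mpt_max_tgtcmds).
 */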
static int
mpt_add_target_commands(struct mpt_softc *mpt)
{
	int i, max;

	if (mpt->tgt_cmd_ptrs) {
		return (TRUE);
	}

	max = MPT_MAX_REQUESTS(mpt) >> 1;
	if (max > mpt->mpt_max_tgtcmds) {
		max = mpt->mpt_max_tgtcmds;
	}
	mpt->tgt_cmd_ptrs =
	    kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->tgt_cmd_ptrs == NULL) {
		mpt_prt(mpt,
		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
		return (FALSE);
	}

	for (i = 0; i < max; i++) {
		request_t *req;

		req = mpt_get_request(mpt, FALSE);
		if (req == NULL) {
			break;
		}
		req->state |= REQ_STATE_LOCKED;
		mpt->tgt_cmd_ptrs[i] = req;
		mpt_post_target_command(mpt, req, i);
	}

	if (i == 0) {
		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
		kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
		mpt->tgt_cmd_ptrs = NULL;
		return (FALSE);
	}

	mpt->tgt_cmds_allocated = i;

	if (i < max) {
		mpt_lprt(mpt, MPT_PRT_INFO,
		    "added %d of %d target bufs\n", i, max);
	}
	return (TRUE);
}
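/*
 * Sizing note, inferred from the code above rather than any spec: at most
 * half of the adapter's request pool, further capped by mpt_max_tgtcmds,
 * is turned into target-mode command buffers.  Each one is marked
 * REQ_STATE_LOCKED so the normal request allocator never hands it out;
 * these buffers are only recycled through mpt_post_target_command().
 */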
static cam_status
mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{
	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
		mpt->twildcard = 1;
	} else if (lun >= MPT_MAX_LUNS) {
		return (CAM_LUN_INVALID);
	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
		return (CAM_TID_INVALID);
	}

	if (mpt->tenabled == 0) {
		if (mpt->is_fc) {
			(void) mpt_fc_reset_link(mpt, 0);
		}
		mpt->tenabled = 1;
	}
	if (lun == CAM_LUN_WILDCARD) {
		mpt->trt_wildcard.enabled = 1;
	} else {
		mpt->trt[lun].enabled = 1;
	}
	return (CAM_REQ_CMP);
}
static cam_status
mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{
	int i;

	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
		mpt->twildcard = 0;
	} else if (lun >= MPT_MAX_LUNS) {
		return (CAM_LUN_INVALID);
	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
		return (CAM_TID_INVALID);
	}

	if (lun == CAM_LUN_WILDCARD) {
		mpt->trt_wildcard.enabled = 0;
	} else {
		mpt->trt[lun].enabled = 0;
	}

	for (i = 0; i < MPT_MAX_LUNS; i++) {
		if (mpt->trt[i].enabled) {
			break;
		}
	}
	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
		if (mpt->is_fc) {
			(void) mpt_fc_reset_link(mpt, 0);
		}
		mpt->tenabled = 0;
	}
	return (CAM_REQ_CMP);
}
/*
 * Called with MPT lock held
 */
static void
mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
{
	struct ccb_scsiio *csio = &ccb->csio;
	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);

	switch (tgt->state) {
	case TGT_STATE_IN_CAM:
		break;
	case TGT_STATE_MOVING_DATA:
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		xpt_freeze_simq(mpt->sim, 1);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		return;
	default:
		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
		mpt_tgt_dump_req_state(mpt, cmd_req);
		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		return;
	}

	if (csio->dxfer_len) {
		bus_dmamap_callback_t *cb;
		PTR_MSG_TARGET_ASSIST_REQUEST ta;
		request_t *req;

		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));

		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
			if (mpt->outofbeer == 0) {
				mpt->outofbeer = 1;
				xpt_freeze_simq(mpt->sim, 1);
				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
			}
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
			return;
		}
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		if (sizeof (bus_addr_t) > 4) {
			cb = mpt_execute_req_a64;
		} else {
			cb = mpt_execute_req;
		}

		req->ccb = ccb;
		ccb->ccb_h.ccb_req_ptr = req;

		/*
		 * Record the currently active ccb and the
		 * request for it in our target state area.
		 */
		tgt->ccb = ccb;
		tgt->req = req;

		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
		ta = req->req_vbuf;

		if (mpt->is_sas) {
			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
			    cmd_req->req_vbuf;
			ta->QueueTag = ssp->InitiatorTag;
		} else if (mpt->is_spi) {
			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
			    cmd_req->req_vbuf;
			ta->QueueTag = sp->Tag;
		}
		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
		ta->ReplyWord = htole32(tgt->reply_desc);
		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
			ta->LUN[0] =
			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
		} else {
			ta->LUN[1] = csio->ccb_h.target_lun;
		}

		ta->RelativeOffset = tgt->bytes_xfered;
		ta->DataLength = ccb->csio.dxfer_len;
		if (ta->DataLength > tgt->resid) {
			ta->DataLength = tgt->resid;
		}

		/*
		 * XXX Should be done after data transfer completes?
		 */
		tgt->resid -= csio->dxfer_len;
		tgt->bytes_xfered += csio->dxfer_len;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		}

#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
		}
#endif
		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);

		MPTLOCK_2_CAMLOCK(mpt);
		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
				int error;

				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    cb, req, 0);
				if (error == EINPROGRESS) {
					xpt_freeze_simq(mpt->sim, 1);
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr = (bus_addr_t)
				    (vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				(*cb)(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could be easily supported but they are not
			 * currently generated by the CAM subsystem so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *sgs;
			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
				(*cb)(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				sgs = (struct bus_dma_segment *)csio->data_ptr;
				(*cb)(req, sgs, csio->sglist_cnt, 0);
			}
		}
		CAMLOCK_2_MPTLOCK(mpt);
	} else {
		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

		/*
		 * XXX: I don't know why this seems to happen, but
		 * XXX: completing the CCB seems to make things happy.
		 * XXX: This seems to happen if the initiator requests
		 * XXX: enough data that we have to do multiple CTIOs.
		 */
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Meaningless STATUS CCB (%p): flags %x status %x "
			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
			return;
		}
		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
			sp = sense;
			memcpy(sp, &csio->sense_data,
			    min(csio->sense_len, MPT_SENSE_SIZE));
		}
		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
	}
}
static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    uint32_t lun, int send, uint8_t *data, size_t length)
{
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_ASSIST_REQUEST ta;
	SGE_SIMPLE32 *se;
	uint32_t flags;
	uint8_t *dptr;
	bus_addr_t pptr;
	request_t *req;

	/*
	 * We enter with resid set to the data load for the command.
	 */
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if (length == 0 || tgt->resid == 0) {
		tgt->resid = 0;
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
		return;
	}

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		mpt_prt(mpt, "out of resources- dropping local response\n");
		return;
	}
	tgt->is_local = 1;

	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
	ta = req->req_vbuf;

	if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
		ta->QueueTag = ssp->InitiatorTag;
	} else if (mpt->is_spi) {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
		ta->QueueTag = sp->Tag;
	}
	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	ta->ReplyWord = htole32(tgt->reply_desc);
	if (lun > MPT_MAX_LUNS) {
		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		ta->LUN[1] = lun & 0xff;
	} else {
		ta->LUN[1] = lun;
	}
	ta->RelativeOffset = 0;
	ta->DataLength = length;

	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));

	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	se->Address = pptr;
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);

	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}
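/*
 * mpt_scsi_tgt_local() is the path used to synthesize small responses
 * (inquiry data, sense) entirely inside the driver: the payload is copied
 * into the back half of the request area and a single SIMPLE SGE is pointed
 * at it, so no CAM ccb is involved.  Sketch of a caller, matching the
 * "NOT HERE" inquiry case handled in mpt_scsi_tgt_atio() below:
 *
 *	len = min(tgt->resid, cdbp[4]);
 *	len = min(len, sizeof (null_iqd));
 *	mpt_scsi_tgt_local(mpt, req, lun, 1, null_iqd, len);
 */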
/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	int found = 0;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);

	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		trtp = &mpt->trt_wildcard;
	} else {
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	}

	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &trtp->inots;
	} else {
		return (CAM_REQ_INVALID);
	}

	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch == &accb->ccb_h) {
			found = 1;
			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
			break;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}
/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);

	error = 0;
	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
	}
	return (error);
}
/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */
static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}

	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));

		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * as words which are swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
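		/*
		 * Word layout assumed by the code below (inferred from this
		 * code, with the FCP field names as a best guess for the
		 * "NEED MNEMONIC" flags): rsp[2] holds the SCSI status in its
		 * low byte plus flag bits, where 0x800 appears to become
		 * FCP_RESID_UNDER and 0x200 FCP_SNS_LEN_VALID once the word
		 * is byte-swapped; rsp[3] is the residual count; rsp[4] is
		 * the sense data length; rsp[8] onward carries the sense
		 * bytes themselves.
		 */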
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));

		rsp[2] = status;
		if (tgt->resid) {
			rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */
			rsp[3] = htobe32(tgt->resid);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
		if (status == SCSI_STATUS_CHECK_COND) {
			int i;

			rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
			MPI_SGE_FLAGS_HOST_TO_IOC	|
			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
			MPI_SGE_FLAGS_LAST_ELEMENT	|
			MPI_SGE_FLAGS_END_OF_LIST	|
			MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immed_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
	inot->sense_len = 0;
	memset(inot->message_args, 0, sizeof (inot->message_args));
	inot->initiator_id = init_id;	/* XXX */

	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_ABORT_TASK_SET:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->message_args[0] = MSG_CLEAR_TASK_SET;
		break;
	case MPT_TARGET_RESET:
		inot->message_args[0] = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->message_args[0] = MSG_CLEAR_ACA;
		break;
	case MPT_TERMINATE_TASK:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	default:
		inot->message_args[0] = MSG_NOOP;
		break;
	}
	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done((union ccb *)inot);
	CAMLOCK_2_MPTLOCK(mpt);
}
static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V'
	};
	struct ccb_accept_tio *atiop;
	uint32_t lun;
	int tag_action = 0;
	int ioindex;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	uint8_t *vbuf;
	uint8_t *lunptr;
	uint8_t *cdbp;
	uint16_t itag;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;

	/*
	 * First, DMA sync the received command-
	 * which is in the *request* * phys area.
	 *
	 * XXX: We could optimize this for a range
	 */
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);

	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			case 0x80:
				fct = MPT_TERMINATE_TASK;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, 0, req,
				    SCSI_STATUS_OK, 0);
				return;
			}
		} else {
			switch (fc->FcpCntl[1]) {
			case 0:
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			case 1:
				tag_action = MSG_HEAD_OF_Q_TAG;
				break;
			case 2:
				tag_action = MSG_ORDERED_Q_TAG;
				break;
			default:
				/*
				 * Bah. Ignore Untagged Queing and ACA
				 */
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			}
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		itag = be16toh(fc->OptionalOxid);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		itag = sp->Tag;
	}
	/*
	 * Generate a simple lun
	 */
	switch (lunptr[0] & 0xc0) {
	case 0x40:
		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
		break;
	case 0:
		lun = lunptr[1];
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
		lun = 0xffff;
		break;
	}
	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
			size_t len;

			memset(buf, 0, MPT_SENSE_SIZE);
			cond = SCSI_STATUS_CHECK_COND;
			sp = buf;
			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

			switch (cdbp[0]) {
			case INQUIRY:
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (null_iqd));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local inquiry %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    null_iqd, len);
				return;
			case REQUEST_SENSE:
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (buf));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local reqsense %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    buf, len);
				return;
			case REPORT_LUNS:
				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
				break;
			default:
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "CMD 0x%x to unmanaged lun %u\n",
				    cdbp[0], lun);
				break;
			}
			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}

	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener\n",
			    fct);
			mpt_scsi_tgt_status(mpt, 0, req,
			    SCSI_STATUS_OK, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}
	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %u- sending back %s\n", lun,
		    mpt->tenabled ? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled ? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
	atiop->tag_action = tag_action;
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
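	/*
	 * Presumably MPT_MAKE_TAGID() folds the request index and the IOC's
	 * ioindex into the tag, which is what lets mpt_target_start_io()
	 * above recover the original command buffer request with
	 * MPT_TAG_2_REQ(mpt, csio->tag_id) when the CCB comes back down from
	 * CAM.  This is an inference from usage in this file; the macro
	 * itself lives in the mpt headers.
	 */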
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;

		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1)) ? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}

	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done((union ccb *)atiop);
	CAMLOCK_2_MPTLOCK(mpt);
}
static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}
static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}
static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	union ccb *ccb;
	u_int status;
	u_int dbg;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}

		switch(tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d\n", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_done(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
			}
			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d\n", __LINE__));
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_done(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);

	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;

		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
		if (status != MPI_IOCSTATUS_SUCCESS) {
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));

		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);
		break;
	}
	return (TRUE);
}