2 * Generic routines for LSI Fusion adapters.
5 * Copyright (c) 2000, 2001 by Greg Ansley
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
62 * Support from LSI-Logic has also gone a great deal toward making this a
63 * workable subsystem and is gratefully acknowledged.
66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
67 * Copyright (c) 2005, WHEEL Sp. z o.o.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * All rights reserved.
71 * Redistribution and use in source and binary forms, with or without
72 * modification, are permitted provided that the following conditions are
74 * 1. Redistributions of source code must retain the above copyright
75 * notice, this list of conditions and the following disclaimer.
76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
77 * substantially similar to the "NO WARRANTY" disclaimer below
78 * ("Disclaimer") and any redistribution must be conditioned upon including
79 * a substantially similar Disclaimer requirement for further binary
81 * 3. Neither the names of the above listed copyright holders nor the names
82 * of any contributors may be used to endorse or promote products derived
83 * from this software without specific prior written permission.
85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 * $FreeBSD: head/sys/dev/mpt/mpt.c 241874 2012-10-22 10:42:59Z marius $
100 #include <dev/disk/mpt/mpt.h>
101 #include <dev/disk/mpt/mpt_cam.h> /* XXX For static handler registration */
102 #include <dev/disk/mpt/mpt_raid.h> /* XXX For static handler registration */
104 #include <dev/disk/mpt/mpilib/mpi.h>
105 #include <dev/disk/mpt/mpilib/mpi_ioc.h>
106 #include <dev/disk/mpt/mpilib/mpi_fc.h>
107 #include <dev/disk/mpt/mpilib/mpi_targ.h>
109 #include <sys/sysctl.h>
111 #define MPT_MAX_TRYS 3
112 #define MPT_MAX_WAIT 300000
114 static int maxwait_ack
= 0;
115 static int maxwait_int
= 0;
116 static int maxwait_state
= 0;
118 static TAILQ_HEAD(, mpt_softc
) mpt_tailq
= TAILQ_HEAD_INITIALIZER(mpt_tailq
);
119 mpt_reply_handler_t
*mpt_reply_handlers
[MPT_NUM_REPLY_HANDLERS
];
121 static mpt_reply_handler_t mpt_default_reply_handler
;
122 static mpt_reply_handler_t mpt_config_reply_handler
;
123 static mpt_reply_handler_t mpt_handshake_reply_handler
;
124 static mpt_reply_handler_t mpt_event_reply_handler
;
125 static void mpt_send_event_ack(struct mpt_softc
*mpt
, request_t
*ack_req
,
126 MSG_EVENT_NOTIFY_REPLY
*msg
, uint32_t context
);
127 static int mpt_send_event_request(struct mpt_softc
*mpt
, int onoff
);
128 static int mpt_soft_reset(struct mpt_softc
*mpt
);
129 static void mpt_hard_reset(struct mpt_softc
*mpt
);
130 static int mpt_dma_buf_alloc(struct mpt_softc
*mpt
);
131 static void mpt_dma_buf_free(struct mpt_softc
*mpt
);
132 static int mpt_configure_ioc(struct mpt_softc
*mpt
, int, int);
133 static int mpt_enable_ioc(struct mpt_softc
*mpt
, int);
135 /************************* Personality Module Support *************************/
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
140 static struct mpt_personality
*mpt_personalities
[MPT_MAX_PERSONALITIES
+ 1];
141 static __inline
struct mpt_personality
*
142 mpt_pers_find(struct mpt_softc
*, u_int
);
143 static __inline
struct mpt_personality
*
144 mpt_pers_find_reverse(struct mpt_softc
*, u_int
);
146 static __inline
struct mpt_personality
*
147 mpt_pers_find(struct mpt_softc
*mpt
, u_int start_at
)
149 KASSERT(start_at
<= MPT_MAX_PERSONALITIES
,
150 ("mpt_pers_find: starting position out of range"));
152 while (start_at
< MPT_MAX_PERSONALITIES
153 && (mpt
->mpt_pers_mask
& (0x1 << start_at
)) == 0) {
156 return (mpt_personalities
[start_at
]);
160 * Used infrequently, so no need to optimize like a forward
161 * traversal where we use the MAX+1 is guaranteed to be NULL
164 static __inline
struct mpt_personality
*
165 mpt_pers_find_reverse(struct mpt_softc
*mpt
, u_int start_at
)
167 while (start_at
< MPT_MAX_PERSONALITIES
168 && (mpt
->mpt_pers_mask
& (0x1 << start_at
)) == 0) {
171 if (start_at
< MPT_MAX_PERSONALITIES
)
172 return (mpt_personalities
[start_at
]);
176 #define MPT_PERS_FOREACH(mpt, pers) \
177 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
179 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
181 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
182 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
184 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
186 static mpt_load_handler_t mpt_stdload
;
187 static mpt_probe_handler_t mpt_stdprobe
;
188 static mpt_attach_handler_t mpt_stdattach
;
189 static mpt_enable_handler_t mpt_stdenable
;
190 static mpt_ready_handler_t mpt_stdready
;
191 static mpt_event_handler_t mpt_stdevent
;
192 static mpt_reset_handler_t mpt_stdreset
;
193 static mpt_shutdown_handler_t mpt_stdshutdown
;
194 static mpt_detach_handler_t mpt_stddetach
;
195 static mpt_unload_handler_t mpt_stdunload
;
196 static struct mpt_personality mpt_default_personality
=
199 .probe
= mpt_stdprobe
,
200 .attach
= mpt_stdattach
,
201 .enable
= mpt_stdenable
,
202 .ready
= mpt_stdready
,
203 .event
= mpt_stdevent
,
204 .reset
= mpt_stdreset
,
205 .shutdown
= mpt_stdshutdown
,
206 .detach
= mpt_stddetach
,
207 .unload
= mpt_stdunload
210 static mpt_load_handler_t mpt_core_load
;
211 static mpt_attach_handler_t mpt_core_attach
;
212 static mpt_enable_handler_t mpt_core_enable
;
213 static mpt_reset_handler_t mpt_core_ioc_reset
;
214 static mpt_event_handler_t mpt_core_event
;
215 static mpt_shutdown_handler_t mpt_core_shutdown
;
216 static mpt_shutdown_handler_t mpt_core_detach
;
217 static mpt_unload_handler_t mpt_core_unload
;
218 static struct mpt_personality mpt_core_personality
=
221 .load
= mpt_core_load
,
222 // .attach = mpt_core_attach,
223 // .enable = mpt_core_enable,
224 .event
= mpt_core_event
,
225 .reset
= mpt_core_ioc_reset
,
226 .shutdown
= mpt_core_shutdown
,
227 .detach
= mpt_core_detach
,
228 .unload
= mpt_core_unload
,
232 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
233 * ordering information. We want the core to always register FIRST.
234 * other modules are set to SI_ORDER_SECOND.
236 static moduledata_t mpt_core_mod
= {
237 "mpt_core", mpt_modevent
, &mpt_core_personality
239 DECLARE_MODULE(mpt_core
, mpt_core_mod
, SI_SUB_DRIVERS
, SI_ORDER_FIRST
);
240 MODULE_VERSION(mpt_core
, 1);
242 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
245 mpt_modevent(module_t mod
, int type
, void *data
)
247 struct mpt_personality
*pers
;
250 pers
= (struct mpt_personality
*)data
;
256 mpt_load_handler_t
**def_handler
;
257 mpt_load_handler_t
**pers_handler
;
260 for (i
= 0; i
< MPT_MAX_PERSONALITIES
; i
++) {
261 if (mpt_personalities
[i
] == NULL
)
264 if (i
>= MPT_MAX_PERSONALITIES
) {
269 mpt_personalities
[i
] = pers
;
271 /* Install standard/noop handlers for any NULL entries. */
272 def_handler
= MPT_PERS_FIRST_HANDLER(&mpt_default_personality
);
273 pers_handler
= MPT_PERS_FIRST_HANDLER(pers
);
274 while (pers_handler
<= MPT_PERS_LAST_HANDLER(pers
)) {
275 if (*pers_handler
== NULL
)
276 *pers_handler
= *def_handler
;
281 error
= (pers
->load(pers
));
283 mpt_personalities
[i
] = NULL
;
289 error
= pers
->unload(pers
);
290 mpt_personalities
[pers
->id
] = NULL
;
300 mpt_stdload(struct mpt_personality
*pers
)
303 /* Load is always successful. */
308 mpt_stdprobe(struct mpt_softc
*mpt
)
311 /* Probe is always successful. */
316 mpt_stdattach(struct mpt_softc
*mpt
)
319 /* Attach is always successful. */
324 mpt_stdenable(struct mpt_softc
*mpt
)
327 /* Enable is always successful. */
332 mpt_stdready(struct mpt_softc
*mpt
)
338 mpt_stdevent(struct mpt_softc
*mpt
, request_t
*req
, MSG_EVENT_NOTIFY_REPLY
*msg
)
341 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "mpt_stdevent: 0x%x\n", msg
->Event
& 0xFF);
342 /* Event was not for us. */
347 mpt_stdreset(struct mpt_softc
*mpt
, int type
)
353 mpt_stdshutdown(struct mpt_softc
*mpt
)
359 mpt_stddetach(struct mpt_softc
*mpt
)
365 mpt_stdunload(struct mpt_personality
*pers
)
368 /* Unload is always successful. */
373 * Post driver attachment, we may want to perform some global actions.
374 * Here is the hook to do so.
378 mpt_postattach(void *unused
)
380 struct mpt_softc
*mpt
;
381 struct mpt_personality
*pers
;
383 TAILQ_FOREACH(mpt
, &mpt_tailq
, links
) {
384 MPT_PERS_FOREACH(mpt
, pers
)
388 SYSINIT(mptdev
, SI_SUB_CONFIGURE
, SI_ORDER_MIDDLE
, mpt_postattach
, NULL
);
390 /******************************* Bus DMA Support ******************************/
392 mpt_map_rquest(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
)
394 struct mpt_map_info
*map_info
;
396 map_info
= (struct mpt_map_info
*)arg
;
397 map_info
->error
= error
;
398 map_info
->phys
= segs
->ds_addr
;
401 /**************************** Reply/Event Handling ****************************/
403 mpt_register_handler(struct mpt_softc
*mpt
, mpt_handler_type type
,
404 mpt_handler_t handler
, uint32_t *phandler_id
)
408 case MPT_HANDLER_REPLY
:
413 if (phandler_id
== NULL
)
416 free_cbi
= MPT_HANDLER_ID_NONE
;
417 for (cbi
= 0; cbi
< MPT_NUM_REPLY_HANDLERS
; cbi
++) {
419 * If the same handler is registered multiple
420 * times, don't error out. Just return the
421 * index of the original registration.
423 if (mpt_reply_handlers
[cbi
] == handler
.reply_handler
) {
424 *phandler_id
= MPT_CBI_TO_HID(cbi
);
429 * Fill from the front in the hope that
430 * all registered handlers consume only a
433 * We don't break on the first empty slot so
434 * that the full table is checked to see if
435 * this handler was previously registered.
437 if (free_cbi
== MPT_HANDLER_ID_NONE
&&
438 (mpt_reply_handlers
[cbi
]
439 == mpt_default_reply_handler
))
442 if (free_cbi
== MPT_HANDLER_ID_NONE
) {
445 mpt_reply_handlers
[free_cbi
] = handler
.reply_handler
;
446 *phandler_id
= MPT_CBI_TO_HID(free_cbi
);
450 mpt_prt(mpt
, "mpt_register_handler unknown type %d\n", type
);
457 mpt_deregister_handler(struct mpt_softc
*mpt
, mpt_handler_type type
,
458 mpt_handler_t handler
, uint32_t handler_id
)
462 case MPT_HANDLER_REPLY
:
466 cbi
= MPT_CBI(handler_id
);
467 if (cbi
>= MPT_NUM_REPLY_HANDLERS
468 || mpt_reply_handlers
[cbi
] != handler
.reply_handler
)
470 mpt_reply_handlers
[cbi
] = mpt_default_reply_handler
;
474 mpt_prt(mpt
, "mpt_deregister_handler unknown type %d\n", type
);
481 mpt_default_reply_handler(struct mpt_softc
*mpt
, request_t
*req
,
482 uint32_t reply_desc
, MSG_DEFAULT_REPLY
*reply_frame
)
486 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
487 req
, req
->serno
, reply_desc
, reply_frame
);
489 if (reply_frame
!= NULL
)
490 mpt_dump_reply_frame(mpt
, reply_frame
);
492 mpt_prt(mpt
, "Reply Frame Ignored\n");
494 return (/*free_reply*/TRUE
);
498 mpt_config_reply_handler(struct mpt_softc
*mpt
, request_t
*req
,
499 uint32_t reply_desc
, MSG_DEFAULT_REPLY
*reply_frame
)
503 if (reply_frame
!= NULL
) {
505 MSG_CONFIG_REPLY
*reply
;
507 cfgp
= (MSG_CONFIG
*)req
->req_vbuf
;
508 reply
= (MSG_CONFIG_REPLY
*)reply_frame
;
509 req
->IOCStatus
= le16toh(reply_frame
->IOCStatus
);
510 bcopy(&reply
->Header
, &cfgp
->Header
,
511 sizeof(cfgp
->Header
));
512 cfgp
->ExtPageLength
= reply
->ExtPageLength
;
513 cfgp
->ExtPageType
= reply
->ExtPageType
;
515 req
->state
&= ~REQ_STATE_QUEUED
;
516 req
->state
|= REQ_STATE_DONE
;
517 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
518 if ((req
->state
& REQ_STATE_NEED_WAKEUP
) != 0) {
520 } else if ((req
->state
& REQ_STATE_TIMEDOUT
) != 0) {
522 * Whew- we can free this request (late completion)
524 mpt_free_request(mpt
, req
);
532 mpt_handshake_reply_handler(struct mpt_softc
*mpt
, request_t
*req
,
533 uint32_t reply_desc
, MSG_DEFAULT_REPLY
*reply_frame
)
536 /* Nothing to be done. */
541 mpt_event_reply_handler(struct mpt_softc
*mpt
, request_t
*req
,
542 uint32_t reply_desc
, MSG_DEFAULT_REPLY
*reply_frame
)
546 KASSERT(reply_frame
!= NULL
, ("null reply in mpt_event_reply_handler"));
547 KASSERT(req
!= NULL
, ("null request in mpt_event_reply_handler"));
550 switch (reply_frame
->Function
) {
551 case MPI_FUNCTION_EVENT_NOTIFICATION
:
553 MSG_EVENT_NOTIFY_REPLY
*msg
;
554 struct mpt_personality
*pers
;
558 msg
= (MSG_EVENT_NOTIFY_REPLY
*)reply_frame
;
559 msg
->EventDataLength
= le16toh(msg
->EventDataLength
);
560 msg
->IOCStatus
= le16toh(msg
->IOCStatus
);
561 msg
->IOCLogInfo
= le32toh(msg
->IOCLogInfo
);
562 msg
->Event
= le32toh(msg
->Event
);
563 MPT_PERS_FOREACH(mpt
, pers
)
564 handled
+= pers
->event(mpt
, req
, msg
);
566 if (handled
== 0 && mpt
->mpt_pers_mask
== 0) {
567 mpt_lprt(mpt
, MPT_PRT_INFO
,
568 "No Handlers For Any Event Notify Frames. "
569 "Event %#x (ACK %sequired).\n",
570 msg
->Event
, msg
->AckRequired
? "r" : "not r");
571 } else if (handled
== 0) {
573 msg
->AckRequired
? MPT_PRT_WARN
: MPT_PRT_INFO
,
574 "Unhandled Event Notify Frame. Event %#x "
575 "(ACK %sequired).\n",
576 msg
->Event
, msg
->AckRequired
? "r" : "not r");
579 if (msg
->AckRequired
) {
583 context
= req
->index
| MPT_REPLY_HANDLER_EVENTS
;
584 ack_req
= mpt_get_request(mpt
, FALSE
);
585 if (ack_req
== NULL
) {
586 struct mpt_evtf_record
*evtf
;
588 evtf
= (struct mpt_evtf_record
*)reply_frame
;
589 evtf
->context
= context
;
590 LIST_INSERT_HEAD(&mpt
->ack_frames
, evtf
, links
);
594 mpt_send_event_ack(mpt
, ack_req
, msg
, context
);
596 * Don't check for CONTINUATION_REPLY here
602 case MPI_FUNCTION_PORT_ENABLE
:
603 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "enable port reply\n");
605 case MPI_FUNCTION_EVENT_ACK
:
608 mpt_prt(mpt
, "unknown event function: %x\n",
609 reply_frame
->Function
);
614 * I'm not sure that this continuation stuff works as it should.
616 * I've had FC async events occur that free the frame up because
617 * the continuation bit isn't set, and then additional async events
618 * then occur using the same context. As you might imagine, this
619 * leads to Very Bad Thing.
621 * Let's just be safe for now and not free them up until we figure
622 * out what's actually happening here.
625 if ((reply_frame
->MsgFlags
& MPI_MSGFLAGS_CONTINUATION_REPLY
) == 0) {
626 TAILQ_REMOVE(&mpt
->request_pending_list
, req
, links
);
627 mpt_free_request(mpt
, req
);
628 mpt_prt(mpt
, "event_reply %x for req %p:%u NOT a continuation",
629 reply_frame
->Function
, req
, req
->serno
);
630 if (reply_frame
->Function
== MPI_FUNCTION_EVENT_NOTIFICATION
) {
631 MSG_EVENT_NOTIFY_REPLY
*msg
=
632 (MSG_EVENT_NOTIFY_REPLY
*)reply_frame
;
633 mpt_prtc(mpt
, " Event=0x%x AckReq=%d",
634 msg
->Event
, msg
->AckRequired
);
637 mpt_prt(mpt
, "event_reply %x for %p:%u IS a continuation",
638 reply_frame
->Function
, req
, req
->serno
);
639 if (reply_frame
->Function
== MPI_FUNCTION_EVENT_NOTIFICATION
) {
640 MSG_EVENT_NOTIFY_REPLY
*msg
=
641 (MSG_EVENT_NOTIFY_REPLY
*)reply_frame
;
642 mpt_prtc(mpt
, " Event=0x%x AckReq=%d",
643 msg
->Event
, msg
->AckRequired
);
652 * Process an asynchronous event from the IOC.
655 mpt_core_event(struct mpt_softc
*mpt
, request_t
*req
,
656 MSG_EVENT_NOTIFY_REPLY
*msg
)
659 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "mpt_core_event: 0x%x\n",
661 switch(msg
->Event
& 0xFF) {
664 case MPI_EVENT_LOG_DATA
:
668 /* Some error occurred that LSI wants logged */
669 mpt_prt(mpt
, "EvtLogData: IOCLogInfo: 0x%08x\n",
671 mpt_prt(mpt
, "\tEvtLogData: Event Data:");
672 for (i
= 0; i
< msg
->EventDataLength
; i
++)
673 mpt_prtc(mpt
, " %08x", msg
->Data
[i
]);
677 case MPI_EVENT_EVENT_CHANGE
:
679 * This is just an acknowledgement
680 * of our mpt_send_event_request.
683 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE
:
693 mpt_send_event_ack(struct mpt_softc
*mpt
, request_t
*ack_req
,
694 MSG_EVENT_NOTIFY_REPLY
*msg
, uint32_t context
)
698 ackp
= (MSG_EVENT_ACK
*)ack_req
->req_vbuf
;
699 memset(ackp
, 0, sizeof (*ackp
));
700 ackp
->Function
= MPI_FUNCTION_EVENT_ACK
;
701 ackp
->Event
= htole32(msg
->Event
);
702 ackp
->EventContext
= htole32(msg
->EventContext
);
703 ackp
->MsgContext
= htole32(context
);
704 mpt_check_doorbell(mpt
);
705 mpt_send_cmd(mpt
, ack_req
);
708 /***************************** Interrupt Handling *****************************/
712 struct mpt_softc
*mpt
;
716 mpt
= (struct mpt_softc
*)arg
;
717 mpt_lprt(mpt
, MPT_PRT_DEBUG2
, "enter mpt_intr\n");
718 MPT_LOCK_ASSERT(mpt
);
720 while ((reply_desc
= mpt_pop_reply_queue(mpt
)) != MPT_REPLY_EMPTY
) {
722 MSG_DEFAULT_REPLY
*reply_frame
;
723 uint32_t reply_baddr
;
734 if ((reply_desc
& MPI_ADDRESS_REPLY_A_BIT
) != 0) {
736 * Ensure that the reply frame is coherent.
738 reply_baddr
= MPT_REPLY_BADDR(reply_desc
);
739 offset
= reply_baddr
- (mpt
->reply_phys
& 0xFFFFFFFF);
740 bus_dmamap_sync_range(mpt
->reply_dmat
,
741 mpt
->reply_dmap
, offset
, MPT_REPLY_SIZE
,
742 BUS_DMASYNC_POSTREAD
);
743 reply_frame
= MPT_REPLY_OTOV(mpt
, offset
);
744 ctxt_idx
= le32toh(reply_frame
->MsgContext
);
748 type
= MPI_GET_CONTEXT_REPLY_TYPE(reply_desc
);
749 ctxt_idx
= reply_desc
;
750 mpt_lprt(mpt
, MPT_PRT_DEBUG1
, "Context Reply: 0x%08x\n",
754 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT
:
755 ctxt_idx
&= MPI_CONTEXT_REPLY_CONTEXT_MASK
;
757 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET
:
758 ctxt_idx
= GET_IO_INDEX(reply_desc
);
759 if (mpt
->tgt_cmd_ptrs
== NULL
) {
761 "mpt_intr: no target cmd ptrs\n");
762 reply_desc
= MPT_REPLY_EMPTY
;
765 if (ctxt_idx
>= mpt
->tgt_cmds_allocated
) {
767 "mpt_intr: bad tgt cmd ctxt %u\n",
769 reply_desc
= MPT_REPLY_EMPTY
;
773 req
= mpt
->tgt_cmd_ptrs
[ctxt_idx
];
775 mpt_prt(mpt
, "no request backpointer "
776 "at index %u", ctxt_idx
);
777 reply_desc
= MPT_REPLY_EMPTY
;
782 * Reformulate ctxt_idx to be just as if
783 * it were another type of context reply
784 * so the code below will find the request
785 * via indexing into the pool.
788 req
->index
| mpt
->scsi_tgt_handler_id
;
791 case MPI_CONTEXT_REPLY_TYPE_LAN
:
792 mpt_prt(mpt
, "LAN CONTEXT REPLY: 0x%08x\n",
794 reply_desc
= MPT_REPLY_EMPTY
;
797 mpt_prt(mpt
, "Context Reply 0x%08x?\n", type
);
798 reply_desc
= MPT_REPLY_EMPTY
;
801 if (reply_desc
== MPT_REPLY_EMPTY
) {
802 if (ntrips
++ > 1000) {
809 cb_index
= MPT_CONTEXT_TO_CBI(ctxt_idx
);
810 req_index
= MPT_CONTEXT_TO_REQI(ctxt_idx
);
811 if (req_index
< MPT_MAX_REQUESTS(mpt
)) {
812 req
= &mpt
->request_pool
[req_index
];
814 mpt_prt(mpt
, "WARN: mpt_intr index == %d (reply_desc =="
815 " 0x%x)\n", req_index
, reply_desc
);
818 bus_dmamap_sync(mpt
->request_dmat
, mpt
->request_dmap
,
819 BUS_DMASYNC_POSTREAD
| BUS_DMASYNC_POSTWRITE
);
820 free_rf
= mpt_reply_handlers
[cb_index
](mpt
, req
,
821 reply_desc
, reply_frame
);
823 if (reply_frame
!= NULL
&& free_rf
) {
824 bus_dmamap_sync_range(mpt
->reply_dmat
,
825 mpt
->reply_dmap
, offset
, MPT_REPLY_SIZE
,
826 BUS_DMASYNC_PREREAD
);
827 mpt_free_reply(mpt
, reply_baddr
);
831 * If we got ourselves disabled, don't get stuck in a loop
834 mpt_disable_ints(mpt
);
837 if (ntrips
++ > 1000) {
841 mpt_lprt(mpt
, MPT_PRT_DEBUG2
, "exit mpt_intr\n");
844 /******************************* Error Recovery *******************************/
846 mpt_complete_request_chain(struct mpt_softc
*mpt
, struct req_queue
*chain
,
849 MSG_DEFAULT_REPLY ioc_status_frame
;
852 memset(&ioc_status_frame
, 0, sizeof(ioc_status_frame
));
853 ioc_status_frame
.MsgLength
= roundup2(sizeof(ioc_status_frame
), 4);
854 ioc_status_frame
.IOCStatus
= iocstatus
;
855 while((req
= TAILQ_FIRST(chain
)) != NULL
) {
856 MSG_REQUEST_HEADER
*msg_hdr
;
859 bus_dmamap_sync(mpt
->request_dmat
, mpt
->request_dmap
,
860 BUS_DMASYNC_POSTREAD
| BUS_DMASYNC_POSTWRITE
);
861 msg_hdr
= (MSG_REQUEST_HEADER
*)req
->req_vbuf
;
862 ioc_status_frame
.Function
= msg_hdr
->Function
;
863 ioc_status_frame
.MsgContext
= msg_hdr
->MsgContext
;
864 cb_index
= MPT_CONTEXT_TO_CBI(le32toh(msg_hdr
->MsgContext
));
865 mpt_reply_handlers
[cb_index
](mpt
, req
, msg_hdr
->MsgContext
,
867 if (mpt_req_on_pending_list(mpt
, req
) != 0)
868 TAILQ_REMOVE(chain
, req
, links
);
872 /********************************* Diagnostics ********************************/
874 * Perform a diagnostic dump of a reply frame.
877 mpt_dump_reply_frame(struct mpt_softc
*mpt
, MSG_DEFAULT_REPLY
*reply_frame
)
880 mpt_prt(mpt
, "Address Reply:\n");
881 mpt_print_reply(reply_frame
);
884 /******************************* Doorbell Access ******************************/
885 static __inline
uint32_t mpt_rd_db(struct mpt_softc
*mpt
);
886 static __inline
uint32_t mpt_rd_intr(struct mpt_softc
*mpt
);
888 static __inline
uint32_t
889 mpt_rd_db(struct mpt_softc
*mpt
)
892 return mpt_read(mpt
, MPT_OFFSET_DOORBELL
);
895 static __inline
uint32_t
896 mpt_rd_intr(struct mpt_softc
*mpt
)
899 return mpt_read(mpt
, MPT_OFFSET_INTR_STATUS
);
902 /* Busy wait for a door bell to be read by IOC */
904 mpt_wait_db_ack(struct mpt_softc
*mpt
)
908 for (i
=0; i
< MPT_MAX_WAIT
; i
++) {
909 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt
))) {
910 maxwait_ack
= i
> maxwait_ack
? i
: maxwait_ack
;
918 /* Busy wait for a door bell interrupt */
920 mpt_wait_db_int(struct mpt_softc
*mpt
)
924 for (i
= 0; i
< MPT_MAX_WAIT
; i
++) {
925 if (MPT_DB_INTR(mpt_rd_intr(mpt
))) {
926 maxwait_int
= i
> maxwait_int
? i
: maxwait_int
;
934 /* Wait for IOC to transition to a give state */
936 mpt_check_doorbell(struct mpt_softc
*mpt
)
938 uint32_t db
= mpt_rd_db(mpt
);
940 if (MPT_STATE(db
) != MPT_DB_STATE_RUNNING
) {
941 mpt_prt(mpt
, "Device not running\n");
946 /* Wait for IOC to transition to a give state */
948 mpt_wait_state(struct mpt_softc
*mpt
, enum DB_STATE_BITS state
)
952 for (i
= 0; i
< MPT_MAX_WAIT
; i
++) {
953 uint32_t db
= mpt_rd_db(mpt
);
954 if (MPT_STATE(db
) == state
) {
955 maxwait_state
= i
> maxwait_state
? i
: maxwait_state
;
/************************* Initialization/Configuration ************************/
965 static int mpt_download_fw(struct mpt_softc
*mpt
);
967 /* Issue the reset COMMAND to the IOC */
969 mpt_soft_reset(struct mpt_softc
*mpt
)
972 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "soft reset\n");
974 /* Have to use hard reset if we are not in Running state */
975 if (MPT_STATE(mpt_rd_db(mpt
)) != MPT_DB_STATE_RUNNING
) {
976 mpt_prt(mpt
, "soft reset failed: device not running\n");
980 /* If door bell is in use we don't have a chance of getting
981 * a word in since the IOC probably crashed in message
982 * processing. So don't waste our time.
984 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt
))) {
985 mpt_prt(mpt
, "soft reset failed: doorbell wedged\n");
989 /* Send the reset request to the IOC */
990 mpt_write(mpt
, MPT_OFFSET_DOORBELL
,
991 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET
<< MPI_DOORBELL_FUNCTION_SHIFT
);
992 if (mpt_wait_db_ack(mpt
) != MPT_OK
) {
993 mpt_prt(mpt
, "soft reset failed: ack timeout\n");
997 /* Wait for the IOC to reload and come out of reset state */
998 if (mpt_wait_state(mpt
, MPT_DB_STATE_READY
) != MPT_OK
) {
999 mpt_prt(mpt
, "soft reset failed: device did not restart\n");
1007 mpt_enable_diag_mode(struct mpt_softc
*mpt
)
1014 if ((mpt_read(mpt
, MPT_OFFSET_DIAGNOSTIC
) & MPI_DIAG_DRWE
) != 0)
1017 /* Enable diagnostic registers */
1018 mpt_write(mpt
, MPT_OFFSET_SEQUENCE
, 0xFF);
1019 mpt_write(mpt
, MPT_OFFSET_SEQUENCE
, MPI_WRSEQ_1ST_KEY_VALUE
);
1020 mpt_write(mpt
, MPT_OFFSET_SEQUENCE
, MPI_WRSEQ_2ND_KEY_VALUE
);
1021 mpt_write(mpt
, MPT_OFFSET_SEQUENCE
, MPI_WRSEQ_3RD_KEY_VALUE
);
1022 mpt_write(mpt
, MPT_OFFSET_SEQUENCE
, MPI_WRSEQ_4TH_KEY_VALUE
);
1023 mpt_write(mpt
, MPT_OFFSET_SEQUENCE
, MPI_WRSEQ_5TH_KEY_VALUE
);
1033 mpt_disable_diag_mode(struct mpt_softc
*mpt
)
1036 mpt_write(mpt
, MPT_OFFSET_SEQUENCE
, 0xFFFFFFFF);
1039 /* This is a magic diagnostic reset that resets all the ARM
1040 * processors in the chip.
1043 mpt_hard_reset(struct mpt_softc
*mpt
)
1049 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "hard reset\n");
1052 mpt_write(mpt
, MPT_OFFSET_RESET_1078
, 0x07);
1057 error
= mpt_enable_diag_mode(mpt
);
1059 mpt_prt(mpt
, "WARNING - Could not enter diagnostic mode !\n");
1060 mpt_prt(mpt
, "Trying to reset anyway.\n");
1063 diagreg
= mpt_read(mpt
, MPT_OFFSET_DIAGNOSTIC
);
1066 * This appears to be a workaround required for some
1067 * firmware or hardware revs.
1069 mpt_write(mpt
, MPT_OFFSET_DIAGNOSTIC
, diagreg
| MPI_DIAG_DISABLE_ARM
);
1072 /* Diag. port is now active so we can now hit the reset bit */
1073 mpt_write(mpt
, MPT_OFFSET_DIAGNOSTIC
, diagreg
| MPI_DIAG_RESET_ADAPTER
);
1076 * Ensure that the reset has finished. We delay 1ms
1077 * prior to reading the register to make sure the chip
1078 * has sufficiently completed its reset to handle register
1084 diagreg
= mpt_read(mpt
, MPT_OFFSET_DIAGNOSTIC
);
1085 } while (--wait
&& (diagreg
& MPI_DIAG_RESET_ADAPTER
) == 0);
1088 mpt_prt(mpt
, "WARNING - Failed hard reset! "
1089 "Trying to initialize anyway.\n");
1093 * If we have firmware to download, it must be loaded before
1094 * the controller will become operational. Do so now.
1096 if (mpt
->fw_image
!= NULL
) {
1098 error
= mpt_download_fw(mpt
);
1101 mpt_prt(mpt
, "WARNING - Firmware Download Failed!\n");
1102 mpt_prt(mpt
, "Trying to initialize anyway.\n");
1107 * Reseting the controller should have disabled write
1108 * access to the diagnostic registers, but disable
1109 * manually to be sure.
1111 mpt_disable_diag_mode(mpt
);
1115 mpt_core_ioc_reset(struct mpt_softc
*mpt
, int type
)
1119 * Complete all pending requests with a status
1120 * appropriate for an IOC reset.
1122 mpt_complete_request_chain(mpt
, &mpt
->request_pending_list
,
1123 MPI_IOCSTATUS_INVALID_STATE
);
1127 * Reset the IOC when needed. Try software command first then if needed
1128 * poke at the magic diagnostic reset. Note that a hard reset resets
1129 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
1130 * fouls up the PCI configuration registers.
1133 mpt_reset(struct mpt_softc
*mpt
, int reinit
)
1135 struct mpt_personality
*pers
;
1140 * Try a soft reset. If that fails, get out the big hammer.
1143 if ((ret
= mpt_soft_reset(mpt
)) != MPT_OK
) {
1145 for (cnt
= 0; cnt
< 5; cnt
++) {
1146 /* Failed; do a hard reset */
1147 mpt_hard_reset(mpt
);
1150 * Wait for the IOC to reload
1151 * and come out of reset state
1153 ret
= mpt_wait_state(mpt
, MPT_DB_STATE_READY
);
1154 if (ret
== MPT_OK
) {
1158 * Okay- try to check again...
1160 ret
= mpt_wait_state(mpt
, MPT_DB_STATE_READY
);
1161 if (ret
== MPT_OK
) {
1164 mpt_prt(mpt
, "mpt_reset: failed hard reset (%d:%d)\n",
1169 if (retry_cnt
== 0) {
1171 * Invoke reset handlers. We bump the reset count so
1172 * that mpt_wait_req() understands that regardless of
1173 * the specified wait condition, it should stop its wait.
1176 MPT_PERS_FOREACH(mpt
, pers
)
1177 pers
->reset(mpt
, ret
);
1181 ret
= mpt_enable_ioc(mpt
, 1);
1182 if (ret
== MPT_OK
) {
1183 mpt_enable_ints(mpt
);
1186 if (ret
!= MPT_OK
&& retry_cnt
++ < 2) {
1192 /* Return a command buffer to the free queue */
1194 mpt_free_request(struct mpt_softc
*mpt
, request_t
*req
)
1197 struct mpt_evtf_record
*record
;
1198 uint32_t offset
, reply_baddr
;
1200 if (req
== NULL
|| req
!= &mpt
->request_pool
[req
->index
]) {
1201 panic("mpt_free_request: bad req ptr");
1203 if ((nxt
= req
->chain
) != NULL
) {
1205 mpt_free_request(mpt
, nxt
); /* NB: recursion */
1207 KASSERT(req
->state
!= REQ_STATE_FREE
, ("freeing free request"));
1208 KASSERT(!(req
->state
& REQ_STATE_LOCKED
), ("freeing locked request"));
1209 MPT_LOCK_ASSERT(mpt
);
1210 KASSERT(mpt_req_on_free_list(mpt
, req
) == 0,
1211 ("mpt_free_request: req %p:%u func %x already on freelist",
1212 req
, req
->serno
, ((MSG_REQUEST_HEADER
*)req
->req_vbuf
)->Function
));
1213 KASSERT(mpt_req_on_pending_list(mpt
, req
) == 0,
1214 ("mpt_free_request: req %p:%u func %x on pending list",
1215 req
, req
->serno
, ((MSG_REQUEST_HEADER
*)req
->req_vbuf
)->Function
));
1217 mpt_req_not_spcl(mpt
, req
, "mpt_free_request", __LINE__
);
1221 if (LIST_EMPTY(&mpt
->ack_frames
)) {
1223 * Insert free ones at the tail
1226 req
->state
= REQ_STATE_FREE
;
1228 memset(req
->req_vbuf
, 0xff, sizeof (MSG_REQUEST_HEADER
));
1230 TAILQ_INSERT_TAIL(&mpt
->request_free_list
, req
, links
);
1231 if (mpt
->getreqwaiter
!= 0) {
1232 mpt
->getreqwaiter
= 0;
1233 wakeup(&mpt
->request_free_list
);
1239 * Process an ack frame deferred due to resource shortage.
1241 record
= LIST_FIRST(&mpt
->ack_frames
);
1242 LIST_REMOVE(record
, links
);
1243 req
->state
= REQ_STATE_ALLOCATED
;
1244 mpt_assign_serno(mpt
, req
);
1245 mpt_send_event_ack(mpt
, req
, &record
->reply
, record
->context
);
1246 offset
= (uint32_t)((uint8_t *)record
- mpt
->reply
);
1247 reply_baddr
= offset
+ (mpt
->reply_phys
& 0xFFFFFFFF);
1248 bus_dmamap_sync_range(mpt
->reply_dmat
, mpt
->reply_dmap
, offset
,
1249 MPT_REPLY_SIZE
, BUS_DMASYNC_PREREAD
);
1250 mpt_free_reply(mpt
, reply_baddr
);
1253 /* Get a command buffer from the free queue */
1255 mpt_get_request(struct mpt_softc
*mpt
, int sleep_ok
)
1260 MPT_LOCK_ASSERT(mpt
);
1261 req
= TAILQ_FIRST(&mpt
->request_free_list
);
1263 KASSERT(req
== &mpt
->request_pool
[req
->index
],
1264 ("mpt_get_request: corrupted request free list"));
1265 KASSERT(req
->state
== REQ_STATE_FREE
,
1266 ("req %p:%u not free on free list %x index %d function %x",
1267 req
, req
->serno
, req
->state
, req
->index
,
1268 ((MSG_REQUEST_HEADER
*)req
->req_vbuf
)->Function
));
1269 TAILQ_REMOVE(&mpt
->request_free_list
, req
, links
);
1270 req
->state
= REQ_STATE_ALLOCATED
;
1272 mpt_assign_serno(mpt
, req
);
1273 } else if (sleep_ok
!= 0) {
1274 mpt
->getreqwaiter
= 1;
1275 mpt_sleep(mpt
, &mpt
->request_free_list
, 0, "mptgreq", 0);
1281 /* Pass the command to the IOC */
1283 mpt_send_cmd(struct mpt_softc
*mpt
, request_t
*req
)
1286 if (mpt
->verbose
> MPT_PRT_DEBUG2
) {
1287 mpt_dump_request(mpt
, req
);
1289 bus_dmamap_sync(mpt
->request_dmat
, mpt
->request_dmap
,
1290 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
1291 req
->state
|= REQ_STATE_QUEUED
;
1292 KASSERT(mpt_req_on_free_list(mpt
, req
) == 0,
1293 ("req %p:%u func %x on freelist list in mpt_send_cmd",
1294 req
, req
->serno
, ((MSG_REQUEST_HEADER
*)req
->req_vbuf
)->Function
));
1295 KASSERT(mpt_req_on_pending_list(mpt
, req
) == 0,
1296 ("req %p:%u func %x already on pending list in mpt_send_cmd",
1297 req
, req
->serno
, ((MSG_REQUEST_HEADER
*)req
->req_vbuf
)->Function
));
1298 TAILQ_INSERT_HEAD(&mpt
->request_pending_list
, req
, links
);
1299 mpt_write(mpt
, MPT_OFFSET_REQUEST_Q
, (uint32_t) req
->req_pbuf
);
1303 * Wait for a request to complete.
1306 * mpt softc of controller executing request
1307 * req request to wait for
1308 * sleep_ok nonzero implies may sleep in this context
1309 * time_ms timeout in ms. 0 implies no timeout.
1312 * 0 Request completed
1313 * non-0 Timeout fired before request completion.
1316 mpt_wait_req(struct mpt_softc
*mpt
, request_t
*req
,
1317 mpt_req_state_t state
, mpt_req_state_t mask
,
1318 int sleep_ok
, int time_ms
)
1325 * timeout is in ms. 0 indicates infinite wait.
1326 * Convert to ticks or 500us units depending on
1329 if (sleep_ok
!= 0) {
1330 timeout
= (time_ms
* hz
) / 1000;
1332 timeout
= time_ms
* 2;
1334 req
->state
|= REQ_STATE_NEED_WAKEUP
;
1335 mask
&= ~REQ_STATE_NEED_WAKEUP
;
1336 saved_cnt
= mpt
->reset_cnt
;
1337 while ((req
->state
& mask
) != state
&& mpt
->reset_cnt
== saved_cnt
) {
1338 if (sleep_ok
!= 0) {
1339 error
= mpt_sleep(mpt
, req
, 0, "mptreq", timeout
);
1340 if (error
== EWOULDBLOCK
) {
1345 if (time_ms
!= 0 && --timeout
== 0) {
1352 req
->state
&= ~REQ_STATE_NEED_WAKEUP
;
1353 if (mpt
->reset_cnt
!= saved_cnt
) {
1356 if (time_ms
&& timeout
<= 0) {
1357 MSG_REQUEST_HEADER
*msg_hdr
= req
->req_vbuf
;
1358 req
->state
|= REQ_STATE_TIMEDOUT
;
1359 mpt_prt(mpt
, "mpt_wait_req(%x) timed out\n", msg_hdr
->Function
);
1366 * Send a command to the IOC via the handshake register.
1368 * Only done at initialization time and for certain unusual
1369 * commands such as device/bus reset as specified by LSI.
1372 mpt_send_handshake_cmd(struct mpt_softc
*mpt
, size_t len
, void *cmd
)
1375 uint32_t data
, *data32
;
1377 /* Check condition of the IOC */
1378 data
= mpt_rd_db(mpt
);
1379 if ((MPT_STATE(data
) != MPT_DB_STATE_READY
1380 && MPT_STATE(data
) != MPT_DB_STATE_RUNNING
1381 && MPT_STATE(data
) != MPT_DB_STATE_FAULT
)
1382 || MPT_DB_IS_IN_USE(data
)) {
1383 mpt_prt(mpt
, "handshake aborted - invalid doorbell state\n");
1388 /* We move things in 32 bit chunks */
1389 len
= (len
+ 3) >> 2;
1392 /* Clear any left over pending doorbell interrupts */
1393 if (MPT_DB_INTR(mpt_rd_intr(mpt
)))
1394 mpt_write(mpt
, MPT_OFFSET_INTR_STATUS
, 0);
1397 * Tell the handshake reg. we are going to send a command
1398 * and how long it is going to be.
1400 data
= (MPI_FUNCTION_HANDSHAKE
<< MPI_DOORBELL_FUNCTION_SHIFT
) |
1401 (len
<< MPI_DOORBELL_ADD_DWORDS_SHIFT
);
1402 mpt_write(mpt
, MPT_OFFSET_DOORBELL
, data
);
1404 /* Wait for the chip to notice */
1405 if (mpt_wait_db_int(mpt
) != MPT_OK
) {
1406 mpt_prt(mpt
, "mpt_send_handshake_cmd: db ignored\n");
1410 /* Clear the interrupt */
1411 mpt_write(mpt
, MPT_OFFSET_INTR_STATUS
, 0);
1413 if (mpt_wait_db_ack(mpt
) != MPT_OK
) {
1414 mpt_prt(mpt
, "mpt_send_handshake_cmd: db ack timed out\n");
1418 /* Send the command */
1419 for (i
= 0; i
< len
; i
++) {
1420 mpt_write(mpt
, MPT_OFFSET_DOORBELL
, htole32(*data32
++));
1421 if (mpt_wait_db_ack(mpt
) != MPT_OK
) {
1423 "mpt_send_handshake_cmd: timeout @ index %d\n", i
);
1430 /* Get the response from the handshake register */
1432 mpt_recv_handshake_reply(struct mpt_softc
*mpt
, size_t reply_len
, void *reply
)
1434 int left
, reply_left
;
1437 MSG_DEFAULT_REPLY
*hdr
;
1439 /* We move things out in 16 bit chunks */
1441 data16
= (u_int16_t
*)reply
;
1443 hdr
= (MSG_DEFAULT_REPLY
*)reply
;
1445 /* Get first word */
1446 if (mpt_wait_db_int(mpt
) != MPT_OK
) {
1447 mpt_prt(mpt
, "mpt_recv_handshake_cmd timeout1\n");
1450 data
= mpt_read(mpt
, MPT_OFFSET_DOORBELL
);
1451 *data16
++ = le16toh(data
& MPT_DB_DATA_MASK
);
1452 mpt_write(mpt
, MPT_OFFSET_INTR_STATUS
, 0);
1454 /* Get Second Word */
1455 if (mpt_wait_db_int(mpt
) != MPT_OK
) {
1456 mpt_prt(mpt
, "mpt_recv_handshake_cmd timeout2\n");
1459 data
= mpt_read(mpt
, MPT_OFFSET_DOORBELL
);
1460 *data16
++ = le16toh(data
& MPT_DB_DATA_MASK
);
1461 mpt_write(mpt
, MPT_OFFSET_INTR_STATUS
, 0);
1464 * With the second word, we can now look at the length.
1465 * Warn about a reply that's too short (except for IOC FACTS REPLY)
1467 if ((reply_len
>> 1) != hdr
->MsgLength
&&
1468 (hdr
->Function
!= MPI_FUNCTION_IOC_FACTS
)){
1469 mpt_prt(mpt
, "reply length does not match message length: "
1470 "got %x; expected %zx for function %x\n",
1471 hdr
->MsgLength
<< 2, reply_len
<< 1, hdr
->Function
);
1474 /* Get rest of the reply; but don't overflow the provided buffer */
1475 left
= (hdr
->MsgLength
<< 1) - 2;
1476 reply_left
= reply_len
- 2;
1480 if (mpt_wait_db_int(mpt
) != MPT_OK
) {
1481 mpt_prt(mpt
, "mpt_recv_handshake_cmd timeout3\n");
1484 data
= mpt_read(mpt
, MPT_OFFSET_DOORBELL
);
1485 datum
= le16toh(data
& MPT_DB_DATA_MASK
);
1487 if (reply_left
-- > 0)
1490 mpt_write(mpt
, MPT_OFFSET_INTR_STATUS
, 0);
1493 /* One more wait & clear at the end */
1494 if (mpt_wait_db_int(mpt
) != MPT_OK
) {
1495 mpt_prt(mpt
, "mpt_recv_handshake_cmd timeout4\n");
1498 mpt_write(mpt
, MPT_OFFSET_INTR_STATUS
, 0);
1500 if ((hdr
->IOCStatus
& MPI_IOCSTATUS_MASK
) != MPI_IOCSTATUS_SUCCESS
) {
1501 if (mpt
->verbose
>= MPT_PRT_TRACE
)
1502 mpt_print_reply(hdr
);
1503 return (MPT_FAIL
| hdr
->IOCStatus
);
1510 mpt_get_iocfacts(struct mpt_softc
*mpt
, MSG_IOC_FACTS_REPLY
*freplp
)
1512 MSG_IOC_FACTS f_req
;
1515 memset(&f_req
, 0, sizeof f_req
);
1516 f_req
.Function
= MPI_FUNCTION_IOC_FACTS
;
1517 f_req
.MsgContext
= htole32(MPT_REPLY_HANDLER_HANDSHAKE
);
1518 error
= mpt_send_handshake_cmd(mpt
, sizeof f_req
, &f_req
);
1522 error
= mpt_recv_handshake_reply(mpt
, sizeof (*freplp
), freplp
);
1527 mpt_get_portfacts(struct mpt_softc
*mpt
, U8 port
, MSG_PORT_FACTS_REPLY
*freplp
)
1529 MSG_PORT_FACTS f_req
;
1532 memset(&f_req
, 0, sizeof f_req
);
1533 f_req
.Function
= MPI_FUNCTION_PORT_FACTS
;
1534 f_req
.PortNumber
= port
;
1535 f_req
.MsgContext
= htole32(MPT_REPLY_HANDLER_HANDSHAKE
);
1536 error
= mpt_send_handshake_cmd(mpt
, sizeof f_req
, &f_req
);
1540 error
= mpt_recv_handshake_reply(mpt
, sizeof (*freplp
), freplp
);
1545 * Send the initialization request. This is where we specify how many
1546 * SCSI busses and how many devices per bus we wish to emulate.
1547 * This is also the command that specifies the max size of the reply
1548 * frames from the IOC that we will be allocating.
1551 mpt_send_ioc_init(struct mpt_softc
*mpt
, uint32_t who
)
1555 MSG_IOC_INIT_REPLY reply
;
1557 memset(&init
, 0, sizeof init
);
1559 init
.Function
= MPI_FUNCTION_IOC_INIT
;
1560 init
.MaxDevices
= 0; /* at least 256 devices per bus */
1561 init
.MaxBuses
= 16; /* at least 16 busses */
1563 init
.MsgVersion
= htole16(MPI_VERSION
);
1564 init
.HeaderVersion
= htole16(MPI_HEADER_VERSION
);
1565 init
.ReplyFrameSize
= htole16(MPT_REPLY_SIZE
);
1566 init
.MsgContext
= htole32(MPT_REPLY_HANDLER_HANDSHAKE
);
1568 if ((error
= mpt_send_handshake_cmd(mpt
, sizeof init
, &init
)) != 0) {
1572 error
= mpt_recv_handshake_reply(mpt
, sizeof reply
, &reply
);
1578 * Utiltity routine to read configuration headers and pages
1581 mpt_issue_cfg_req(struct mpt_softc
*mpt
, request_t
*req
, cfgparms_t
*params
,
1582 bus_addr_t addr
, bus_size_t len
, int sleep_ok
, int timeout_ms
)
1587 cfgp
= req
->req_vbuf
;
1588 memset(cfgp
, 0, sizeof *cfgp
);
1589 cfgp
->Action
= params
->Action
;
1590 cfgp
->Function
= MPI_FUNCTION_CONFIG
;
1591 cfgp
->Header
.PageVersion
= params
->PageVersion
;
1592 cfgp
->Header
.PageNumber
= params
->PageNumber
;
1593 cfgp
->PageAddress
= htole32(params
->PageAddress
);
1594 if ((params
->PageType
& MPI_CONFIG_PAGETYPE_MASK
) ==
1595 MPI_CONFIG_PAGETYPE_EXTENDED
) {
1596 cfgp
->Header
.PageType
= MPI_CONFIG_PAGETYPE_EXTENDED
;
1597 cfgp
->Header
.PageLength
= 0;
1598 cfgp
->ExtPageLength
= htole16(params
->ExtPageLength
);
1599 cfgp
->ExtPageType
= params
->ExtPageType
;
1601 cfgp
->Header
.PageType
= params
->PageType
;
1602 cfgp
->Header
.PageLength
= params
->PageLength
;
1604 se
= (SGE_SIMPLE32
*)&cfgp
->PageBufferSGE
;
1605 se
->Address
= htole32(addr
);
1606 MPI_pSGE_SET_LENGTH(se
, len
);
1607 MPI_pSGE_SET_FLAGS(se
, (MPI_SGE_FLAGS_SIMPLE_ELEMENT
|
1608 MPI_SGE_FLAGS_LAST_ELEMENT
| MPI_SGE_FLAGS_END_OF_BUFFER
|
1609 MPI_SGE_FLAGS_END_OF_LIST
|
1610 ((params
->Action
== MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1611 || params
->Action
== MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM
)
1612 ? MPI_SGE_FLAGS_HOST_TO_IOC
: MPI_SGE_FLAGS_IOC_TO_HOST
)));
1613 se
->FlagsLength
= htole32(se
->FlagsLength
);
1614 cfgp
->MsgContext
= htole32(req
->index
| MPT_REPLY_HANDLER_CONFIG
);
1616 mpt_check_doorbell(mpt
);
1617 mpt_send_cmd(mpt
, req
);
1618 return (mpt_wait_req(mpt
, req
, REQ_STATE_DONE
, REQ_STATE_DONE
,
1619 sleep_ok
, timeout_ms
));
1623 mpt_read_extcfg_header(struct mpt_softc
*mpt
, int PageVersion
, int PageNumber
,
1624 uint32_t PageAddress
, int ExtPageType
,
1625 CONFIG_EXTENDED_PAGE_HEADER
*rslt
,
1626 int sleep_ok
, int timeout_ms
)
1630 MSG_CONFIG_REPLY
*cfgp
;
1633 req
= mpt_get_request(mpt
, sleep_ok
);
1635 mpt_prt(mpt
, "mpt_extread_cfg_header: Get request failed!\n");
1639 bzero(¶ms
, sizeof(params
));
1640 params
.Action
= MPI_CONFIG_ACTION_PAGE_HEADER
;
1641 params
.PageVersion
= PageVersion
;
1642 params
.PageLength
= 0;
1643 params
.PageNumber
= PageNumber
;
1644 params
.PageType
= MPI_CONFIG_PAGETYPE_EXTENDED
;
1645 params
.PageAddress
= PageAddress
;
1646 params
.ExtPageType
= ExtPageType
;
1647 params
.ExtPageLength
= 0;
1648 error
= mpt_issue_cfg_req(mpt
, req
, ¶ms
, /*addr*/0, /*len*/0,
1649 sleep_ok
, timeout_ms
);
1652 * Leave the request. Without resetting the chip, it's
1653 * still owned by it and we'll just get into trouble
1654 * freeing it now. Mark it as abandoned so that if it
1655 * shows up later it can be freed.
1657 mpt_prt(mpt
, "read_extcfg_header timed out\n");
1661 switch (req
->IOCStatus
& MPI_IOCSTATUS_MASK
) {
1662 case MPI_IOCSTATUS_SUCCESS
:
1663 cfgp
= req
->req_vbuf
;
1664 rslt
->PageVersion
= cfgp
->Header
.PageVersion
;
1665 rslt
->PageNumber
= cfgp
->Header
.PageNumber
;
1666 rslt
->PageType
= cfgp
->Header
.PageType
;
1667 rslt
->ExtPageLength
= le16toh(cfgp
->ExtPageLength
);
1668 rslt
->ExtPageType
= cfgp
->ExtPageType
;
1671 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE
:
1672 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
1673 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1674 MPI_CONFIG_PAGETYPE_EXTENDED
, PageNumber
, PageAddress
);
1678 mpt_prt(mpt
, "mpt_read_extcfg_header: Config Info Status %x\n",
1683 mpt_free_request(mpt
, req
);
1688 mpt_read_extcfg_page(struct mpt_softc
*mpt
, int Action
, uint32_t PageAddress
,
1689 CONFIG_EXTENDED_PAGE_HEADER
*hdr
, void *buf
, size_t len
,
1690 int sleep_ok
, int timeout_ms
)
1696 req
= mpt_get_request(mpt
, sleep_ok
);
1698 mpt_prt(mpt
, "mpt_read_extcfg_page: Get request failed!\n");
1702 params
.Action
= Action
;
1703 params
.PageVersion
= hdr
->PageVersion
;
1704 params
.PageLength
= 0;
1705 params
.PageNumber
= hdr
->PageNumber
;
1706 params
.PageType
= MPI_CONFIG_PAGETYPE_EXTENDED
;
1707 params
.PageAddress
= PageAddress
;
1708 params
.ExtPageType
= hdr
->ExtPageType
;
1709 params
.ExtPageLength
= hdr
->ExtPageLength
;
1710 error
= mpt_issue_cfg_req(mpt
, req
, ¶ms
,
1711 req
->req_pbuf
+ MPT_RQSL(mpt
),
1712 len
, sleep_ok
, timeout_ms
);
1714 mpt_prt(mpt
, "read_extcfg_page(%d) timed out\n", Action
);
1718 if ((req
->IOCStatus
& MPI_IOCSTATUS_MASK
) != MPI_IOCSTATUS_SUCCESS
) {
1719 mpt_prt(mpt
, "mpt_read_extcfg_page: Config Info Status %x\n",
1721 mpt_free_request(mpt
, req
);
1724 memcpy(buf
, ((uint8_t *)req
->req_vbuf
)+MPT_RQSL(mpt
), len
);
1725 mpt_free_request(mpt
, req
);
1730 mpt_read_cfg_header(struct mpt_softc
*mpt
, int PageType
, int PageNumber
,
1731 uint32_t PageAddress
, CONFIG_PAGE_HEADER
*rslt
,
1732 int sleep_ok
, int timeout_ms
)
1739 req
= mpt_get_request(mpt
, sleep_ok
);
1741 mpt_prt(mpt
, "mpt_read_cfg_header: Get request failed!\n");
1745 bzero(¶ms
, sizeof(params
));
1746 params
.Action
= MPI_CONFIG_ACTION_PAGE_HEADER
;
1747 params
.PageVersion
= 0;
1748 params
.PageLength
= 0;
1749 params
.PageNumber
= PageNumber
;
1750 params
.PageType
= PageType
;
1751 params
.PageAddress
= PageAddress
;
1752 error
= mpt_issue_cfg_req(mpt
, req
, ¶ms
, /*addr*/0, /*len*/0,
1753 sleep_ok
, timeout_ms
);
1756 * Leave the request. Without resetting the chip, it's
1757 * still owned by it and we'll just get into trouble
1758 * freeing it now. Mark it as abandoned so that if it
1759 * shows up later it can be freed.
1761 mpt_prt(mpt
, "read_cfg_header timed out\n");
1765 switch (req
->IOCStatus
& MPI_IOCSTATUS_MASK
) {
1766 case MPI_IOCSTATUS_SUCCESS
:
1767 cfgp
= req
->req_vbuf
;
1768 bcopy(&cfgp
->Header
, rslt
, sizeof(*rslt
));
1771 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE
:
1772 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
1773 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1774 PageType
, PageNumber
, PageAddress
);
1778 mpt_prt(mpt
, "mpt_read_cfg_header: Config Info Status %x\n",
1783 mpt_free_request(mpt
, req
);
1788 mpt_read_cfg_page(struct mpt_softc
*mpt
, int Action
, uint32_t PageAddress
,
1789 CONFIG_PAGE_HEADER
*hdr
, size_t len
, int sleep_ok
,
1796 req
= mpt_get_request(mpt
, sleep_ok
);
1798 mpt_prt(mpt
, "mpt_read_cfg_page: Get request failed!\n");
1802 bzero(¶ms
, sizeof(params
));
1803 params
.Action
= Action
;
1804 params
.PageVersion
= hdr
->PageVersion
;
1805 params
.PageLength
= hdr
->PageLength
;
1806 params
.PageNumber
= hdr
->PageNumber
;
1807 params
.PageType
= hdr
->PageType
& MPI_CONFIG_PAGETYPE_MASK
;
1808 params
.PageAddress
= PageAddress
;
1809 error
= mpt_issue_cfg_req(mpt
, req
, ¶ms
,
1810 req
->req_pbuf
+ MPT_RQSL(mpt
),
1811 len
, sleep_ok
, timeout_ms
);
1813 mpt_prt(mpt
, "read_cfg_page(%d) timed out\n", Action
);
1817 if ((req
->IOCStatus
& MPI_IOCSTATUS_MASK
) != MPI_IOCSTATUS_SUCCESS
) {
1818 mpt_prt(mpt
, "mpt_read_cfg_page: Config Info Status %x\n",
1820 mpt_free_request(mpt
, req
);
1823 memcpy(hdr
, ((uint8_t *)req
->req_vbuf
)+MPT_RQSL(mpt
), len
);
1824 mpt_free_request(mpt
, req
);
1829 mpt_write_cfg_page(struct mpt_softc
*mpt
, int Action
, uint32_t PageAddress
,
1830 CONFIG_PAGE_HEADER
*hdr
, size_t len
, int sleep_ok
,
1838 hdr_attr
= hdr
->PageType
& MPI_CONFIG_PAGEATTR_MASK
;
1839 if (hdr_attr
!= MPI_CONFIG_PAGEATTR_CHANGEABLE
&&
1840 hdr_attr
!= MPI_CONFIG_PAGEATTR_PERSISTENT
) {
1841 mpt_prt(mpt
, "page type 0x%x not changeable\n",
1842 hdr
->PageType
& MPI_CONFIG_PAGETYPE_MASK
);
1848 * We shouldn't mask off other bits here.
1850 hdr
->PageType
&= MPI_CONFIG_PAGETYPE_MASK
;
1853 req
= mpt_get_request(mpt
, sleep_ok
);
1857 memcpy(((caddr_t
)req
->req_vbuf
) + MPT_RQSL(mpt
), hdr
, len
);
1860 * There isn't any point in restoring stripped out attributes
1861 * if you then mask them going down to issue the request.
1864 bzero(¶ms
, sizeof(params
));
1865 params
.Action
= Action
;
1866 params
.PageVersion
= hdr
->PageVersion
;
1867 params
.PageLength
= hdr
->PageLength
;
1868 params
.PageNumber
= hdr
->PageNumber
;
1869 params
.PageAddress
= PageAddress
;
1871 /* Restore stripped out attributes */
1872 hdr
->PageType
|= hdr_attr
;
1873 params
.PageType
= hdr
->PageType
& MPI_CONFIG_PAGETYPE_MASK
;
1875 params
.PageType
= hdr
->PageType
;
1877 error
= mpt_issue_cfg_req(mpt
, req
, ¶ms
,
1878 req
->req_pbuf
+ MPT_RQSL(mpt
),
1879 len
, sleep_ok
, timeout_ms
);
1881 mpt_prt(mpt
, "mpt_write_cfg_page timed out\n");
1885 if ((req
->IOCStatus
& MPI_IOCSTATUS_MASK
) != MPI_IOCSTATUS_SUCCESS
) {
1886 mpt_prt(mpt
, "mpt_write_cfg_page: Config Info Status %x\n",
1888 mpt_free_request(mpt
, req
);
1891 mpt_free_request(mpt
, req
);
1896 * Read IOC configuration information
1899 mpt_read_config_info_ioc(struct mpt_softc
*mpt
)
1901 CONFIG_PAGE_HEADER hdr
;
1902 struct mpt_raid_volume
*mpt_raid
;
1907 rv
= mpt_read_cfg_header(mpt
, MPI_CONFIG_PAGETYPE_IOC
,
1908 2, 0, &hdr
, FALSE
, 5000);
1910 * If it's an invalid page, so what? Not a supported function....
1919 mpt_lprt(mpt
, MPT_PRT_DEBUG
,
1920 "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
1921 hdr
.PageVersion
, hdr
.PageLength
<< 2,
1922 hdr
.PageNumber
, hdr
.PageType
);
1924 len
= hdr
.PageLength
* sizeof(uint32_t);
1925 mpt
->ioc_page2
= kmalloc(len
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
1926 if (mpt
->ioc_page2
== NULL
) {
1927 mpt_prt(mpt
, "unable to allocate memory for IOC page 2\n");
1928 mpt_raid_free_mem(mpt
);
1931 memcpy(&mpt
->ioc_page2
->Header
, &hdr
, sizeof(hdr
));
1932 rv
= mpt_read_cur_cfg_page(mpt
, 0,
1933 &mpt
->ioc_page2
->Header
, len
, FALSE
, 5000);
1935 mpt_prt(mpt
, "failed to read IOC Page 2\n");
1936 mpt_raid_free_mem(mpt
);
1939 mpt2host_config_page_ioc2(mpt
->ioc_page2
);
1941 if (mpt
->ioc_page2
->CapabilitiesFlags
!= 0) {
1944 mpt_prt(mpt
, "Capabilities: (");
1945 for (mask
= 1; mask
!= 0; mask
<<= 1) {
1946 if ((mpt
->ioc_page2
->CapabilitiesFlags
& mask
) == 0) {
1950 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
:
1951 mpt_prtc(mpt
, " RAID-0");
1953 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
:
1954 mpt_prtc(mpt
, " RAID-1E");
1956 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT
:
1957 mpt_prtc(mpt
, " RAID-1");
1959 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT
:
1960 mpt_prtc(mpt
, " SES");
1962 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT
:
1963 mpt_prtc(mpt
, " SAFTE");
1965 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT
:
1966 mpt_prtc(mpt
, " Multi-Channel-Arrays");
1971 mpt_prtc(mpt
, " )\n");
1972 if ((mpt
->ioc_page2
->CapabilitiesFlags
1973 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1974 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1975 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT
)) != 0) {
1976 mpt_prt(mpt
, "%d Active Volume%s(%d Max)\n",
1977 mpt
->ioc_page2
->NumActiveVolumes
,
1978 mpt
->ioc_page2
->NumActiveVolumes
!= 1
1980 mpt
->ioc_page2
->MaxVolumes
);
1981 mpt_prt(mpt
, "%d Hidden Drive Member%s(%d Max)\n",
1982 mpt
->ioc_page2
->NumActivePhysDisks
,
1983 mpt
->ioc_page2
->NumActivePhysDisks
!= 1
1985 mpt
->ioc_page2
->MaxPhysDisks
);
1989 len
= mpt
->ioc_page2
->MaxVolumes
* sizeof(struct mpt_raid_volume
);
1990 mpt
->raid_volumes
= kmalloc(len
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
1991 if (mpt
->raid_volumes
== NULL
) {
1992 mpt_prt(mpt
, "Could not allocate RAID volume data\n");
1993 mpt_raid_free_mem(mpt
);
1998 * Copy critical data out of ioc_page2 so that we can
1999 * safely refresh the page without windows of unreliable
2002 mpt
->raid_max_volumes
= mpt
->ioc_page2
->MaxVolumes
;
2004 len
= sizeof(*mpt
->raid_volumes
->config_page
) +
2005 (sizeof (RAID_VOL0_PHYS_DISK
) * (mpt
->ioc_page2
->MaxPhysDisks
- 1));
2006 for (i
= 0; i
< mpt
->ioc_page2
->MaxVolumes
; i
++) {
2007 mpt_raid
= &mpt
->raid_volumes
[i
];
2008 mpt_raid
->config_page
=
2009 kmalloc(len
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
2010 if (mpt_raid
->config_page
== NULL
) {
2011 mpt_prt(mpt
, "Could not allocate RAID page data\n");
2012 mpt_raid_free_mem(mpt
);
2016 mpt
->raid_page0_len
= len
;
2018 len
= mpt
->ioc_page2
->MaxPhysDisks
* sizeof(struct mpt_raid_disk
);
2019 mpt
->raid_disks
= kmalloc(len
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
2020 if (mpt
->raid_disks
== NULL
) {
2021 mpt_prt(mpt
, "Could not allocate RAID disk data\n");
2022 mpt_raid_free_mem(mpt
);
2025 mpt
->raid_max_disks
= mpt
->ioc_page2
->MaxPhysDisks
;
2030 rv
= mpt_read_cfg_header(mpt
, MPI_CONFIG_PAGETYPE_IOC
,
2031 3, 0, &hdr
, FALSE
, 5000);
2033 mpt_raid_free_mem(mpt
);
2037 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "IOC Page 3 Header: %x %x %x %x\n",
2038 hdr
.PageVersion
, hdr
.PageLength
, hdr
.PageNumber
, hdr
.PageType
);
2040 len
= hdr
.PageLength
* sizeof(uint32_t);
2041 mpt
->ioc_page3
= kmalloc(len
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
2042 if (mpt
->ioc_page3
== NULL
) {
2043 mpt_prt(mpt
, "unable to allocate memory for IOC page 3\n");
2044 mpt_raid_free_mem(mpt
);
2047 memcpy(&mpt
->ioc_page3
->Header
, &hdr
, sizeof(hdr
));
2048 rv
= mpt_read_cur_cfg_page(mpt
, 0,
2049 &mpt
->ioc_page3
->Header
, len
, FALSE
, 5000);
2051 mpt_raid_free_mem(mpt
);
2054 mpt2host_config_page_ioc3(mpt
->ioc_page3
);
2055 mpt_raid_wakeup(mpt
);
2063 mpt_send_port_enable(struct mpt_softc
*mpt
, int port
)
2066 MSG_PORT_ENABLE
*enable_req
;
2069 req
= mpt_get_request(mpt
, /*sleep_ok*/FALSE
);
2073 enable_req
= req
->req_vbuf
;
2074 memset(enable_req
, 0, MPT_RQSL(mpt
));
2076 enable_req
->Function
= MPI_FUNCTION_PORT_ENABLE
;
2077 enable_req
->MsgContext
= htole32(req
->index
| MPT_REPLY_HANDLER_CONFIG
);
2078 enable_req
->PortNumber
= port
;
2080 mpt_check_doorbell(mpt
);
2081 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "enabling port %d\n", port
);
2083 mpt_send_cmd(mpt
, req
);
2084 error
= mpt_wait_req(mpt
, req
, REQ_STATE_DONE
, REQ_STATE_DONE
,
2085 FALSE
, (mpt
->is_sas
|| mpt
->is_fc
)? 300000 : 30000);
2087 mpt_prt(mpt
, "port %d enable timed out\n", port
);
2090 mpt_free_request(mpt
, req
);
2091 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "enabled port %d\n", port
);
2096 * Enable/Disable asynchronous event reporting.
2099 mpt_send_event_request(struct mpt_softc
*mpt
, int onoff
)
2102 MSG_EVENT_NOTIFY
*enable_req
;
2104 req
= mpt_get_request(mpt
, FALSE
);
2108 enable_req
= req
->req_vbuf
;
2109 memset(enable_req
, 0, sizeof *enable_req
);
2111 enable_req
->Function
= MPI_FUNCTION_EVENT_NOTIFICATION
;
2112 enable_req
->MsgContext
= htole32(req
->index
| MPT_REPLY_HANDLER_EVENTS
);
2113 enable_req
->Switch
= onoff
;
2115 mpt_check_doorbell(mpt
);
2116 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "%sabling async events\n",
2117 onoff
? "en" : "dis");
2119 * Send the command off, but don't wait for it.
2121 mpt_send_cmd(mpt
, req
);
2126 * Un-mask the interrupts on the chip.
2129 mpt_enable_ints(struct mpt_softc
*mpt
)
2132 /* Unmask every thing except door bell int */
2133 mpt_write(mpt
, MPT_OFFSET_INTR_MASK
, MPT_INTR_DB_MASK
);
2137 * Mask the interrupts on the chip.
2140 mpt_disable_ints(struct mpt_softc
*mpt
)
2143 /* Mask all interrupts */
2144 mpt_write(mpt
, MPT_OFFSET_INTR_MASK
,
2145 MPT_INTR_REPLY_MASK
| MPT_INTR_DB_MASK
);
2149 mpt_sysctl_attach(struct mpt_softc
*mpt
)
2151 struct sysctl_ctx_list
*ctx
= device_get_sysctl_ctx(mpt
->dev
);
2152 struct sysctl_oid
*tree
= device_get_sysctl_tree(mpt
->dev
);
2154 SYSCTL_ADD_UINT(ctx
, SYSCTL_CHILDREN(tree
), OID_AUTO
,
2155 "debug", CTLFLAG_RW
, &mpt
->verbose
, 0,
2156 "Debugging/Verbose level");
2157 SYSCTL_ADD_UINT(ctx
, SYSCTL_CHILDREN(tree
), OID_AUTO
,
2158 "role", CTLFLAG_RD
, &mpt
->role
, 0,
2160 #ifdef MPT_TEST_MULTIPATH
2161 SYSCTL_ADD_INT(ctx
, SYSCTL_CHILDREN(tree
), OID_AUTO
,
2162 "failure_id", CTLFLAG_RW
, &mpt
->failure_id
, -1,
2163 "Next Target to Fail");
2168 mpt_attach(struct mpt_softc
*mpt
)
2170 struct mpt_personality
*pers
;
2174 mpt_core_attach(mpt
);
2175 mpt_core_enable(mpt
);
2177 TAILQ_INSERT_TAIL(&mpt_tailq
, mpt
, links
);
2178 for (i
= 0; i
< MPT_MAX_PERSONALITIES
; i
++) {
2179 pers
= mpt_personalities
[i
];
2183 if (pers
->probe(mpt
) == 0) {
2184 error
= pers
->attach(mpt
);
2189 mpt
->mpt_pers_mask
|= (0x1 << pers
->id
);
2195 * Now that we've attached everything, do the enable function
2196 * for all of the personalities. This allows the personalities
2197 * to do setups that are appropriate for them prior to enabling
2200 for (i
= 0; i
< MPT_MAX_PERSONALITIES
; i
++) {
2201 pers
= mpt_personalities
[i
];
2202 if (pers
!= NULL
&& MPT_PERS_ATTACHED(pers
, mpt
) != 0) {
2203 error
= pers
->enable(mpt
);
2205 mpt_prt(mpt
, "personality %s attached but would"
2206 " not enable (%d)\n", pers
->name
, error
);
2216 mpt_shutdown(struct mpt_softc
*mpt
)
2218 struct mpt_personality
*pers
;
2220 MPT_PERS_FOREACH_REVERSE(mpt
, pers
) {
2221 pers
->shutdown(mpt
);
2227 mpt_detach(struct mpt_softc
*mpt
)
2229 struct mpt_personality
*pers
;
2231 MPT_PERS_FOREACH_REVERSE(mpt
, pers
) {
2233 mpt
->mpt_pers_mask
&= ~(0x1 << pers
->id
);
2236 TAILQ_REMOVE(&mpt_tailq
, mpt
, links
);
2241 mpt_core_load(struct mpt_personality
*pers
)
2246 * Setup core handlers and insert the default handler
2247 * into all "empty slots".
2249 for (i
= 0; i
< MPT_NUM_REPLY_HANDLERS
; i
++) {
2250 mpt_reply_handlers
[i
] = mpt_default_reply_handler
;
2253 mpt_reply_handlers
[MPT_CBI(MPT_REPLY_HANDLER_EVENTS
)] =
2254 mpt_event_reply_handler
;
2255 mpt_reply_handlers
[MPT_CBI(MPT_REPLY_HANDLER_CONFIG
)] =
2256 mpt_config_reply_handler
;
2257 mpt_reply_handlers
[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE
)] =
2258 mpt_handshake_reply_handler
;
2263 * Initialize per-instance driver data and perform
2264 * initial controller configuration.
2267 mpt_core_attach(struct mpt_softc
*mpt
)
2271 LIST_INIT(&mpt
->ack_frames
);
2272 /* Put all request buffers on the free list */
2273 TAILQ_INIT(&mpt
->request_pending_list
);
2274 TAILQ_INIT(&mpt
->request_free_list
);
2275 TAILQ_INIT(&mpt
->request_timeout_list
);
2276 for (val
= 0; val
< MPT_MAX_LUNS
; val
++) {
2277 STAILQ_INIT(&mpt
->trt
[val
].atios
);
2278 STAILQ_INIT(&mpt
->trt
[val
].inots
);
2280 STAILQ_INIT(&mpt
->trt_wildcard
.atios
);
2281 STAILQ_INIT(&mpt
->trt_wildcard
.inots
);
2282 #ifdef MPT_TEST_MULTIPATH
2283 mpt
->failure_id
= -1;
2285 mpt
->scsi_tgt_handler_id
= MPT_HANDLER_ID_NONE
;
2286 mpt_sysctl_attach(mpt
);
2287 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "doorbell req = %s\n",
2288 mpt_ioc_diag(mpt_read(mpt
, MPT_OFFSET_DOORBELL
)));
2291 error
= mpt_configure_ioc(mpt
, 0, 0);
2298 mpt_core_enable(struct mpt_softc
*mpt
)
2302 * We enter with the IOC enabled, but async events
2303 * not enabled, ports not enabled and interrupts
2309 * Enable asynchronous event reporting- all personalities
2310 * have attached so that they should be able to now field
2313 mpt_send_event_request(mpt
, 1);
2316 * Catch any pending interrupts
2318 * This seems to be crucial- otherwise
2319 * the portenable below times out.
2326 mpt_enable_ints(mpt
);
2329 * Catch any pending interrupts
2331 * This seems to be crucial- otherwise
2332 * the portenable below times out.
2339 if (mpt_send_port_enable(mpt
, 0) != MPT_OK
) {
2340 mpt_prt(mpt
, "failed to enable port 0\n");
/* Core personality shutdown hook: quiesce by masking chip interrupts. */
static void
mpt_core_shutdown(struct mpt_softc *mpt)
{

	mpt_disable_ints(mpt);
}
2356 mpt_core_detach(struct mpt_softc
*mpt
)
2363 mpt_disable_ints(mpt
);
2365 /* Make sure no request has pending timeouts. */
2366 for (val
= 0; val
< MPT_MAX_REQUESTS(mpt
); val
++) {
2367 request_t
*req
= &mpt
->request_pool
[val
];
2368 mpt_callout_drain(mpt
, &req
->callout
);
2371 mpt_dma_buf_free(mpt
);
static int
mpt_core_unload(struct mpt_personality *pers)
{

	/* Unload is always successful. */
	return (0);
}
2382 #define FW_UPLOAD_REQ_SIZE \
2383 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \
2384 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
2387 mpt_upload_fw(struct mpt_softc
*mpt
)
2389 uint8_t fw_req_buf
[FW_UPLOAD_REQ_SIZE
];
2390 MSG_FW_UPLOAD_REPLY fw_reply
;
2391 MSG_FW_UPLOAD
*fw_req
;
2392 FW_UPLOAD_TCSGE
*tsge
;
2397 memset(&fw_req_buf
, 0, sizeof(fw_req_buf
));
2398 fw_req
= (MSG_FW_UPLOAD
*)fw_req_buf
;
2399 fw_req
->ImageType
= MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM
;
2400 fw_req
->Function
= MPI_FUNCTION_FW_UPLOAD
;
2401 fw_req
->MsgContext
= htole32(MPT_REPLY_HANDLER_HANDSHAKE
);
2402 tsge
= (FW_UPLOAD_TCSGE
*)&fw_req
->SGL
;
2403 tsge
->DetailsLength
= 12;
2404 tsge
->Flags
= MPI_SGE_FLAGS_TRANSACTION_ELEMENT
;
2405 tsge
->ImageSize
= htole32(mpt
->fw_image_size
);
2406 sge
= (SGE_SIMPLE32
*)(tsge
+ 1);
2407 flags
= (MPI_SGE_FLAGS_LAST_ELEMENT
| MPI_SGE_FLAGS_END_OF_BUFFER
2408 | MPI_SGE_FLAGS_END_OF_LIST
| MPI_SGE_FLAGS_SIMPLE_ELEMENT
2409 | MPI_SGE_FLAGS_32_BIT_ADDRESSING
| MPI_SGE_FLAGS_IOC_TO_HOST
);
2410 flags
<<= MPI_SGE_FLAGS_SHIFT
;
2411 sge
->FlagsLength
= htole32(flags
| mpt
->fw_image_size
);
2412 sge
->Address
= htole32(mpt
->fw_phys
);
2413 bus_dmamap_sync(mpt
->fw_dmat
, mpt
->fw_dmap
, BUS_DMASYNC_PREREAD
);
2414 error
= mpt_send_handshake_cmd(mpt
, sizeof(fw_req_buf
), &fw_req_buf
);
2417 error
= mpt_recv_handshake_reply(mpt
, sizeof(fw_reply
), &fw_reply
);
2418 bus_dmamap_sync(mpt
->fw_dmat
, mpt
->fw_dmap
, BUS_DMASYNC_POSTREAD
);
2423 mpt_diag_outsl(struct mpt_softc
*mpt
, uint32_t addr
,
2424 uint32_t *data
, bus_size_t len
)
2428 data_end
= data
+ (roundup2(len
, sizeof(uint32_t)) / 4);
2430 pci_enable_io(mpt
->dev
, SYS_RES_IOPORT
);
2432 mpt_pio_write(mpt
, MPT_OFFSET_DIAG_ADDR
, addr
);
2433 while (data
!= data_end
) {
2434 mpt_pio_write(mpt
, MPT_OFFSET_DIAG_DATA
, *data
);
2438 pci_disable_io(mpt
->dev
, SYS_RES_IOPORT
);
2443 mpt_download_fw(struct mpt_softc
*mpt
)
2445 MpiFwHeader_t
*fw_hdr
;
2447 uint32_t ext_offset
;
2450 if (mpt
->pci_pio_reg
== NULL
) {
2451 mpt_prt(mpt
, "No PIO resource!\n");
2455 mpt_prt(mpt
, "Downloading Firmware - Image Size %d\n",
2456 mpt
->fw_image_size
);
2458 error
= mpt_enable_diag_mode(mpt
);
2460 mpt_prt(mpt
, "Could not enter diagnostic mode!\n");
2464 mpt_write(mpt
, MPT_OFFSET_DIAGNOSTIC
,
2465 MPI_DIAG_RW_ENABLE
|MPI_DIAG_DISABLE_ARM
);
2467 fw_hdr
= (MpiFwHeader_t
*)mpt
->fw_image
;
2468 bus_dmamap_sync(mpt
->fw_dmat
, mpt
->fw_dmap
, BUS_DMASYNC_PREWRITE
);
2469 mpt_diag_outsl(mpt
, fw_hdr
->LoadStartAddress
, (uint32_t*)fw_hdr
,
2471 bus_dmamap_sync(mpt
->fw_dmat
, mpt
->fw_dmap
, BUS_DMASYNC_POSTWRITE
);
2473 ext_offset
= fw_hdr
->NextImageHeaderOffset
;
2474 while (ext_offset
!= 0) {
2475 MpiExtImageHeader_t
*ext
;
2477 ext
= (MpiExtImageHeader_t
*)((uintptr_t)fw_hdr
+ ext_offset
);
2478 ext_offset
= ext
->NextImageHeaderOffset
;
2479 bus_dmamap_sync(mpt
->fw_dmat
, mpt
->fw_dmap
,
2480 BUS_DMASYNC_PREWRITE
);
2481 mpt_diag_outsl(mpt
, ext
->LoadStartAddress
, (uint32_t*)ext
,
2483 bus_dmamap_sync(mpt
->fw_dmat
, mpt
->fw_dmap
,
2484 BUS_DMASYNC_POSTWRITE
);
2488 pci_enable_io(mpt
->dev
, SYS_RES_IOPORT
);
2490 /* Setup the address to jump to on reset. */
2491 mpt_pio_write(mpt
, MPT_OFFSET_DIAG_ADDR
, fw_hdr
->IopResetRegAddr
);
2492 mpt_pio_write(mpt
, MPT_OFFSET_DIAG_DATA
, fw_hdr
->IopResetVectorValue
);
2495 * The controller sets the "flash bad" status after attempting
2496 * to auto-boot from flash. Clear the status so that the controller
2497 * will continue the boot process with our newly installed firmware.
2499 mpt_pio_write(mpt
, MPT_OFFSET_DIAG_ADDR
, MPT_DIAG_MEM_CFG_BASE
);
2500 data
= mpt_pio_read(mpt
, MPT_OFFSET_DIAG_DATA
) | MPT_DIAG_MEM_CFG_BADFL
;
2501 mpt_pio_write(mpt
, MPT_OFFSET_DIAG_ADDR
, MPT_DIAG_MEM_CFG_BASE
);
2502 mpt_pio_write(mpt
, MPT_OFFSET_DIAG_DATA
, data
);
2505 pci_disable_io(mpt
->dev
, SYS_RES_IOPORT
);
2509 * Re-enable the processor and clear the boot halt flag.
2511 data
= mpt_read(mpt
, MPT_OFFSET_DIAGNOSTIC
);
2512 data
&= ~(MPI_DIAG_PREVENT_IOC_BOOT
|MPI_DIAG_DISABLE_ARM
);
2513 mpt_write(mpt
, MPT_OFFSET_DIAGNOSTIC
, data
);
2515 mpt_disable_diag_mode(mpt
);
2520 mpt_dma_buf_alloc(struct mpt_softc
*mpt
)
2522 struct mpt_map_info mi
;
2527 /* Create a child tag for data buffers */
2528 if (mpt_dma_tag_create(mpt
, mpt
->parent_dmat
, 1,
2529 0, BUS_SPACE_MAXADDR
, BUS_SPACE_MAXADDR
,
2530 NULL
, NULL
, (mpt
->max_cam_seg_cnt
- 1) * PAGE_SIZE
,
2531 mpt
->max_cam_seg_cnt
, BUS_SPACE_MAXSIZE_32BIT
, 0,
2532 &mpt
->buffer_dmat
) != 0) {
2533 mpt_prt(mpt
, "cannot create a dma tag for data buffers\n");
2537 /* Create a child tag for request buffers */
2538 if (mpt_dma_tag_create(mpt
, mpt
->parent_dmat
, PAGE_SIZE
, 0,
2539 BUS_SPACE_MAXADDR_32BIT
, BUS_SPACE_MAXADDR
,
2540 NULL
, NULL
, MPT_REQ_MEM_SIZE(mpt
), 1, BUS_SPACE_MAXSIZE_32BIT
, 0,
2541 &mpt
->request_dmat
) != 0) {
2542 mpt_prt(mpt
, "cannot create a dma tag for requests\n");
2546 /* Allocate some DMA accessible memory for requests */
2547 if (bus_dmamem_alloc(mpt
->request_dmat
, (void **)&mpt
->request
,
2548 BUS_DMA_NOWAIT
| BUS_DMA_COHERENT
, &mpt
->request_dmap
) != 0) {
2549 mpt_prt(mpt
, "cannot allocate %d bytes of request memory\n",
2550 MPT_REQ_MEM_SIZE(mpt
));
2557 /* Load and lock it into "bus space" */
2558 bus_dmamap_load(mpt
->request_dmat
, mpt
->request_dmap
, mpt
->request
,
2559 MPT_REQ_MEM_SIZE(mpt
), mpt_map_rquest
, &mi
, 0);
2562 mpt_prt(mpt
, "error %d loading dma map for DMA request queue\n",
2566 mpt
->request_phys
= mi
.phys
;
2569 * Now create per-request dma maps
2572 pptr
= mpt
->request_phys
;
2573 vptr
= mpt
->request
;
2574 end
= pptr
+ MPT_REQ_MEM_SIZE(mpt
);
2576 request_t
*req
= &mpt
->request_pool
[i
];
2579 /* Store location of Request Data */
2580 req
->req_pbuf
= pptr
;
2581 req
->req_vbuf
= vptr
;
2583 pptr
+= MPT_REQUEST_AREA
;
2584 vptr
+= MPT_REQUEST_AREA
;
2586 req
->sense_pbuf
= (pptr
- MPT_SENSE_SIZE
);
2587 req
->sense_vbuf
= (vptr
- MPT_SENSE_SIZE
);
2589 error
= bus_dmamap_create(mpt
->buffer_dmat
, 0, &req
->dmap
);
2591 mpt_prt(mpt
, "error %d creating per-cmd DMA maps\n",
2601 mpt_dma_buf_free(struct mpt_softc
*mpt
)
2605 if (mpt
->request_dmat
== 0) {
2606 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "already released dma memory\n");
2609 for (i
= 0; i
< MPT_MAX_REQUESTS(mpt
); i
++) {
2610 bus_dmamap_destroy(mpt
->buffer_dmat
, mpt
->request_pool
[i
].dmap
);
2612 bus_dmamap_unload(mpt
->request_dmat
, mpt
->request_dmap
);
2613 bus_dmamem_free(mpt
->request_dmat
, mpt
->request
, mpt
->request_dmap
);
2614 bus_dma_tag_destroy(mpt
->request_dmat
);
2615 mpt
->request_dmat
= 0;
2616 bus_dma_tag_destroy(mpt
->buffer_dmat
);
2620 * Allocate/Initialize data structures for the controller. Called
2621 * once at instance startup.
2624 mpt_configure_ioc(struct mpt_softc
*mpt
, int tn
, int needreset
)
2626 PTR_MSG_PORT_FACTS_REPLY pfp
;
2627 int error
, port
, val
;
2630 if (tn
== MPT_MAX_TRYS
) {
2635 * No need to reset if the IOC is already in the READY state.
2637 * Force reset if initialization failed previously.
2638 * Note that a hard_reset of the second channel of a '929
2639 * will stop operation of the first channel. Hopefully, if the
2640 * first channel is ok, the second will not require a hard
2643 if (needreset
|| MPT_STATE(mpt_rd_db(mpt
)) != MPT_DB_STATE_READY
) {
2644 if (mpt_reset(mpt
, FALSE
) != MPT_OK
) {
2645 return (mpt_configure_ioc(mpt
, tn
++, 1));
2650 if (mpt_get_iocfacts(mpt
, &mpt
->ioc_facts
) != MPT_OK
) {
2651 mpt_prt(mpt
, "mpt_get_iocfacts failed\n");
2652 return (mpt_configure_ioc(mpt
, tn
++, 1));
2654 mpt2host_iocfacts_reply(&mpt
->ioc_facts
);
2656 mpt_prt(mpt
, "MPI Version=%d.%d.%d.%d\n",
2657 mpt
->ioc_facts
.MsgVersion
>> 8,
2658 mpt
->ioc_facts
.MsgVersion
& 0xFF,
2659 mpt
->ioc_facts
.HeaderVersion
>> 8,
2660 mpt
->ioc_facts
.HeaderVersion
& 0xFF);
2663 * Now that we know request frame size, we can calculate
2664 * the actual (reasonable) segment limit for read/write I/O.
2666 * This limit is constrained by:
2668 * + The size of each area we allocate per command (and how
2669 * many chain segments we can fit into it).
2670 * + The total number of areas we've set up.
2671 * + The actual chain depth the card will allow.
2673 * The first area's segment count is limited by the I/O request
2674 * at the head of it. We cannot allocate realistically more
2675 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2676 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2679 /* total number of request areas we (can) allocate */
2680 mpt
->max_seg_cnt
= MPT_MAX_REQUESTS(mpt
) - 2;
2682 /* converted to the number of chain areas possible */
2683 mpt
->max_seg_cnt
*= MPT_NRFM(mpt
);
2685 /* limited by the number of chain areas the card will support */
2686 if (mpt
->max_seg_cnt
> mpt
->ioc_facts
.MaxChainDepth
) {
2687 mpt_lprt(mpt
, MPT_PRT_INFO
,
2688 "chain depth limited to %u (from %u)\n",
2689 mpt
->ioc_facts
.MaxChainDepth
, mpt
->max_seg_cnt
);
2690 mpt
->max_seg_cnt
= mpt
->ioc_facts
.MaxChainDepth
;
2693 /* converted to the number of simple sges in chain segments. */
2694 mpt
->max_seg_cnt
*= (MPT_NSGL(mpt
) - 1);
2697 * Use this as the basis for reporting the maximum I/O size to CAM.
2699 mpt
->max_cam_seg_cnt
= min(mpt
->max_seg_cnt
, (MAXPHYS
/ PAGE_SIZE
) + 1);
2701 error
= mpt_dma_buf_alloc(mpt
);
2703 mpt_prt(mpt
, "mpt_dma_buf_alloc() failed!\n");
2707 for (val
= 0; val
< MPT_MAX_REQUESTS(mpt
); val
++) {
2708 request_t
*req
= &mpt
->request_pool
[val
];
2709 req
->state
= REQ_STATE_ALLOCATED
;
2710 mpt_callout_init(mpt
, &req
->callout
);
2711 mpt_free_request(mpt
, req
);
2714 mpt_lprt(mpt
, MPT_PRT_INFO
, "Maximum Segment Count: %u, Maximum "
2715 "CAM Segment Count: %u\n", mpt
->max_seg_cnt
,
2716 mpt
->max_cam_seg_cnt
);
2718 mpt_lprt(mpt
, MPT_PRT_INFO
, "MsgLength=%u IOCNumber = %d\n",
2719 mpt
->ioc_facts
.MsgLength
, mpt
->ioc_facts
.IOCNumber
);
2720 mpt_lprt(mpt
, MPT_PRT_INFO
,
2721 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2722 "Request Frame Size %u bytes Max Chain Depth %u\n",
2723 mpt
->ioc_facts
.GlobalCredits
, mpt
->ioc_facts
.BlockSize
,
2724 mpt
->ioc_facts
.RequestFrameSize
<< 2,
2725 mpt
->ioc_facts
.MaxChainDepth
);
2726 mpt_lprt(mpt
, MPT_PRT_INFO
, "IOCFACTS: Num Ports %d, FWImageSize %d, "
2727 "Flags=%#x\n", mpt
->ioc_facts
.NumberOfPorts
,
2728 mpt
->ioc_facts
.FWImageSize
, mpt
->ioc_facts
.Flags
);
2730 len
= mpt
->ioc_facts
.NumberOfPorts
* sizeof (MSG_PORT_FACTS_REPLY
);
2731 mpt
->port_facts
= kmalloc(len
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
2732 if (mpt
->port_facts
== NULL
) {
2733 mpt_prt(mpt
, "unable to allocate memory for port facts\n");
2738 if ((mpt
->ioc_facts
.Flags
& MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT
) &&
2739 (mpt
->fw_uploaded
== 0)) {
2740 struct mpt_map_info mi
;
2743 * In some configurations, the IOC's firmware is
2744 * stored in a shared piece of system NVRAM that
2745 * is only accessible via the BIOS. In this
2746 * case, the firmware keeps a copy of firmware in
2747 * RAM until the OS driver retrieves it. Once
2748 * retrieved, we are responsible for re-downloading
2749 * the firmware after any hard-reset.
2751 mpt
->fw_image_size
= mpt
->ioc_facts
.FWImageSize
;
2752 error
= mpt_dma_tag_create(mpt
, mpt
->parent_dmat
, 1, 0,
2753 BUS_SPACE_MAXADDR_32BIT
, BUS_SPACE_MAXADDR
, NULL
, NULL
,
2754 mpt
->fw_image_size
, 1, mpt
->fw_image_size
, 0,
2757 mpt_prt(mpt
, "cannot create firmware dma tag\n");
2760 error
= bus_dmamem_alloc(mpt
->fw_dmat
,
2761 (void **)&mpt
->fw_image
, BUS_DMA_NOWAIT
|
2762 BUS_DMA_COHERENT
, &mpt
->fw_dmap
);
2764 mpt_prt(mpt
, "cannot allocate firmware memory\n");
2765 bus_dma_tag_destroy(mpt
->fw_dmat
);
2770 bus_dmamap_load(mpt
->fw_dmat
, mpt
->fw_dmap
,
2771 mpt
->fw_image
, mpt
->fw_image_size
, mpt_map_rquest
, &mi
, 0);
2772 mpt
->fw_phys
= mi
.phys
;
2774 error
= mpt_upload_fw(mpt
);
2776 mpt_prt(mpt
, "firmware upload failed.\n");
2777 bus_dmamap_unload(mpt
->fw_dmat
, mpt
->fw_dmap
);
2778 bus_dmamem_free(mpt
->fw_dmat
, mpt
->fw_image
,
2780 bus_dma_tag_destroy(mpt
->fw_dmat
);
2781 mpt
->fw_image
= NULL
;
2784 mpt
->fw_uploaded
= 1;
2787 for (port
= 0; port
< mpt
->ioc_facts
.NumberOfPorts
; port
++) {
2788 pfp
= &mpt
->port_facts
[port
];
2789 error
= mpt_get_portfacts(mpt
, 0, pfp
);
2790 if (error
!= MPT_OK
) {
2792 "mpt_get_portfacts on port %d failed\n", port
);
2793 kfree(mpt
->port_facts
, M_DEVBUF
);
2794 mpt
->port_facts
= NULL
;
2795 return (mpt_configure_ioc(mpt
, tn
++, 1));
2797 mpt2host_portfacts_reply(pfp
);
2800 error
= MPT_PRT_INFO
;
2802 error
= MPT_PRT_DEBUG
;
2804 mpt_lprt(mpt
, error
,
2805 "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
2806 port
, pfp
->PortType
, pfp
->ProtocolFlags
, pfp
->PortSCSIID
,
2812 * XXX: Not yet supporting more than port 0
2814 pfp
= &mpt
->port_facts
[0];
2815 if (pfp
->PortType
== MPI_PORTFACTS_PORTTYPE_FC
) {
2819 } else if (pfp
->PortType
== MPI_PORTFACTS_PORTTYPE_SAS
) {
2823 } else if (pfp
->PortType
== MPI_PORTFACTS_PORTTYPE_SCSI
) {
2827 if (mpt
->mpt_ini_id
== MPT_INI_ID_NONE
)
2828 mpt
->mpt_ini_id
= pfp
->PortSCSIID
;
2829 } else if (pfp
->PortType
== MPI_PORTFACTS_PORTTYPE_ISCSI
) {
2830 mpt_prt(mpt
, "iSCSI not supported yet\n");
2832 } else if (pfp
->PortType
== MPI_PORTFACTS_PORTTYPE_INACTIVE
) {
2833 mpt_prt(mpt
, "Inactive Port\n");
2836 mpt_prt(mpt
, "unknown Port Type %#x\n", pfp
->PortType
);
2841 * Set our role with what this port supports.
2843 * Note this might be changed later in different modules
2844 * if this is different from what is wanted.
2846 mpt
->role
= MPT_ROLE_NONE
;
2847 if (pfp
->ProtocolFlags
& MPI_PORTFACTS_PROTOCOL_INITIATOR
) {
2848 mpt
->role
|= MPT_ROLE_INITIATOR
;
2850 if (pfp
->ProtocolFlags
& MPI_PORTFACTS_PROTOCOL_TARGET
) {
2851 mpt
->role
|= MPT_ROLE_TARGET
;
2857 if (mpt_enable_ioc(mpt
, 1) != MPT_OK
) {
2858 mpt_prt(mpt
, "unable to initialize IOC\n");
2863 * Read IOC configuration information.
2865 * We need this to determine whether or not we have certain
2866 * settings for Integrated Mirroring (e.g.).
2868 mpt_read_config_info_ioc(mpt
);
2874 mpt_enable_ioc(struct mpt_softc
*mpt
, int portenable
)
2879 if (mpt_send_ioc_init(mpt
, MPI_WHOINIT_HOST_DRIVER
) != MPT_OK
) {
2880 mpt_prt(mpt
, "mpt_send_ioc_init failed\n");
2884 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "mpt_send_ioc_init ok\n");
2886 if (mpt_wait_state(mpt
, MPT_DB_STATE_RUNNING
) != MPT_OK
) {
2887 mpt_prt(mpt
, "IOC failed to go to run state\n");
2890 mpt_lprt(mpt
, MPT_PRT_DEBUG
, "IOC now at RUNSTATE\n");
2893 * Give it reply buffers
2895 * Do *not* exceed global credits.
2897 for (val
= 0, pptr
= mpt
->reply_phys
;
2898 (pptr
+ MPT_REPLY_SIZE
) < (mpt
->reply_phys
+ PAGE_SIZE
);
2899 pptr
+= MPT_REPLY_SIZE
) {
2900 mpt_free_reply(mpt
, pptr
);
2901 if (++val
== mpt
->ioc_facts
.GlobalCredits
- 1)
2907 * Enable the port if asked. This is only done if we're resetting
2908 * the IOC after initial startup.
2912 * Enable asynchronous event reporting
2914 mpt_send_event_request(mpt
, 1);
2916 if (mpt_send_port_enable(mpt
, 0) != MPT_OK
) {
2917 mpt_prt(mpt
, "%s: failed to enable port 0\n", __func__
);
2925 * Endian Conversion Functions- only used on Big Endian machines
2927 #if _BYTE_ORDER == _BIG_ENDIAN
/* Byte-swap a simple SGE's fields from MPI wire order to host order. */
void
mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
{

	MPT_2_HOST32(sge, FlagsLength);
	MPT_2_HOST32(sge, u.Address64.Low);
	MPT_2_HOST32(sge, u.Address64.High);
}
/*
 * Byte-swap every multi-byte field of an IOC FACTS reply from MPI wire
 * order to host order; field order mirrors the message layout.
 */
void
mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
{

	MPT_2_HOST16(rp, MsgVersion);
	MPT_2_HOST16(rp, HeaderVersion);
	MPT_2_HOST32(rp, MsgContext);
	MPT_2_HOST16(rp, IOCExceptions);
	MPT_2_HOST16(rp, IOCStatus);
	MPT_2_HOST32(rp, IOCLogInfo);
	MPT_2_HOST16(rp, ReplyQueueDepth);
	MPT_2_HOST16(rp, RequestFrameSize);
	MPT_2_HOST16(rp, Reserved_0101_FWVersion);
	MPT_2_HOST16(rp, ProductID);
	MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
	MPT_2_HOST16(rp, GlobalCredits);
	MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
	MPT_2_HOST16(rp, CurReplyFrameSize);
	MPT_2_HOST32(rp, FWImageSize);
	MPT_2_HOST32(rp, IOCCapabilities);
	MPT_2_HOST32(rp, FWVersion.Word);
	MPT_2_HOST16(rp, HighPriorityQueueDepth);
	MPT_2_HOST16(rp, Reserved2);
	/* Embedded SGE is swapped by its own helper. */
	mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
	MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
}
/* Byte-swap a PORT FACTS reply from MPI wire order to host order. */
void
mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
{

	MPT_2_HOST16(pfp, Reserved);
	MPT_2_HOST16(pfp, Reserved1);
	MPT_2_HOST32(pfp, MsgContext);
	MPT_2_HOST16(pfp, Reserved2);
	MPT_2_HOST16(pfp, IOCStatus);
	MPT_2_HOST32(pfp, IOCLogInfo);
	MPT_2_HOST16(pfp, MaxDevices);
	MPT_2_HOST16(pfp, PortSCSIID);
	MPT_2_HOST16(pfp, ProtocolFlags);
	MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
	MPT_2_HOST16(pfp, MaxPersistentIDs);
	MPT_2_HOST16(pfp, MaxLanBuckets);
	MPT_2_HOST16(pfp, Reserved4);
	MPT_2_HOST32(pfp, Reserved5);
}
/* Byte-swap IOC config page 2 (RAID volume table) to host order. */
void
mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
{
	int i;

	MPT_2_HOST32(ioc2, CapabilitiesFlags);
	for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
		MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
	}
}
/* Byte-swap IOC config page 3 to host order. */
void
mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
{

	MPT_2_HOST16(ioc3, Reserved2);
}
/* Byte-swap SCSI port config page 0 to host order. */
void
mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
{

	MPT_2_HOST32(sp0, Capabilities);
	MPT_2_HOST32(sp0, PhysicalInterface);
}
/* Byte-swap SCSI port config page 1 to host order. */
void
mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	MPT_2_HOST32(sp1, Configuration);
	MPT_2_HOST32(sp1, OnBusTimerValue);
	MPT_2_HOST16(sp1, IDConfig);
}
/* Byte-swap SCSI port config page 1 from host order back to MPI order. */
void
host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	HOST_2_MPT32(sp1, Configuration);
	HOST_2_MPT32(sp1, OnBusTimerValue);
	HOST_2_MPT16(sp1, IDConfig);
}
/* Byte-swap SCSI port config page 2, including per-device settings. */
void
mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
{
	int i;

	MPT_2_HOST32(sp2, PortFlags);
	MPT_2_HOST32(sp2, PortSettings);
	for (i = 0; i < sizeof(sp2->DeviceSettings) /
	    sizeof(*sp2->DeviceSettings); i++) {
		MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
	}
}
/* Byte-swap SCSI device config page 0 to host order. */
void
mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
{

	MPT_2_HOST32(sd0, NegotiatedParameters);
	MPT_2_HOST32(sd0, Information);
}
/* Byte-swap SCSI device config page 1 to host order. */
void
mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	MPT_2_HOST32(sd1, RequestedParameters);
	MPT_2_HOST32(sd1, Reserved);
	MPT_2_HOST32(sd1, Configuration);
}
/* Byte-swap SCSI device config page 1 from host order back to MPI order. */
void
host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	HOST_2_MPT32(sd1, RequestedParameters);
	HOST_2_MPT32(sd1, Reserved);
	HOST_2_MPT32(sd1, Configuration);
}
/* Byte-swap Fibre Channel port config page 0 to host order. */
void
mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
{

	MPT_2_HOST32(fp0, Flags);
	MPT_2_HOST32(fp0, PortIdentifier);
	MPT_2_HOST32(fp0, WWNN.Low);
	MPT_2_HOST32(fp0, WWNN.High);
	MPT_2_HOST32(fp0, WWPN.Low);
	MPT_2_HOST32(fp0, WWPN.High);
	MPT_2_HOST32(fp0, SupportedServiceClass);
	MPT_2_HOST32(fp0, SupportedSpeeds);
	MPT_2_HOST32(fp0, CurrentSpeed);
	MPT_2_HOST32(fp0, MaxFrameSize);
	MPT_2_HOST32(fp0, FabricWWNN.Low);
	MPT_2_HOST32(fp0, FabricWWNN.High);
	MPT_2_HOST32(fp0, FabricWWPN.Low);
	MPT_2_HOST32(fp0, FabricWWPN.High);
	MPT_2_HOST32(fp0, DiscoveredPortsCount);
	MPT_2_HOST32(fp0, MaxInitiators);
}
/* Byte-swap Fibre Channel port config page 1 to host order. */
void
mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	MPT_2_HOST32(fp1, Flags);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
}
/* Byte-swap Fibre Channel port config page 1 back to MPI wire order. */
void
host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	HOST_2_MPT32(fp1, Flags);
	HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
	HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
	HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
	HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
}
/* Byte-swap RAID volume config page 0, including the physdisk table. */
void
mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
{
	int i;

	MPT_2_HOST16(volp, VolumeStatus.Reserved);
	MPT_2_HOST16(volp, VolumeSettings.Settings);
	MPT_2_HOST32(volp, MaxLBA);
	MPT_2_HOST32(volp, MaxLBAHigh);
	MPT_2_HOST32(volp, StripeSize);
	MPT_2_HOST32(volp, Reserved2);
	MPT_2_HOST32(volp, Reserved3);
	for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
		MPT_2_HOST16(volp, PhysDisk[i].Reserved);
	}
}
/* Byte-swap RAID physical disk config page 0 to host order. */
void
mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
{

	MPT_2_HOST32(rpd0, Reserved1);
	MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
	MPT_2_HOST32(rpd0, MaxLBA);
	MPT_2_HOST16(rpd0, ErrorData.Reserved);
	MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
	MPT_2_HOST16(rpd0, ErrorData.SmartCount);
}
3141 mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR
*vi
)
3144 MPT_2_HOST16(vi
, TotalBlocks
.High
);
3145 MPT_2_HOST16(vi
, TotalBlocks
.Low
);
3146 MPT_2_HOST16(vi
, BlocksRemaining
.High
);
3147 MPT_2_HOST16(vi
, BlocksRemaining
.Low
);