AMD64 - Fix format conversions and other warnings.
[dragonfly.git] / sys / dev / disk / mpt / mpt.c
blobadf171bfc20eadeb98e991b6c61ee001ea292acd
1 /*-
2 * Generic routines for LSI Fusion adapters.
3 * FreeBSD Version.
5 * Copyright (c) 2000, 2001 by Greg Ansley
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
28 /*-
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
34 * met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
41 * redistribution.
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
62 * Support from LSI-Logic has also gone a great deal toward making this a
63 * workable subsystem and is gratefully acknowledged.
65 /*-
66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
67 * Copyright (c) 2005, WHEEL Sp. z o.o.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * All rights reserved.
71 * Redistribution and use in source and binary forms, with or without
72 * modification, are permitted provided that the following conditions are
73 * met:
74 * 1. Redistributions of source code must retain the above copyright
75 * notice, this list of conditions and the following disclaimer.
76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
77 * substantially similar to the "NO WARRANTY" disclaimer below
78 * ("Disclaimer") and any redistribution must be conditioned upon including
79 * a substantially similar Disclaimer requirement for further binary
80 * redistribution.
81 * 3. Neither the names of the above listed copyright holders nor the names
82 * of any contributors may be used to endorse or promote products derived
83 * from this software without specific prior written permission.
85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96 * $FreeBSD: src/sys/dev/mpt/mpt.c,v 1.49 2009/01/07 21:52:47 marius Exp $
99 #include <sys/cdefs.h>
101 #include <dev/disk/mpt/mpt.h>
102 #include <dev/disk/mpt/mpt_cam.h> /* XXX For static handler registration */
103 #include <dev/disk/mpt/mpt_raid.h> /* XXX For static handler registration */
105 #include <dev/disk/mpt/mpilib/mpi.h>
106 #include <dev/disk/mpt/mpilib/mpi_ioc.h>
107 #include <dev/disk/mpt/mpilib/mpi_fc.h>
108 #include <dev/disk/mpt/mpilib/mpi_targ.h>
110 #include <sys/sysctl.h>
112 #define MPT_MAX_TRYS 3
113 #define MPT_MAX_WAIT 300000
115 static int maxwait_ack = 0;
116 static int maxwait_int = 0;
117 static int maxwait_state = 0;
119 static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
120 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
122 static mpt_reply_handler_t mpt_default_reply_handler;
123 static mpt_reply_handler_t mpt_config_reply_handler;
124 static mpt_reply_handler_t mpt_handshake_reply_handler;
125 static mpt_reply_handler_t mpt_event_reply_handler;
126 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
127 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
128 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
129 static int mpt_soft_reset(struct mpt_softc *mpt);
130 static void mpt_hard_reset(struct mpt_softc *mpt);
131 static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
132 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
134 /************************* Personality Module Support *************************/
136 * We include one extra entry that is guaranteed to be NULL
137 * to simplify our itterator.
139 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
140 static __inline struct mpt_personality*
141 mpt_pers_find(struct mpt_softc *, u_int);
142 static __inline struct mpt_personality*
143 mpt_pers_find_reverse(struct mpt_softc *, u_int);
145 static __inline struct mpt_personality *
146 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
148 KASSERT(start_at <= MPT_MAX_PERSONALITIES,
149 ("mpt_pers_find: starting position out of range\n"));
151 while (start_at < MPT_MAX_PERSONALITIES
152 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
153 start_at++;
155 return (mpt_personalities[start_at]);
159 * Used infrequently, so no need to optimize like a forward
160 * traversal where we use the MAX+1 is guaranteed to be NULL
161 * trick.
163 static __inline struct mpt_personality *
164 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
166 while (start_at < MPT_MAX_PERSONALITIES
167 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
168 start_at--;
170 if (start_at < MPT_MAX_PERSONALITIES)
171 return (mpt_personalities[start_at]);
172 return (NULL);
175 #define MPT_PERS_FOREACH(mpt, pers) \
176 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
177 pers != NULL; \
178 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
180 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
181 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
182 pers != NULL; \
183 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
185 static mpt_load_handler_t mpt_stdload;
186 static mpt_probe_handler_t mpt_stdprobe;
187 static mpt_attach_handler_t mpt_stdattach;
188 static mpt_enable_handler_t mpt_stdenable;
189 static mpt_ready_handler_t mpt_stdready;
190 static mpt_event_handler_t mpt_stdevent;
191 static mpt_reset_handler_t mpt_stdreset;
192 static mpt_shutdown_handler_t mpt_stdshutdown;
193 static mpt_detach_handler_t mpt_stddetach;
194 static mpt_unload_handler_t mpt_stdunload;
195 static struct mpt_personality mpt_default_personality =
197 .load = mpt_stdload,
198 .probe = mpt_stdprobe,
199 .attach = mpt_stdattach,
200 .enable = mpt_stdenable,
201 .ready = mpt_stdready,
202 .event = mpt_stdevent,
203 .reset = mpt_stdreset,
204 .shutdown = mpt_stdshutdown,
205 .detach = mpt_stddetach,
206 .unload = mpt_stdunload
209 static mpt_load_handler_t mpt_core_load;
210 static mpt_attach_handler_t mpt_core_attach;
211 static mpt_enable_handler_t mpt_core_enable;
212 static mpt_reset_handler_t mpt_core_ioc_reset;
213 static mpt_event_handler_t mpt_core_event;
214 static mpt_shutdown_handler_t mpt_core_shutdown;
215 static mpt_shutdown_handler_t mpt_core_detach;
216 static mpt_unload_handler_t mpt_core_unload;
217 static struct mpt_personality mpt_core_personality =
219 .name = "mpt_core",
220 .load = mpt_core_load,
221 // .attach = mpt_core_attach,
222 // .enable = mpt_core_enable,
223 .event = mpt_core_event,
224 .reset = mpt_core_ioc_reset,
225 .shutdown = mpt_core_shutdown,
226 .detach = mpt_core_detach,
227 .unload = mpt_core_unload,
231 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
232 * ordering information. We want the core to always register FIRST.
233 * other modules are set to SI_ORDER_SECOND.
235 static moduledata_t mpt_core_mod = {
236 "mpt_core", mpt_modevent, &mpt_core_personality
238 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
239 MODULE_VERSION(mpt_core, 1);
241 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
244 mpt_modevent(module_t mod, int type, void *data)
246 struct mpt_personality *pers;
247 int error;
249 pers = (struct mpt_personality *)data;
251 error = 0;
252 switch (type) {
253 case MOD_LOAD:
255 mpt_load_handler_t **def_handler;
256 mpt_load_handler_t **pers_handler;
257 int i;
259 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
260 if (mpt_personalities[i] == NULL)
261 break;
263 if (i >= MPT_MAX_PERSONALITIES) {
264 error = ENOMEM;
265 break;
267 pers->id = i;
268 mpt_personalities[i] = pers;
270 /* Install standard/noop handlers for any NULL entries. */
271 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
272 pers_handler = MPT_PERS_FIRST_HANDLER(pers);
273 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
274 if (*pers_handler == NULL)
275 *pers_handler = *def_handler;
276 pers_handler++;
277 def_handler++;
280 error = (pers->load(pers));
281 if (error != 0)
282 mpt_personalities[i] = NULL;
283 break;
285 case MOD_SHUTDOWN:
286 break;
287 #if __FreeBSD_version >= 500000
288 case MOD_QUIESCE:
289 break;
290 #endif
291 case MOD_UNLOAD:
292 error = pers->unload(pers);
293 mpt_personalities[pers->id] = NULL;
294 break;
295 default:
296 error = EINVAL;
297 break;
299 return (error);
303 mpt_stdload(struct mpt_personality *pers)
305 /* Load is always successfull. */
306 return (0);
310 mpt_stdprobe(struct mpt_softc *mpt)
312 /* Probe is always successfull. */
313 return (0);
317 mpt_stdattach(struct mpt_softc *mpt)
319 /* Attach is always successfull. */
320 return (0);
324 mpt_stdenable(struct mpt_softc *mpt)
326 /* Enable is always successfull. */
327 return (0);
330 void
331 mpt_stdready(struct mpt_softc *mpt)
337 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
339 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%lx\n",
340 msg->Event & 0xFF);
341 /* Event was not for us. */
342 return (0);
345 void
346 mpt_stdreset(struct mpt_softc *mpt, int type)
350 void
351 mpt_stdshutdown(struct mpt_softc *mpt)
355 void
356 mpt_stddetach(struct mpt_softc *mpt)
361 mpt_stdunload(struct mpt_personality *pers)
363 /* Unload is always successfull. */
364 return (0);
368 * Post driver attachment, we may want to perform some global actions.
369 * Here is the hook to do so.
372 static void
373 mpt_postattach(void *unused)
375 struct mpt_softc *mpt;
376 struct mpt_personality *pers;
378 TAILQ_FOREACH(mpt, &mpt_tailq, links) {
379 MPT_PERS_FOREACH(mpt, pers)
380 pers->ready(mpt);
383 SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
386 /******************************* Bus DMA Support ******************************/
387 void
388 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
390 struct mpt_map_info *map_info;
392 map_info = (struct mpt_map_info *)arg;
393 map_info->error = error;
394 map_info->phys = segs->ds_addr;
397 /**************************** Reply/Event Handling ****************************/
399 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
400 mpt_handler_t handler, uint32_t *phandler_id)
403 switch (type) {
404 case MPT_HANDLER_REPLY:
406 u_int cbi;
407 u_int free_cbi;
409 if (phandler_id == NULL)
410 return (EINVAL);
412 free_cbi = MPT_HANDLER_ID_NONE;
413 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
415 * If the same handler is registered multiple
416 * times, don't error out. Just return the
417 * index of the original registration.
419 if (mpt_reply_handlers[cbi] == handler.reply_handler) {
420 *phandler_id = MPT_CBI_TO_HID(cbi);
421 return (0);
425 * Fill from the front in the hope that
426 * all registered handlers consume only a
427 * single cache line.
429 * We don't break on the first empty slot so
430 * that the full table is checked to see if
431 * this handler was previously registered.
433 if (free_cbi == MPT_HANDLER_ID_NONE &&
434 (mpt_reply_handlers[cbi]
435 == mpt_default_reply_handler))
436 free_cbi = cbi;
438 if (free_cbi == MPT_HANDLER_ID_NONE) {
439 return (ENOMEM);
441 mpt_reply_handlers[free_cbi] = handler.reply_handler;
442 *phandler_id = MPT_CBI_TO_HID(free_cbi);
443 break;
445 default:
446 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
447 return (EINVAL);
449 return (0);
453 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
454 mpt_handler_t handler, uint32_t handler_id)
457 switch (type) {
458 case MPT_HANDLER_REPLY:
460 u_int cbi;
462 cbi = MPT_CBI(handler_id);
463 if (cbi >= MPT_NUM_REPLY_HANDLERS
464 || mpt_reply_handlers[cbi] != handler.reply_handler)
465 return (ENOENT);
466 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
467 break;
469 default:
470 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
471 return (EINVAL);
473 return (0);
476 static int
477 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
478 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
480 mpt_prt(mpt,
481 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
482 req, req->serno, reply_desc, reply_frame);
484 if (reply_frame != NULL)
485 mpt_dump_reply_frame(mpt, reply_frame);
487 mpt_prt(mpt, "Reply Frame Ignored\n");
489 return (/*free_reply*/TRUE);
492 static int
493 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
494 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
496 if (req != NULL) {
498 if (reply_frame != NULL) {
499 MSG_CONFIG *cfgp;
500 MSG_CONFIG_REPLY *reply;
502 cfgp = (MSG_CONFIG *)req->req_vbuf;
503 reply = (MSG_CONFIG_REPLY *)reply_frame;
504 req->IOCStatus = le16toh(reply_frame->IOCStatus);
505 bcopy(&reply->Header, &cfgp->Header,
506 sizeof(cfgp->Header));
507 cfgp->ExtPageLength = reply->ExtPageLength;
508 cfgp->ExtPageType = reply->ExtPageType;
510 req->state &= ~REQ_STATE_QUEUED;
511 req->state |= REQ_STATE_DONE;
512 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
513 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
514 wakeup(req);
515 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
517 * Whew- we can free this request (late completion)
519 mpt_free_request(mpt, req);
523 return (TRUE);
526 static int
527 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
528 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
530 /* Nothing to be done. */
531 return (TRUE);
534 static int
535 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
536 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
538 int free_reply;
540 KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
541 KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));
543 free_reply = TRUE;
544 switch (reply_frame->Function) {
545 case MPI_FUNCTION_EVENT_NOTIFICATION:
547 MSG_EVENT_NOTIFY_REPLY *msg;
548 struct mpt_personality *pers;
549 u_int handled;
551 handled = 0;
552 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
553 msg->EventDataLength = le16toh(msg->EventDataLength);
554 msg->IOCStatus = le16toh(msg->IOCStatus);
555 msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
556 msg->Event = le32toh(msg->Event);
557 MPT_PERS_FOREACH(mpt, pers)
558 handled += pers->event(mpt, req, msg);
560 if (handled == 0 && mpt->mpt_pers_mask == 0) {
561 mpt_lprt(mpt, MPT_PRT_INFO,
562 "No Handlers For Any Event Notify Frames. "
563 "Event %#x (ACK %sequired).\n",
564 (unsigned)msg->Event,
565 msg->AckRequired? "r" : "not r");
566 } else if (handled == 0) {
567 mpt_lprt(mpt,
568 msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
569 "Unhandled Event Notify Frame. Event %#x "
570 "(ACK %sequired).\n",
571 (unsigned)msg->Event,
572 msg->AckRequired? "r" : "not r");
575 if (msg->AckRequired) {
576 request_t *ack_req;
577 uint32_t context;
579 context = req->index | MPT_REPLY_HANDLER_EVENTS;
580 ack_req = mpt_get_request(mpt, FALSE);
581 if (ack_req == NULL) {
582 struct mpt_evtf_record *evtf;
584 evtf = (struct mpt_evtf_record *)reply_frame;
585 evtf->context = context;
586 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
587 free_reply = FALSE;
588 break;
590 mpt_send_event_ack(mpt, ack_req, msg, context);
592 * Don't check for CONTINUATION_REPLY here
594 return (free_reply);
596 break;
598 case MPI_FUNCTION_PORT_ENABLE:
599 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
600 break;
601 case MPI_FUNCTION_EVENT_ACK:
602 break;
603 default:
604 mpt_prt(mpt, "unknown event function: %x\n",
605 reply_frame->Function);
606 break;
610 * I'm not sure that this continuation stuff works as it should.
612 * I've had FC async events occur that free the frame up because
613 * the continuation bit isn't set, and then additional async events
614 * then occur using the same context. As you might imagine, this
615 * leads to Very Bad Thing.
617 * Let's just be safe for now and not free them up until we figure
618 * out what's actually happening here.
620 #if 0
621 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
622 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
623 mpt_free_request(mpt, req);
624 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
625 reply_frame->Function, req, req->serno);
626 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
627 MSG_EVENT_NOTIFY_REPLY *msg =
628 (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
629 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
630 msg->Event, msg->AckRequired);
632 } else {
633 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
634 reply_frame->Function, req, req->serno);
635 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
636 MSG_EVENT_NOTIFY_REPLY *msg =
637 (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
638 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
639 msg->Event, msg->AckRequired);
641 mpt_prtc(mpt, "\n");
643 #endif
644 return (free_reply);
648 * Process an asynchronous event from the IOC.
650 static int
651 mpt_core_event(struct mpt_softc *mpt, request_t *req,
652 MSG_EVENT_NOTIFY_REPLY *msg)
654 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
655 (unsigned)(msg->Event & 0xFF));
656 switch(msg->Event & 0xFF) {
657 case MPI_EVENT_NONE:
658 break;
659 case MPI_EVENT_LOG_DATA:
661 int i;
663 /* Some error occured that LSI wants logged */
664 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
665 (unsigned)msg->IOCLogInfo);
666 mpt_prt(mpt, "\tEvtLogData: Event Data:");
667 for (i = 0; i < msg->EventDataLength; i++)
668 mpt_prtc(mpt, " %08x", (unsigned)msg->Data[i]);
669 mpt_prtc(mpt, "\n");
670 break;
672 case MPI_EVENT_EVENT_CHANGE:
674 * This is just an acknowledgement
675 * of our mpt_send_event_request.
677 break;
678 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
679 break;
680 default:
681 return (0);
682 break;
684 return (1);
687 static void
688 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
689 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
691 MSG_EVENT_ACK *ackp;
693 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
694 memset(ackp, 0, sizeof (*ackp));
695 ackp->Function = MPI_FUNCTION_EVENT_ACK;
696 ackp->Event = htole32(msg->Event);
697 ackp->EventContext = htole32(msg->EventContext);
698 ackp->MsgContext = htole32(context);
699 mpt_check_doorbell(mpt);
700 mpt_send_cmd(mpt, ack_req);
703 /***************************** Interrupt Handling *****************************/
704 void
705 mpt_intr(void *arg)
707 struct mpt_softc *mpt;
708 uint32_t reply_desc;
709 int ntrips = 0;
711 mpt = (struct mpt_softc *)arg;
712 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
713 MPT_LOCK_ASSERT(mpt);
715 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
716 request_t *req;
717 MSG_DEFAULT_REPLY *reply_frame;
718 uint32_t reply_baddr;
719 uint32_t ctxt_idx;
720 u_int cb_index;
721 u_int req_index;
722 int free_rf;
724 req = NULL;
725 reply_frame = NULL;
726 reply_baddr = 0;
727 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
728 u_int offset;
730 * Insure that the reply frame is coherent.
732 reply_baddr = MPT_REPLY_BADDR(reply_desc);
733 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
734 bus_dmamap_sync_range(mpt->reply_dmat,
735 mpt->reply_dmap, offset, MPT_REPLY_SIZE,
736 BUS_DMASYNC_POSTREAD);
737 reply_frame = MPT_REPLY_OTOV(mpt, offset);
738 ctxt_idx = le32toh(reply_frame->MsgContext);
739 } else {
740 uint32_t type;
742 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
743 ctxt_idx = reply_desc;
744 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
745 reply_desc);
747 switch (type) {
748 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
749 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
750 break;
751 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
752 ctxt_idx = GET_IO_INDEX(reply_desc);
753 if (mpt->tgt_cmd_ptrs == NULL) {
754 mpt_prt(mpt,
755 "mpt_intr: no target cmd ptrs\n");
756 reply_desc = MPT_REPLY_EMPTY;
757 break;
759 if (ctxt_idx >= mpt->tgt_cmds_allocated) {
760 mpt_prt(mpt,
761 "mpt_intr: bad tgt cmd ctxt %u\n",
762 ctxt_idx);
763 reply_desc = MPT_REPLY_EMPTY;
764 ntrips = 1000;
765 break;
767 req = mpt->tgt_cmd_ptrs[ctxt_idx];
768 if (req == NULL) {
769 mpt_prt(mpt, "no request backpointer "
770 "at index %u", ctxt_idx);
771 reply_desc = MPT_REPLY_EMPTY;
772 ntrips = 1000;
773 break;
776 * Reformulate ctxt_idx to be just as if
777 * it were another type of context reply
778 * so the code below will find the request
779 * via indexing into the pool.
781 ctxt_idx =
782 req->index | mpt->scsi_tgt_handler_id;
783 req = NULL;
784 break;
785 case MPI_CONTEXT_REPLY_TYPE_LAN:
786 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
787 reply_desc);
788 reply_desc = MPT_REPLY_EMPTY;
789 break;
790 default:
791 mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
792 reply_desc = MPT_REPLY_EMPTY;
793 break;
795 if (reply_desc == MPT_REPLY_EMPTY) {
796 if (ntrips++ > 1000) {
797 break;
799 continue;
803 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
804 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
805 if (req_index < MPT_MAX_REQUESTS(mpt)) {
806 req = &mpt->request_pool[req_index];
807 } else {
808 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
809 " 0x%x)\n", req_index, reply_desc);
812 free_rf = mpt_reply_handlers[cb_index](mpt, req,
813 reply_desc, reply_frame);
815 if (reply_frame != NULL && free_rf) {
816 mpt_free_reply(mpt, reply_baddr);
820 * If we got ourselves disabled, don't get stuck in a loop
822 if (mpt->disabled) {
823 mpt_disable_ints(mpt);
824 break;
826 if (ntrips++ > 1000) {
827 break;
830 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
833 /******************************* Error Recovery *******************************/
834 void
835 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
836 u_int iocstatus)
838 MSG_DEFAULT_REPLY ioc_status_frame;
839 request_t *req;
841 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
842 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
843 ioc_status_frame.IOCStatus = iocstatus;
844 while((req = TAILQ_FIRST(chain)) != NULL) {
845 MSG_REQUEST_HEADER *msg_hdr;
846 u_int cb_index;
848 TAILQ_REMOVE(chain, req, links);
849 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
850 ioc_status_frame.Function = msg_hdr->Function;
851 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
852 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
853 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
854 &ioc_status_frame);
858 /********************************* Diagnostics ********************************/
860 * Perform a diagnostic dump of a reply frame.
862 void
863 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
865 mpt_prt(mpt, "Address Reply:\n");
866 mpt_print_reply(reply_frame);
869 /******************************* Doorbell Access ******************************/
870 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
871 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
873 static __inline uint32_t
874 mpt_rd_db(struct mpt_softc *mpt)
876 return mpt_read(mpt, MPT_OFFSET_DOORBELL);
879 static __inline uint32_t
880 mpt_rd_intr(struct mpt_softc *mpt)
882 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
885 /* Busy wait for a door bell to be read by IOC */
886 static int
887 mpt_wait_db_ack(struct mpt_softc *mpt)
889 int i;
890 for (i=0; i < MPT_MAX_WAIT; i++) {
891 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
892 maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
893 return (MPT_OK);
895 DELAY(200);
897 return (MPT_FAIL);
900 /* Busy wait for a door bell interrupt */
901 static int
902 mpt_wait_db_int(struct mpt_softc *mpt)
904 int i;
905 for (i = 0; i < MPT_MAX_WAIT; i++) {
906 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
907 maxwait_int = i > maxwait_int ? i : maxwait_int;
908 return MPT_OK;
910 DELAY(100);
912 return (MPT_FAIL);
915 /* Wait for IOC to transition to a give state */
916 void
917 mpt_check_doorbell(struct mpt_softc *mpt)
919 uint32_t db = mpt_rd_db(mpt);
920 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
921 mpt_prt(mpt, "Device not running\n");
922 mpt_print_db(db);
926 /* Wait for IOC to transition to a give state */
927 static int
928 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
930 int i;
932 for (i = 0; i < MPT_MAX_WAIT; i++) {
933 uint32_t db = mpt_rd_db(mpt);
934 if (MPT_STATE(db) == state) {
935 maxwait_state = i > maxwait_state ? i : maxwait_state;
936 return (MPT_OK);
938 DELAY(100);
940 return (MPT_FAIL);
944 /************************* Intialization/Configuration ************************/
945 static int mpt_download_fw(struct mpt_softc *mpt);
947 /* Issue the reset COMMAND to the IOC */
948 static int
949 mpt_soft_reset(struct mpt_softc *mpt)
951 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
953 /* Have to use hard reset if we are not in Running state */
954 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
955 mpt_prt(mpt, "soft reset failed: device not running\n");
956 return (MPT_FAIL);
959 /* If door bell is in use we don't have a chance of getting
960 * a word in since the IOC probably crashed in message
961 * processing. So don't waste our time.
963 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
964 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
965 return (MPT_FAIL);
968 /* Send the reset request to the IOC */
969 mpt_write(mpt, MPT_OFFSET_DOORBELL,
970 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
971 if (mpt_wait_db_ack(mpt) != MPT_OK) {
972 mpt_prt(mpt, "soft reset failed: ack timeout\n");
973 return (MPT_FAIL);
976 /* Wait for the IOC to reload and come out of reset state */
977 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
978 mpt_prt(mpt, "soft reset failed: device did not restart\n");
979 return (MPT_FAIL);
982 return MPT_OK;
985 static int
986 mpt_enable_diag_mode(struct mpt_softc *mpt)
988 int try;
990 try = 20;
991 while (--try) {
993 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
994 break;
996 /* Enable diagnostic registers */
997 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
998 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
999 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
1000 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
1001 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
1002 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
1004 DELAY(100000);
1006 if (try == 0)
1007 return (EIO);
1008 return (0);
1011 static void
1012 mpt_disable_diag_mode(struct mpt_softc *mpt)
1014 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
1017 /* This is a magic diagnostic reset that resets all the ARM
1018 * processors in the chip.
1020 static void
1021 mpt_hard_reset(struct mpt_softc *mpt)
1023 int error;
1024 int wait;
1025 uint32_t diagreg;
1027 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
1029 error = mpt_enable_diag_mode(mpt);
1030 if (error) {
1031 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
1032 mpt_prt(mpt, "Trying to reset anyway.\n");
1035 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1038 * This appears to be a workaround required for some
1039 * firmware or hardware revs.
1041 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
1042 DELAY(1000);
1044 /* Diag. port is now active so we can now hit the reset bit */
1045 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
1048 * Ensure that the reset has finished. We delay 1ms
1049 * prior to reading the register to make sure the chip
1050 * has sufficiently completed its reset to handle register
1051 * accesses.
1053 wait = 5000;
1054 do {
1055 DELAY(1000);
1056 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1057 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
1059 if (wait == 0) {
1060 mpt_prt(mpt, "WARNING - Failed hard reset! "
1061 "Trying to initialize anyway.\n");
1065 * If we have firmware to download, it must be loaded before
1066 * the controller will become operational. Do so now.
1068 if (mpt->fw_image != NULL) {
1070 error = mpt_download_fw(mpt);
1072 if (error) {
1073 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
1074 mpt_prt(mpt, "Trying to initialize anyway.\n");
1079 * Reseting the controller should have disabled write
1080 * access to the diagnostic registers, but disable
1081 * manually to be sure.
1083 mpt_disable_diag_mode(mpt);
1086 static void
1087 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1090 * Complete all pending requests with a status
1091 * appropriate for an IOC reset.
1093 mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1094 MPI_IOCSTATUS_INVALID_STATE);
1099 * Reset the IOC when needed. Try software command first then if needed
1100 * poke at the magic diagnostic reset. Note that a hard reset resets
1101 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
1102 * fouls up the PCI configuration registers.
1105 mpt_reset(struct mpt_softc *mpt, int reinit)
1107 struct mpt_personality *pers;
1108 int ret;
1109 int retry_cnt = 0;
1112 * Try a soft reset. If that fails, get out the big hammer.
1114 again:
1115 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
1116 int cnt;
1117 for (cnt = 0; cnt < 5; cnt++) {
1118 /* Failed; do a hard reset */
1119 mpt_hard_reset(mpt);
1122 * Wait for the IOC to reload
1123 * and come out of reset state
1125 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1126 if (ret == MPT_OK) {
1127 break;
1130 * Okay- try to check again...
1132 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1133 if (ret == MPT_OK) {
1134 break;
1136 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
1137 retry_cnt, cnt);
1141 if (retry_cnt == 0) {
1143 * Invoke reset handlers. We bump the reset count so
1144 * that mpt_wait_req() understands that regardless of
1145 * the specified wait condition, it should stop its wait.
1147 mpt->reset_cnt++;
1148 MPT_PERS_FOREACH(mpt, pers)
1149 pers->reset(mpt, ret);
1152 if (reinit) {
1153 ret = mpt_enable_ioc(mpt, 1);
1154 if (ret == MPT_OK) {
1155 mpt_enable_ints(mpt);
1158 if (ret != MPT_OK && retry_cnt++ < 2) {
1159 goto again;
1161 return ret;
1164 /* Return a command buffer to the free queue */
1165 void
1166 mpt_free_request(struct mpt_softc *mpt, request_t *req)
1168 request_t *nxt;
1169 struct mpt_evtf_record *record;
1170 uint32_t reply_baddr;
1172 if (req == NULL || req != &mpt->request_pool[req->index]) {
1173 panic("mpt_free_request bad req ptr\n");
1174 return;
1176 if ((nxt = req->chain) != NULL) {
1177 req->chain = NULL;
1178 mpt_free_request(mpt, nxt); /* NB: recursion */
1180 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
1181 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
1182 MPT_LOCK_ASSERT(mpt);
1183 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1184 ("mpt_free_request: req %p:%u func %x already on freelist",
1185 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1186 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1187 ("mpt_free_request: req %p:%u func %x on pending list",
1188 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1189 #ifdef INVARIANTS
1190 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
1191 #endif
1193 req->ccb = NULL;
1194 if (LIST_EMPTY(&mpt->ack_frames)) {
1196 * Insert free ones at the tail
1198 req->serno = 0;
1199 req->state = REQ_STATE_FREE;
1200 #ifdef INVARIANTS
1201 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
1202 #endif
1203 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
1204 if (mpt->getreqwaiter != 0) {
1205 mpt->getreqwaiter = 0;
1206 wakeup(&mpt->request_free_list);
1208 return;
1212 * Process an ack frame deferred due to resource shortage.
1214 record = LIST_FIRST(&mpt->ack_frames);
1215 LIST_REMOVE(record, links);
1216 req->state = REQ_STATE_ALLOCATED;
1217 mpt_assign_serno(mpt, req);
1218 mpt_send_event_ack(mpt, req, &record->reply, record->context);
1219 reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
1220 + (mpt->reply_phys & 0xFFFFFFFF);
1221 mpt_free_reply(mpt, reply_baddr);
1224 /* Get a command buffer from the free queue */
1225 request_t *
1226 mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
1228 request_t *req;
1230 retry:
1231 MPT_LOCK_ASSERT(mpt);
1232 req = TAILQ_FIRST(&mpt->request_free_list);
1233 if (req != NULL) {
1234 KASSERT(req == &mpt->request_pool[req->index],
1235 ("mpt_get_request: corrupted request free list\n"));
1236 KASSERT(req->state == REQ_STATE_FREE,
1237 ("req %p:%u not free on free list %x index %d function %x",
1238 req, req->serno, req->state, req->index,
1239 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1240 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1241 req->state = REQ_STATE_ALLOCATED;
1242 req->chain = NULL;
1243 mpt_assign_serno(mpt, req);
1244 mpt_callout_init(&req->callout);
1245 } else if (sleep_ok != 0) {
1246 mpt->getreqwaiter = 1;
1247 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1248 goto retry;
1250 return (req);
1253 /* Pass the command to the IOC */
1254 void
1255 mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1257 if (mpt->verbose > MPT_PRT_DEBUG2) {
1258 mpt_dump_request(mpt, req);
1260 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1261 BUS_DMASYNC_PREWRITE);
1262 req->state |= REQ_STATE_QUEUED;
1263 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1264 ("req %p:%u func %x on freelist list in mpt_send_cmd",
1265 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1266 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1267 ("req %p:%u func %x already on pending list in mpt_send_cmd",
1268 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1269 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
1270 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
1274 * Wait for a request to complete.
1276 * Inputs:
1277 * mpt softc of controller executing request
1278 * req request to wait for
1279 * sleep_ok nonzero implies may sleep in this context
1280 * time_ms timeout in ms. 0 implies no timeout.
1282 * Return Values:
1283 * 0 Request completed
1284 * non-0 Timeout fired before request completion.
1287 mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1288 mpt_req_state_t state, mpt_req_state_t mask,
1289 int sleep_ok, int time_ms)
1291 int error;
1292 int timeout;
1293 u_int saved_cnt;
1296 * timeout is in ms. 0 indicates infinite wait.
1297 * Convert to ticks or 500us units depending on
1298 * our sleep mode.
1300 if (sleep_ok != 0) {
1301 timeout = (time_ms * hz) / 1000;
1302 } else {
1303 timeout = time_ms * 2;
1305 req->state |= REQ_STATE_NEED_WAKEUP;
1306 mask &= ~REQ_STATE_NEED_WAKEUP;
1307 saved_cnt = mpt->reset_cnt;
1308 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1309 if (sleep_ok != 0) {
1310 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1311 if (error == EWOULDBLOCK) {
1312 timeout = 0;
1313 break;
1315 } else {
1316 if (time_ms != 0 && --timeout == 0) {
1317 break;
1319 DELAY(500);
1320 mpt_intr(mpt);
1323 req->state &= ~REQ_STATE_NEED_WAKEUP;
1324 if (mpt->reset_cnt != saved_cnt) {
1325 return (EIO);
1327 if (time_ms && timeout <= 0) {
1328 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
1329 req->state |= REQ_STATE_TIMEDOUT;
1330 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1331 return (ETIMEDOUT);
1333 return (0);
1337 * Send a command to the IOC via the handshake register.
1339 * Only done at initialization time and for certain unusual
1340 * commands such as device/bus reset as specified by LSI.
1343 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1345 int i;
1346 uint32_t data, *data32;
1348 /* Check condition of the IOC */
1349 data = mpt_rd_db(mpt);
1350 if ((MPT_STATE(data) != MPT_DB_STATE_READY
1351 && MPT_STATE(data) != MPT_DB_STATE_RUNNING
1352 && MPT_STATE(data) != MPT_DB_STATE_FAULT)
1353 || MPT_DB_IS_IN_USE(data)) {
1354 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1355 mpt_print_db(data);
1356 return (EBUSY);
1359 /* We move things in 32 bit chunks */
1360 len = (len + 3) >> 2;
1361 data32 = cmd;
1363 /* Clear any left over pending doorbell interrupts */
1364 if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1365 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1368 * Tell the handshake reg. we are going to send a command
1369 * and how long it is going to be.
1371 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
1372 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
1373 mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1375 /* Wait for the chip to notice */
1376 if (mpt_wait_db_int(mpt) != MPT_OK) {
1377 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
1378 return (ETIMEDOUT);
1381 /* Clear the interrupt */
1382 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1384 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1385 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
1386 return (ETIMEDOUT);
1389 /* Send the command */
1390 for (i = 0; i < len; i++) {
1391 mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
1392 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1393 mpt_prt(mpt,
1394 "mpt_send_handshake_cmd: timeout @ index %d\n", i);
1395 return (ETIMEDOUT);
1398 return MPT_OK;
1401 /* Get the response from the handshake register */
1403 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1405 int left, reply_left;
1406 u_int16_t *data16;
1407 uint32_t data;
1408 MSG_DEFAULT_REPLY *hdr;
1410 /* We move things out in 16 bit chunks */
1411 reply_len >>= 1;
1412 data16 = (u_int16_t *)reply;
1414 hdr = (MSG_DEFAULT_REPLY *)reply;
1416 /* Get first word */
1417 if (mpt_wait_db_int(mpt) != MPT_OK) {
1418 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1419 return ETIMEDOUT;
1421 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1422 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1423 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1425 /* Get Second Word */
1426 if (mpt_wait_db_int(mpt) != MPT_OK) {
1427 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1428 return ETIMEDOUT;
1430 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1431 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1432 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1435 * With the second word, we can now look at the length.
1436 * Warn about a reply that's too short (except for IOC FACTS REPLY)
1438 if ((reply_len >> 1) != hdr->MsgLength &&
1439 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
1440 #if __FreeBSD_version >= 500000
1441 mpt_prt(mpt, "reply length does not match message length: "
1442 "got %x; expected %zx for function %x\n",
1443 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1444 #else
1445 mpt_prt(mpt, "reply length does not match message length: "
1446 "got %x; expected %zx for function %x\n",
1447 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1448 #endif
1451 /* Get rest of the reply; but don't overflow the provided buffer */
1452 left = (hdr->MsgLength << 1) - 2;
1453 reply_left = reply_len - 2;
1454 while (left--) {
1455 u_int16_t datum;
1457 if (mpt_wait_db_int(mpt) != MPT_OK) {
1458 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1459 return ETIMEDOUT;
1461 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1462 datum = le16toh(data & MPT_DB_DATA_MASK);
1464 if (reply_left-- > 0)
1465 *data16++ = datum;
1467 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1470 /* One more wait & clear at the end */
1471 if (mpt_wait_db_int(mpt) != MPT_OK) {
1472 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1473 return ETIMEDOUT;
1475 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1477 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1478 if (mpt->verbose >= MPT_PRT_TRACE)
1479 mpt_print_reply(hdr);
1480 return (MPT_FAIL | hdr->IOCStatus);
1483 return (0);
1486 static int
1487 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1489 MSG_IOC_FACTS f_req;
1490 int error;
1492 memset(&f_req, 0, sizeof f_req);
1493 f_req.Function = MPI_FUNCTION_IOC_FACTS;
1494 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1495 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1496 if (error) {
1497 return(error);
1499 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1500 return (error);
1503 static int
1504 mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
1506 MSG_PORT_FACTS f_req;
1507 int error;
1509 memset(&f_req, 0, sizeof f_req);
1510 f_req.Function = MPI_FUNCTION_PORT_FACTS;
1511 f_req.PortNumber = port;
1512 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1513 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1514 if (error) {
1515 return(error);
1517 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1518 return (error);
1522 * Send the initialization request. This is where we specify how many
1523 * SCSI busses and how many devices per bus we wish to emulate.
1524 * This is also the command that specifies the max size of the reply
1525 * frames from the IOC that we will be allocating.
1527 static int
1528 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1530 int error = 0;
1531 MSG_IOC_INIT init;
1532 MSG_IOC_INIT_REPLY reply;
1534 memset(&init, 0, sizeof init);
1535 init.WhoInit = who;
1536 init.Function = MPI_FUNCTION_IOC_INIT;
1537 init.MaxDevices = 0; /* at least 256 devices per bus */
1538 init.MaxBuses = 16; /* at least 16 busses */
1540 init.MsgVersion = htole16(MPI_VERSION);
1541 init.HeaderVersion = htole16(MPI_HEADER_VERSION);
1542 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
1543 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1545 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1546 return(error);
1549 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
1550 return (error);
1555 * Utiltity routine to read configuration headers and pages
1558 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
1559 bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
1561 MSG_CONFIG *cfgp;
1562 SGE_SIMPLE32 *se;
1564 cfgp = req->req_vbuf;
1565 memset(cfgp, 0, sizeof *cfgp);
1566 cfgp->Action = params->Action;
1567 cfgp->Function = MPI_FUNCTION_CONFIG;
1568 cfgp->Header.PageVersion = params->PageVersion;
1569 cfgp->Header.PageNumber = params->PageNumber;
1570 cfgp->PageAddress = htole32(params->PageAddress);
1571 if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
1572 MPI_CONFIG_PAGETYPE_EXTENDED) {
1573 cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1574 cfgp->Header.PageLength = 0;
1575 cfgp->ExtPageLength = htole16(params->ExtPageLength);
1576 cfgp->ExtPageType = params->ExtPageType;
1577 } else {
1578 cfgp->Header.PageType = params->PageType;
1579 cfgp->Header.PageLength = params->PageLength;
1581 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
1582 se->Address = htole32(addr);
1583 MPI_pSGE_SET_LENGTH(se, len);
1584 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1585 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1586 MPI_SGE_FLAGS_END_OF_LIST |
1587 ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1588 || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
1589 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
1590 se->FlagsLength = htole32(se->FlagsLength);
1591 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1593 mpt_check_doorbell(mpt);
1594 mpt_send_cmd(mpt, req);
1595 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1596 sleep_ok, timeout_ms));
1600 mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
1601 uint32_t PageAddress, int ExtPageType,
1602 CONFIG_EXTENDED_PAGE_HEADER *rslt,
1603 int sleep_ok, int timeout_ms)
1605 request_t *req;
1606 cfgparms_t params;
1607 MSG_CONFIG_REPLY *cfgp;
1608 int error;
1610 req = mpt_get_request(mpt, sleep_ok);
1611 if (req == NULL) {
1612 mpt_prt(mpt, "mpt_extread_cfg_header: Get request failed!\n");
1613 return (ENOMEM);
1616 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1617 params.PageVersion = PageVersion;
1618 params.PageLength = 0;
1619 params.PageNumber = PageNumber;
1620 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1621 params.PageAddress = PageAddress;
1622 params.ExtPageType = ExtPageType;
1623 params.ExtPageLength = 0;
1624 error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1625 sleep_ok, timeout_ms);
1626 if (error != 0) {
1628 * Leave the request. Without resetting the chip, it's
1629 * still owned by it and we'll just get into trouble
1630 * freeing it now. Mark it as abandoned so that if it
1631 * shows up later it can be freed.
1633 mpt_prt(mpt, "read_extcfg_header timed out\n");
1634 return (ETIMEDOUT);
1637 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1638 case MPI_IOCSTATUS_SUCCESS:
1639 cfgp = req->req_vbuf;
1640 rslt->PageVersion = cfgp->Header.PageVersion;
1641 rslt->PageNumber = cfgp->Header.PageNumber;
1642 rslt->PageType = cfgp->Header.PageType;
1643 rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
1644 rslt->ExtPageType = cfgp->ExtPageType;
1645 error = 0;
1646 break;
1647 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1648 mpt_lprt(mpt, MPT_PRT_DEBUG,
1649 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1650 MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
1651 error = EINVAL;
1652 break;
1653 default:
1654 mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
1655 req->IOCStatus);
1656 error = EIO;
1657 break;
1659 mpt_free_request(mpt, req);
1660 return (error);
1664 mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1665 CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
1666 int sleep_ok, int timeout_ms)
1668 request_t *req;
1669 cfgparms_t params;
1670 int error;
1672 req = mpt_get_request(mpt, sleep_ok);
1673 if (req == NULL) {
1674 mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
1675 return (-1);
1678 params.Action = Action;
1679 params.PageVersion = hdr->PageVersion;
1680 params.PageLength = 0;
1681 params.PageNumber = hdr->PageNumber;
1682 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1683 params.PageAddress = PageAddress;
1684 params.ExtPageType = hdr->ExtPageType;
1685 params.ExtPageLength = hdr->ExtPageLength;
1686 error = mpt_issue_cfg_req(mpt, req, &params,
1687 req->req_pbuf + MPT_RQSL(mpt),
1688 len, sleep_ok, timeout_ms);
1689 if (error != 0) {
1690 mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
1691 return (-1);
1694 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1695 mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
1696 req->IOCStatus);
1697 mpt_free_request(mpt, req);
1698 return (-1);
1700 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1701 BUS_DMASYNC_POSTREAD);
1702 memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1703 mpt_free_request(mpt, req);
1704 return (0);
1708 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1709 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
1710 int sleep_ok, int timeout_ms)
1712 request_t *req;
1713 cfgparms_t params;
1714 MSG_CONFIG *cfgp;
1715 int error;
1717 req = mpt_get_request(mpt, sleep_ok);
1718 if (req == NULL) {
1719 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
1720 return (ENOMEM);
1723 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1724 params.PageVersion = 0;
1725 params.PageLength = 0;
1726 params.PageNumber = PageNumber;
1727 params.PageType = PageType;
1728 params.PageAddress = PageAddress;
1729 error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1730 sleep_ok, timeout_ms);
1731 if (error != 0) {
1733 * Leave the request. Without resetting the chip, it's
1734 * still owned by it and we'll just get into trouble
1735 * freeing it now. Mark it as abandoned so that if it
1736 * shows up later it can be freed.
1738 mpt_prt(mpt, "read_cfg_header timed out\n");
1739 return (ETIMEDOUT);
1742 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1743 case MPI_IOCSTATUS_SUCCESS:
1744 cfgp = req->req_vbuf;
1745 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
1746 error = 0;
1747 break;
1748 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1749 mpt_lprt(mpt, MPT_PRT_DEBUG,
1750 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1751 PageType, PageNumber, PageAddress);
1752 error = EINVAL;
1753 break;
1754 default:
1755 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1756 req->IOCStatus);
1757 error = EIO;
1758 break;
1760 mpt_free_request(mpt, req);
1761 return (error);
1765 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1766 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1767 int timeout_ms)
1769 request_t *req;
1770 cfgparms_t params;
1771 int error;
1773 req = mpt_get_request(mpt, sleep_ok);
1774 if (req == NULL) {
1775 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1776 return (-1);
1779 params.Action = Action;
1780 params.PageVersion = hdr->PageVersion;
1781 params.PageLength = hdr->PageLength;
1782 params.PageNumber = hdr->PageNumber;
1783 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
1784 params.PageAddress = PageAddress;
1785 error = mpt_issue_cfg_req(mpt, req, &params,
1786 req->req_pbuf + MPT_RQSL(mpt),
1787 len, sleep_ok, timeout_ms);
1788 if (error != 0) {
1789 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1790 return (-1);
1793 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1794 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1795 req->IOCStatus);
1796 mpt_free_request(mpt, req);
1797 return (-1);
1799 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1800 BUS_DMASYNC_POSTREAD);
1801 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1802 mpt_free_request(mpt, req);
1803 return (0);
1807 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1808 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1809 int timeout_ms)
1811 request_t *req;
1812 cfgparms_t params;
1813 u_int hdr_attr;
1814 int error;
1816 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
1817 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
1818 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
1819 mpt_prt(mpt, "page type 0x%x not changeable\n",
1820 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
1821 return (-1);
1824 #if 0
1826 * We shouldn't mask off other bits here.
1828 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
1829 #endif
1831 req = mpt_get_request(mpt, sleep_ok);
1832 if (req == NULL)
1833 return (-1);
1835 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
1838 * There isn't any point in restoring stripped out attributes
1839 * if you then mask them going down to issue the request.
1842 params.Action = Action;
1843 params.PageVersion = hdr->PageVersion;
1844 params.PageLength = hdr->PageLength;
1845 params.PageNumber = hdr->PageNumber;
1846 params.PageAddress = PageAddress;
1847 #if 0
1848 /* Restore stripped out attributes */
1849 hdr->PageType |= hdr_attr;
1850 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
1851 #else
1852 params.PageType = hdr->PageType;
1853 #endif
1854 error = mpt_issue_cfg_req(mpt, req, &params,
1855 req->req_pbuf + MPT_RQSL(mpt),
1856 len, sleep_ok, timeout_ms);
1857 if (error != 0) {
1858 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1859 return (-1);
1862 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1863 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1864 req->IOCStatus);
1865 mpt_free_request(mpt, req);
1866 return (-1);
1868 mpt_free_request(mpt, req);
1869 return (0);
1873 * Read IOC configuration information
1875 static int
1876 mpt_read_config_info_ioc(struct mpt_softc *mpt)
1878 CONFIG_PAGE_HEADER hdr;
1879 struct mpt_raid_volume *mpt_raid;
1880 int rv;
1881 int i;
1882 size_t len;
1884 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1885 2, 0, &hdr, FALSE, 5000);
1887 * If it's an invalid page, so what? Not a supported function....
1889 if (rv == EINVAL) {
1890 return (0);
1892 if (rv) {
1893 return (rv);
1896 mpt_lprt(mpt, MPT_PRT_DEBUG,
1897 "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
1898 hdr.PageVersion, hdr.PageLength << 2,
1899 hdr.PageNumber, hdr.PageType);
1901 len = hdr.PageLength * sizeof(uint32_t);
1902 mpt->ioc_page2 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1903 if (mpt->ioc_page2 == NULL) {
1904 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
1905 mpt_raid_free_mem(mpt);
1906 return (ENOMEM);
1908 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1909 rv = mpt_read_cur_cfg_page(mpt, 0,
1910 &mpt->ioc_page2->Header, len, FALSE, 5000);
1911 if (rv) {
1912 mpt_prt(mpt, "failed to read IOC Page 2\n");
1913 mpt_raid_free_mem(mpt);
1914 return (EIO);
1916 mpt2host_config_page_ioc2(mpt->ioc_page2);
1918 if (mpt->ioc_page2->CapabilitiesFlags != 0) {
1919 uint32_t mask;
1921 mpt_prt(mpt, "Capabilities: (");
1922 for (mask = 1; mask != 0; mask <<= 1) {
1923 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
1924 continue;
1926 switch (mask) {
1927 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
1928 mpt_prtc(mpt, " RAID-0");
1929 break;
1930 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
1931 mpt_prtc(mpt, " RAID-1E");
1932 break;
1933 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
1934 mpt_prtc(mpt, " RAID-1");
1935 break;
1936 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
1937 mpt_prtc(mpt, " SES");
1938 break;
1939 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
1940 mpt_prtc(mpt, " SAFTE");
1941 break;
1942 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
1943 mpt_prtc(mpt, " Multi-Channel-Arrays");
1944 default:
1945 break;
1948 mpt_prtc(mpt, " )\n");
1949 if ((mpt->ioc_page2->CapabilitiesFlags
1950 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1951 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1952 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1953 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1954 mpt->ioc_page2->NumActiveVolumes,
1955 mpt->ioc_page2->NumActiveVolumes != 1
1956 ? "s " : " ",
1957 mpt->ioc_page2->MaxVolumes);
1958 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1959 mpt->ioc_page2->NumActivePhysDisks,
1960 mpt->ioc_page2->NumActivePhysDisks != 1
1961 ? "s " : " ",
1962 mpt->ioc_page2->MaxPhysDisks);
1966 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1967 mpt->raid_volumes = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1968 if (mpt->raid_volumes == NULL) {
1969 mpt_prt(mpt, "Could not allocate RAID volume data\n");
1970 mpt_raid_free_mem(mpt);
1971 return (ENOMEM);
1975 * Copy critical data out of ioc_page2 so that we can
1976 * safely refresh the page without windows of unreliable
1977 * data.
1979 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
1981 len = sizeof(*mpt->raid_volumes->config_page) +
1982 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
1983 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1984 mpt_raid = &mpt->raid_volumes[i];
1985 mpt_raid->config_page =
1986 kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1987 if (mpt_raid->config_page == NULL) {
1988 mpt_prt(mpt, "Could not allocate RAID page data\n");
1989 mpt_raid_free_mem(mpt);
1990 return (ENOMEM);
1993 mpt->raid_page0_len = len;
1995 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1996 mpt->raid_disks = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1997 if (mpt->raid_disks == NULL) {
1998 mpt_prt(mpt, "Could not allocate RAID disk data\n");
1999 mpt_raid_free_mem(mpt);
2000 return (ENOMEM);
2002 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
2005 * Load page 3.
2007 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
2008 3, 0, &hdr, FALSE, 5000);
2009 if (rv) {
2010 mpt_raid_free_mem(mpt);
2011 return (EIO);
2014 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
2015 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
2017 len = hdr.PageLength * sizeof(uint32_t);
2018 mpt->ioc_page3 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2019 if (mpt->ioc_page3 == NULL) {
2020 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
2021 mpt_raid_free_mem(mpt);
2022 return (ENOMEM);
2024 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
2025 rv = mpt_read_cur_cfg_page(mpt, 0,
2026 &mpt->ioc_page3->Header, len, FALSE, 5000);
2027 if (rv) {
2028 mpt_raid_free_mem(mpt);
2029 return (EIO);
2031 mpt2host_config_page_ioc3(mpt->ioc_page3);
2032 mpt_raid_wakeup(mpt);
2033 return (0);
2037 * Enable IOC port
2039 static int
2040 mpt_send_port_enable(struct mpt_softc *mpt, int port)
2042 request_t *req;
2043 MSG_PORT_ENABLE *enable_req;
2044 int error;
2046 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
2047 if (req == NULL)
2048 return (-1);
2050 enable_req = req->req_vbuf;
2051 memset(enable_req, 0, MPT_RQSL(mpt));
2053 enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
2054 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
2055 enable_req->PortNumber = port;
2057 mpt_check_doorbell(mpt);
2058 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
2060 mpt_send_cmd(mpt, req);
2061 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
2062 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
2063 if (error != 0) {
2064 mpt_prt(mpt, "port %d enable timed out\n", port);
2065 return (-1);
2067 mpt_free_request(mpt, req);
2068 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
2069 return (0);
2073 * Enable/Disable asynchronous event reporting.
2075 static int
2076 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
2078 request_t *req;
2079 MSG_EVENT_NOTIFY *enable_req;
2081 req = mpt_get_request(mpt, FALSE);
2082 if (req == NULL) {
2083 return (ENOMEM);
2085 enable_req = req->req_vbuf;
2086 memset(enable_req, 0, sizeof *enable_req);
2088 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
2089 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
2090 enable_req->Switch = onoff;
2092 mpt_check_doorbell(mpt);
2093 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
2094 onoff ? "en" : "dis");
2096 * Send the command off, but don't wait for it.
2098 mpt_send_cmd(mpt, req);
2099 return (0);
2103 * Un-mask the interrupts on the chip.
2105 void
2106 mpt_enable_ints(struct mpt_softc *mpt)
2108 /* Unmask every thing except door bell int */
2109 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
2113 * Mask the interrupts on the chip.
2115 void
2116 mpt_disable_ints(struct mpt_softc *mpt)
2118 /* Mask all interrupts */
2119 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
2120 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
2123 static void
2124 mpt_sysctl_attach(struct mpt_softc *mpt)
2126 #if __FreeBSD_version >= 500000
2127 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
2128 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
2130 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2131 "debug", CTLFLAG_RW, &mpt->verbose, 0,
2132 "Debugging/Verbose level");
2133 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2134 "role", CTLFLAG_RD, &mpt->role, 0,
2135 "HBA role");
2136 #ifdef MPT_TEST_MULTIPATH
2137 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2138 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
2139 "Next Target to Fail");
2140 #endif
2141 #endif
2145 mpt_attach(struct mpt_softc *mpt)
2147 struct mpt_personality *pers;
2148 int i;
2149 int error;
2151 mpt_core_attach(mpt);
2152 mpt_core_enable(mpt);
2154 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
2155 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2156 pers = mpt_personalities[i];
2157 if (pers == NULL) {
2158 continue;
2160 if (pers->probe(mpt) == 0) {
2161 error = pers->attach(mpt);
2162 if (error != 0) {
2163 mpt_detach(mpt);
2164 return (error);
2166 mpt->mpt_pers_mask |= (0x1 << pers->id);
2167 pers->use_count++;
2172 * Now that we've attached everything, do the enable function
2173 * for all of the personalities. This allows the personalities
2174 * to do setups that are appropriate for them prior to enabling
2175 * any ports.
2177 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2178 pers = mpt_personalities[i];
2179 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
2180 error = pers->enable(mpt);
2181 if (error != 0) {
2182 mpt_prt(mpt, "personality %s attached but would"
2183 " not enable (%d)\n", pers->name, error);
2184 mpt_detach(mpt);
2185 return (error);
2189 return (0);
2193 mpt_shutdown(struct mpt_softc *mpt)
2195 struct mpt_personality *pers;
2197 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2198 pers->shutdown(mpt);
2200 return (0);
2204 mpt_detach(struct mpt_softc *mpt)
2206 struct mpt_personality *pers;
2208 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2209 pers->detach(mpt);
2210 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2211 pers->use_count--;
2213 TAILQ_REMOVE(&mpt_tailq, mpt, links);
2214 return (0);
2218 mpt_core_load(struct mpt_personality *pers)
2220 int i;
2223 * Setup core handlers and insert the default handler
2224 * into all "empty slots".
2226 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2227 mpt_reply_handlers[i] = mpt_default_reply_handler;
2230 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2231 mpt_event_reply_handler;
2232 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2233 mpt_config_reply_handler;
2234 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2235 mpt_handshake_reply_handler;
2236 return (0);
2240 * Initialize per-instance driver data and perform
2241 * initial controller configuration.
2244 mpt_core_attach(struct mpt_softc *mpt)
2246 int val, error;
2248 LIST_INIT(&mpt->ack_frames);
2249 /* Put all request buffers on the free list */
2250 TAILQ_INIT(&mpt->request_pending_list);
2251 TAILQ_INIT(&mpt->request_free_list);
2252 TAILQ_INIT(&mpt->request_timeout_list);
2253 MPT_LOCK(mpt);
2254 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2255 request_t *req = &mpt->request_pool[val];
2256 req->state = REQ_STATE_ALLOCATED;
2257 mpt_free_request(mpt, req);
2259 MPT_UNLOCK(mpt);
2260 for (val = 0; val < MPT_MAX_LUNS; val++) {
2261 STAILQ_INIT(&mpt->trt[val].atios);
2262 STAILQ_INIT(&mpt->trt[val].inots);
2264 STAILQ_INIT(&mpt->trt_wildcard.atios);
2265 STAILQ_INIT(&mpt->trt_wildcard.inots);
2266 #ifdef MPT_TEST_MULTIPATH
2267 mpt->failure_id = -1;
2268 #endif
2269 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2270 mpt_sysctl_attach(mpt);
2271 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2272 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2274 MPT_LOCK(mpt);
2275 error = mpt_configure_ioc(mpt, 0, 0);
2276 MPT_UNLOCK(mpt);
2278 return (error);
2282 mpt_core_enable(struct mpt_softc *mpt)
2285 * We enter with the IOC enabled, but async events
2286 * not enabled, ports not enabled and interrupts
2287 * not enabled.
2289 MPT_LOCK(mpt);
2292 * Enable asynchronous event reporting- all personalities
2293 * have attached so that they should be able to now field
2294 * async events.
2296 mpt_send_event_request(mpt, 1);
2299 * Catch any pending interrupts
2301 * This seems to be crucial- otherwise
2302 * the portenable below times out.
2304 mpt_intr(mpt);
2307 * Enable Interrupts
2309 mpt_enable_ints(mpt);
2312 * Catch any pending interrupts
2314 * This seems to be crucial- otherwise
2315 * the portenable below times out.
2317 mpt_intr(mpt);
2320 * Enable the port.
2322 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2323 mpt_prt(mpt, "failed to enable port 0\n");
2324 MPT_UNLOCK(mpt);
2325 return (ENXIO);
2327 MPT_UNLOCK(mpt);
2328 return (0);
2331 void
2332 mpt_core_shutdown(struct mpt_softc *mpt)
2334 mpt_disable_ints(mpt);
2337 void
2338 mpt_core_detach(struct mpt_softc *mpt)
2341 * XXX: FREE MEMORY
2343 mpt_disable_ints(mpt);
2347 mpt_core_unload(struct mpt_personality *pers)
2349 /* Unload is always successfull. */
2350 return (0);
2353 #define FW_UPLOAD_REQ_SIZE \
2354 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \
2355 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
2357 static int
2358 mpt_upload_fw(struct mpt_softc *mpt)
2360 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2361 MSG_FW_UPLOAD_REPLY fw_reply;
2362 MSG_FW_UPLOAD *fw_req;
2363 FW_UPLOAD_TCSGE *tsge;
2364 SGE_SIMPLE32 *sge;
2365 uint32_t flags;
2366 int error;
2368 memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2369 fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2370 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2371 fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2372 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
2373 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
2374 tsge->DetailsLength = 12;
2375 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2376 tsge->ImageSize = htole32(mpt->fw_image_size);
2377 sge = (SGE_SIMPLE32 *)(tsge + 1);
2378 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2379 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2380 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2381 flags <<= MPI_SGE_FLAGS_SHIFT;
2382 sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2383 sge->Address = htole32(mpt->fw_phys);
2384 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2385 if (error)
2386 return(error);
2387 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2388 return (error);
2391 static void
2392 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2393 uint32_t *data, bus_size_t len)
2395 uint32_t *data_end;
2397 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
2398 if (mpt->is_sas) {
2399 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2401 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2402 while (data != data_end) {
2403 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2404 data++;
2406 if (mpt->is_sas) {
2407 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2411 static int
2412 mpt_download_fw(struct mpt_softc *mpt)
2414 MpiFwHeader_t *fw_hdr;
2415 int error;
2416 uint32_t ext_offset;
2417 uint32_t data;
2419 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2420 mpt->fw_image_size);
2422 error = mpt_enable_diag_mode(mpt);
2423 if (error != 0) {
2424 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2425 return (EIO);
2428 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2429 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2431 fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2432 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2433 fw_hdr->ImageSize);
2435 ext_offset = fw_hdr->NextImageHeaderOffset;
2436 while (ext_offset != 0) {
2437 MpiExtImageHeader_t *ext;
2439 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2440 ext_offset = ext->NextImageHeaderOffset;
2442 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2443 ext->ImageSize);
2446 if (mpt->is_sas) {
2447 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2449 /* Setup the address to jump to on reset. */
2450 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2451 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2454 * The controller sets the "flash bad" status after attempting
2455 * to auto-boot from flash. Clear the status so that the controller
2456 * will continue the boot process with our newly installed firmware.
2458 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2459 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2460 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2461 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2463 if (mpt->is_sas) {
2464 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2468 * Re-enable the processor and clear the boot halt flag.
2470 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2471 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2472 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2474 mpt_disable_diag_mode(mpt);
2475 return (0);
2479 * Allocate/Initialize data structures for the controller. Called
2480 * once at instance startup.
2482 static int
2483 mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
2485 PTR_MSG_PORT_FACTS_REPLY pfp;
2486 int error, port;
2487 size_t len;
2489 if (tn == MPT_MAX_TRYS) {
2490 return (-1);
2494 * No need to reset if the IOC is already in the READY state.
2496 * Force reset if initialization failed previously.
2497 * Note that a hard_reset of the second channel of a '929
2498 * will stop operation of the first channel. Hopefully, if the
2499 * first channel is ok, the second will not require a hard
2500 * reset.
2502 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
2503 if (mpt_reset(mpt, FALSE) != MPT_OK) {
2504 return (mpt_configure_ioc(mpt, tn++, 1));
2506 needreset = 0;
2509 if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
2510 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2511 return (mpt_configure_ioc(mpt, tn++, 1));
2513 mpt2host_iocfacts_reply(&mpt->ioc_facts);
2515 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2516 mpt->ioc_facts.MsgVersion >> 8,
2517 mpt->ioc_facts.MsgVersion & 0xFF,
2518 mpt->ioc_facts.HeaderVersion >> 8,
2519 mpt->ioc_facts.HeaderVersion & 0xFF);
2522 * Now that we know request frame size, we can calculate
2523 * the actual (reasonable) segment limit for read/write I/O.
2525 * This limit is constrained by:
2527 * + The size of each area we allocate per command (and how
2528 * many chain segments we can fit into it).
2529 * + The total number of areas we've set up.
2530 * + The actual chain depth the card will allow.
2532 * The first area's segment count is limited by the I/O request
2533 * at the head of it. We cannot allocate realistically more
2534 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2535 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2538 /* total number of request areas we (can) allocate */
2539 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2541 /* converted to the number of chain areas possible */
2542 mpt->max_seg_cnt *= MPT_NRFM(mpt);
2544 /* limited by the number of chain areas the card will support */
2545 if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
2546 mpt_lprt(mpt, MPT_PRT_DEBUG,
2547 "chain depth limited to %u (from %u)\n",
2548 mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
2549 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
2552 /* converted to the number of simple sges in chain segments. */
2553 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
2555 mpt_lprt(mpt, MPT_PRT_DEBUG, "Maximum Segment Count: %u\n",
2556 mpt->max_seg_cnt);
2557 mpt_lprt(mpt, MPT_PRT_DEBUG, "MsgLength=%u IOCNumber = %d\n",
2558 mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
2559 mpt_lprt(mpt, MPT_PRT_DEBUG,
2560 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2561 "Request Frame Size %u bytes Max Chain Depth %u\n",
2562 mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
2563 mpt->ioc_facts.RequestFrameSize << 2,
2564 mpt->ioc_facts.MaxChainDepth);
2565 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCFACTS: Num Ports %d, FWImageSize %d, "
2566 "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
2567 (int)mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
2569 len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
2570 mpt->port_facts = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2571 if (mpt->port_facts == NULL) {
2572 mpt_prt(mpt, "unable to allocate memory for port facts\n");
2573 return (ENOMEM);
2577 if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
2578 (mpt->fw_uploaded == 0)) {
2579 struct mpt_map_info mi;
2582 * In some configurations, the IOC's firmware is
2583 * stored in a shared piece of system NVRAM that
2584 * is only accessable via the BIOS. In this
2585 * case, the firmware keeps a copy of firmware in
2586 * RAM until the OS driver retrieves it. Once
2587 * retrieved, we are responsible for re-downloading
2588 * the firmware after any hard-reset.
2590 mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
2591 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
2592 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2593 mpt->fw_image_size, 1, mpt->fw_image_size, 0,
2594 &mpt->fw_dmat);
2595 if (error != 0) {
2596 mpt_prt(mpt, "cannot create firmwarew dma tag\n");
2597 return (ENOMEM);
2599 error = bus_dmamem_alloc(mpt->fw_dmat,
2600 (void **)&mpt->fw_image, BUS_DMA_NOWAIT, &mpt->fw_dmap);
2601 if (error != 0) {
2602 mpt_prt(mpt, "cannot allocate firmware memory\n");
2603 bus_dma_tag_destroy(mpt->fw_dmat);
2604 return (ENOMEM);
2606 mi.mpt = mpt;
2607 mi.error = 0;
2608 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2609 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
2610 mpt->fw_phys = mi.phys;
2612 error = mpt_upload_fw(mpt);
2613 if (error != 0) {
2614 mpt_prt(mpt, "firmware upload failed.\n");
2615 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2616 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2617 mpt->fw_dmap);
2618 bus_dma_tag_destroy(mpt->fw_dmat);
2619 mpt->fw_image = NULL;
2620 return (EIO);
2622 mpt->fw_uploaded = 1;
2625 for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
2626 pfp = &mpt->port_facts[port];
2627 error = mpt_get_portfacts(mpt, 0, pfp);
2628 if (error != MPT_OK) {
2629 mpt_prt(mpt,
2630 "mpt_get_portfacts on port %d failed\n", port);
2631 kfree(mpt->port_facts, M_DEVBUF);
2632 mpt->port_facts = NULL;
2633 return (mpt_configure_ioc(mpt, tn++, 1));
2635 mpt2host_portfacts_reply(pfp);
2637 if (port > 0) {
2638 error = MPT_PRT_INFO;
2639 } else {
2640 error = MPT_PRT_DEBUG;
2642 mpt_lprt(mpt, error,
2643 "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
2644 port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
2645 pfp->MaxDevices);
2650 * XXX: Not yet supporting more than port 0
2652 pfp = &mpt->port_facts[0];
2653 if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2654 mpt->is_fc = 1;
2655 mpt->is_sas = 0;
2656 mpt->is_spi = 0;
2657 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2658 mpt->is_fc = 0;
2659 mpt->is_sas = 1;
2660 mpt->is_spi = 0;
2661 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
2662 mpt->is_fc = 0;
2663 mpt->is_sas = 0;
2664 mpt->is_spi = 1;
2665 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
2666 mpt_prt(mpt, "iSCSI not supported yet\n");
2667 return (ENXIO);
2668 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
2669 mpt_prt(mpt, "Inactive Port\n");
2670 return (ENXIO);
2671 } else {
2672 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
2673 return (ENXIO);
2677 * Set our role with what this port supports.
2679 * Note this might be changed later in different modules
2680 * if this is different from what is wanted.
2682 mpt->role = MPT_ROLE_NONE;
2683 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2684 mpt->role |= MPT_ROLE_INITIATOR;
2686 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2687 mpt->role |= MPT_ROLE_TARGET;
2691 * Enable the IOC
2693 if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
2694 mpt_prt(mpt, "unable to initialize IOC\n");
2695 return (ENXIO);
2699 * Read IOC configuration information.
2701 * We need this to determine whether or not we have certain
2702 * settings for Integrated Mirroring (e.g.).
2704 mpt_read_config_info_ioc(mpt);
2706 return (0);
2709 static int
2710 mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2712 uint32_t pptr;
2713 int val;
2715 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2716 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2717 return (EIO);
2720 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2722 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2723 mpt_prt(mpt, "IOC failed to go to run state\n");
2724 return (ENXIO);
2726 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2729 * Give it reply buffers
2731 * Do *not* exceed global credits.
2733 for (val = 0, pptr = mpt->reply_phys;
2734 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2735 pptr += MPT_REPLY_SIZE) {
2736 mpt_free_reply(mpt, pptr);
2737 if (++val == mpt->ioc_facts.GlobalCredits - 1)
2738 break;
2743 * Enable the port if asked. This is only done if we're resetting
2744 * the IOC after initial startup.
2746 if (portenable) {
2748 * Enable asynchronous event reporting
2750 mpt_send_event_request(mpt, 1);
2752 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2753 mpt_prt(mpt, "failed to enable port 0\n");
2754 return (ENXIO);
2757 return (MPT_OK);
2761 * Endian Conversion Functions- only used on Big Endian machines
2763 #if _BYTE_ORDER == _BIG_ENDIAN
2764 void
2765 mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
2768 MPT_2_HOST32(sge, FlagsLength);
2769 MPT_2_HOST32(sge, u.Address64.Low);
2770 MPT_2_HOST32(sge, u.Address64.High);
2773 void
2774 mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
2777 MPT_2_HOST16(rp, MsgVersion);
2778 MPT_2_HOST16(rp, HeaderVersion);
2779 MPT_2_HOST32(rp, MsgContext);
2780 MPT_2_HOST16(rp, IOCExceptions);
2781 MPT_2_HOST16(rp, IOCStatus);
2782 MPT_2_HOST32(rp, IOCLogInfo);
2783 MPT_2_HOST16(rp, ReplyQueueDepth);
2784 MPT_2_HOST16(rp, RequestFrameSize);
2785 MPT_2_HOST16(rp, Reserved_0101_FWVersion);
2786 MPT_2_HOST16(rp, ProductID);
2787 MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
2788 MPT_2_HOST16(rp, GlobalCredits);
2789 MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
2790 MPT_2_HOST16(rp, CurReplyFrameSize);
2791 MPT_2_HOST32(rp, FWImageSize);
2792 MPT_2_HOST32(rp, IOCCapabilities);
2793 MPT_2_HOST32(rp, FWVersion.Word);
2794 MPT_2_HOST16(rp, HighPriorityQueueDepth);
2795 MPT_2_HOST16(rp, Reserved2);
2796 mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
2797 MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
2800 void
2801 mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
2804 MPT_2_HOST16(pfp, Reserved);
2805 MPT_2_HOST16(pfp, Reserved1);
2806 MPT_2_HOST32(pfp, MsgContext);
2807 MPT_2_HOST16(pfp, Reserved2);
2808 MPT_2_HOST16(pfp, IOCStatus);
2809 MPT_2_HOST32(pfp, IOCLogInfo);
2810 MPT_2_HOST16(pfp, MaxDevices);
2811 MPT_2_HOST16(pfp, PortSCSIID);
2812 MPT_2_HOST16(pfp, ProtocolFlags);
2813 MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
2814 MPT_2_HOST16(pfp, MaxPersistentIDs);
2815 MPT_2_HOST16(pfp, MaxLanBuckets);
2816 MPT_2_HOST16(pfp, Reserved4);
2817 MPT_2_HOST32(pfp, Reserved5);
2820 void
2821 mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
2823 int i;
2825 MPT_2_HOST32(ioc2, CapabilitiesFlags);
2826 for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
2827 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
2831 void
2832 mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
2835 MPT_2_HOST16(ioc3, Reserved2);
2838 void
2839 mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
2842 MPT_2_HOST32(sp0, Capabilities);
2843 MPT_2_HOST32(sp0, PhysicalInterface);
2846 void
2847 mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
2850 MPT_2_HOST32(sp1, Configuration);
2851 MPT_2_HOST32(sp1, OnBusTimerValue);
2852 MPT_2_HOST16(sp1, IDConfig);
2855 void
2856 host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
2859 HOST_2_MPT32(sp1, Configuration);
2860 HOST_2_MPT32(sp1, OnBusTimerValue);
2861 HOST_2_MPT16(sp1, IDConfig);
2864 void
2865 mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
2867 int i;
2869 MPT_2_HOST32(sp2, PortFlags);
2870 MPT_2_HOST32(sp2, PortSettings);
2871 for (i = 0; i < sizeof(sp2->DeviceSettings) /
2872 sizeof(*sp2->DeviceSettings); i++) {
2873 MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
2877 void
2878 mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
2881 MPT_2_HOST32(sd0, NegotiatedParameters);
2882 MPT_2_HOST32(sd0, Information);
2885 void
2886 mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
2889 MPT_2_HOST32(sd1, RequestedParameters);
2890 MPT_2_HOST32(sd1, Reserved);
2891 MPT_2_HOST32(sd1, Configuration);
2894 void
2895 host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
2898 HOST_2_MPT32(sd1, RequestedParameters);
2899 HOST_2_MPT32(sd1, Reserved);
2900 HOST_2_MPT32(sd1, Configuration);
2903 void
2904 mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
2907 MPT_2_HOST32(fp0, Flags);
2908 MPT_2_HOST32(fp0, PortIdentifier);
2909 MPT_2_HOST32(fp0, WWNN.Low);
2910 MPT_2_HOST32(fp0, WWNN.High);
2911 MPT_2_HOST32(fp0, WWPN.Low);
2912 MPT_2_HOST32(fp0, WWPN.High);
2913 MPT_2_HOST32(fp0, SupportedServiceClass);
2914 MPT_2_HOST32(fp0, SupportedSpeeds);
2915 MPT_2_HOST32(fp0, CurrentSpeed);
2916 MPT_2_HOST32(fp0, MaxFrameSize);
2917 MPT_2_HOST32(fp0, FabricWWNN.Low);
2918 MPT_2_HOST32(fp0, FabricWWNN.High);
2919 MPT_2_HOST32(fp0, FabricWWPN.Low);
2920 MPT_2_HOST32(fp0, FabricWWPN.High);
2921 MPT_2_HOST32(fp0, DiscoveredPortsCount);
2922 MPT_2_HOST32(fp0, MaxInitiators);
2925 void
2926 mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
2929 MPT_2_HOST32(fp1, Flags);
2930 MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
2931 MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
2932 MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
2933 MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
2936 void
2937 host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
2940 HOST_2_MPT32(fp1, Flags);
2941 HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
2942 HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
2943 HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
2944 HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
2947 void
2948 mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
2950 int i;
2952 MPT_2_HOST16(volp, VolumeStatus.Reserved);
2953 MPT_2_HOST16(volp, VolumeSettings.Settings);
2954 MPT_2_HOST32(volp, MaxLBA);
2955 MPT_2_HOST32(volp, MaxLBAHigh);
2956 MPT_2_HOST32(volp, StripeSize);
2957 MPT_2_HOST32(volp, Reserved2);
2958 MPT_2_HOST32(volp, Reserved3);
2959 for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
2960 MPT_2_HOST16(volp, PhysDisk[i].Reserved);
2964 void
2965 mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
2968 MPT_2_HOST32(rpd0, Reserved1);
2969 MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
2970 MPT_2_HOST32(rpd0, MaxLBA);
2971 MPT_2_HOST16(rpd0, ErrorData.Reserved);
2972 MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
2973 MPT_2_HOST16(rpd0, ErrorData.SmartCount);
2976 void
2977 mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
2980 MPT_2_HOST16(vi, TotalBlocks.High);
2981 MPT_2_HOST16(vi, TotalBlocks.Low);
2982 MPT_2_HOST16(vi, BlocksRemaining.High);
2983 MPT_2_HOST16(vi, BlocksRemaining.Low);
2985 #endif