[dragonfly/port-amd64.git] / sys / dev / raid / mly / mly.c
1 /*-
2 * Copyright (c) 2000, 2001 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
27 * $FreeBSD: src/sys/dev/mly/mly.c,v 1.3.2.3 2001/03/05 20:17:24 msmith Exp $
28 * $DragonFly: src/sys/dev/raid/mly/mly.c,v 1.18 2007/04/13 02:51:34 y0netan1 Exp $
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
35 #include <sys/bus.h>
36 #include <sys/conf.h>
37 #include <sys/device.h>
38 #include <sys/ctype.h>
39 #include <sys/ioccom.h>
40 #include <sys/stat.h>
41 #include <sys/rman.h>
42 #include <sys/thread2.h>
44 #include <bus/cam/scsi/scsi_all.h>
46 #include "mlyreg.h"
47 #include "mlyio.h"
48 #include "mlyvar.h"
49 #define MLY_DEFINE_TABLES
50 #include "mly_tables.h"
52 static int mly_get_controllerinfo(struct mly_softc *sc);
53 static void mly_scan_devices(struct mly_softc *sc);
54 static void mly_rescan_btl(struct mly_softc *sc, int bus, int target);
55 static void mly_complete_rescan(struct mly_command *mc);
56 static int mly_get_eventstatus(struct mly_softc *sc);
57 static int mly_enable_mmbox(struct mly_softc *sc);
58 static int mly_flush(struct mly_softc *sc);
59 static int mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data,
60 size_t datasize, u_int8_t *status, void *sense_buffer, size_t *sense_length);
61 static void mly_fetch_event(struct mly_softc *sc);
62 static void mly_complete_event(struct mly_command *mc);
63 static void mly_process_event(struct mly_softc *sc, struct mly_event *me);
64 static void mly_periodic(void *data);
66 static int mly_immediate_command(struct mly_command *mc);
67 static int mly_start(struct mly_command *mc);
68 static void mly_complete(void *context, int pending);
70 static void mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error);
71 static int mly_alloc_commands(struct mly_softc *sc);
72 static void mly_map_command(struct mly_command *mc);
73 static void mly_unmap_command(struct mly_command *mc);
75 static int mly_fwhandshake(struct mly_softc *sc);
77 static void mly_describe_controller(struct mly_softc *sc);
78 #ifdef MLY_DEBUG
79 static void mly_printstate(struct mly_softc *sc);
80 static void mly_print_command(struct mly_command *mc);
81 static void mly_print_packet(struct mly_command *mc);
82 static void mly_panic(struct mly_softc *sc, char *reason);
83 #endif
84 void mly_print_controller(int controller);
86 static d_open_t mly_user_open;
87 static d_close_t mly_user_close;
88 static d_ioctl_t mly_user_ioctl;
89 static int mly_user_command(struct mly_softc *sc, struct mly_user_command *uc);
90 static int mly_user_health(struct mly_softc *sc, struct mly_user_health *uh);
92 #define MLY_CDEV_MAJOR 158
94 static struct dev_ops mly_ops = {
95 { "mly", MLY_CDEV_MAJOR, 0 },
96 .d_open = mly_user_open,
97 .d_close = mly_user_close,
98 .d_ioctl = mly_user_ioctl,
101 /********************************************************************************
102 ********************************************************************************
103 Device Interface
104 ********************************************************************************
105 ********************************************************************************/
107 /********************************************************************************
108 * Initialise the controller and softc
111 mly_attach(struct mly_softc *sc)
113 int error;
115 debug_called(1);
117 callout_init(&sc->mly_periodic);
120 * Initialise per-controller queues.
122 mly_initq_free(sc);
123 mly_initq_ready(sc);
124 mly_initq_busy(sc);
125 mly_initq_complete(sc);
127 #if defined(__FreeBSD__) && __FreeBSD_version >= 500005
129 * Initialise command-completion task.
131 TASK_INIT(&sc->mly_task_complete, 0, mly_complete, sc);
132 #endif
134 /* disable interrupts before we start talking to the controller */
135 MLY_MASK_INTERRUPTS(sc);
138 * Wait for the controller to come ready, handshake with the firmware if required.
139 * This is typically only necessary on platforms where the controller BIOS does not
140 * run.
142 if ((error = mly_fwhandshake(sc)))
143 return(error);
146 * Allocate command buffers
148 if ((error = mly_alloc_commands(sc)))
149 return(error);
152 * Obtain controller feature information
154 if ((error = mly_get_controllerinfo(sc)))
155 return(error);
158 * Get the current event counter for health purposes, populate the initial
159 * health status buffer.
161 if ((error = mly_get_eventstatus(sc)))
162 return(error);
165 * Enable memory-mailbox mode
167 if ((error = mly_enable_mmbox(sc)))
168 return(error);
171 * Attach to CAM.
173 if ((error = mly_cam_attach(sc)))
174 return(error);
177 * Print a little information about the controller
179 mly_describe_controller(sc);
182 * Mark all attached devices for rescan
184 mly_scan_devices(sc);
187 * Instigate the first status poll immediately. Rescan completions won't
188 * happen until interrupts are enabled, which should still be before
189 * the SCSI subsystem gets to us. (XXX assuming CAM and interrupt-driven
190 * discovery here...)
192 mly_periodic((void *)sc);
195 * Create the control device.
197 dev_ops_add(&mly_ops, -1, device_get_unit(sc->mly_dev));
198 sc->mly_dev_t = make_dev(&mly_ops, device_get_unit(sc->mly_dev),
199 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
200 "mly%d", device_get_unit(sc->mly_dev));
201 sc->mly_dev_t->si_drv1 = sc;
203 /* enable interrupts now */
204 MLY_UNMASK_INTERRUPTS(sc);
206 return(0);
209 /********************************************************************************
210 * Bring the controller to a state where it can be safely left alone.
212 void
213 mly_detach(struct mly_softc *sc)
216 debug_called(1);
218 /* kill the periodic event */
219 callout_stop(&sc->mly_periodic);
221 sc->mly_state |= MLY_STATE_SUSPEND;
223 /* flush controller */
224 mly_printf(sc, "flushing cache...");
225 kprintf("%s\n", mly_flush(sc) ? "failed" : "done");
227 MLY_MASK_INTERRUPTS(sc);
230 /********************************************************************************
231 ********************************************************************************
232 Command Wrappers
233 ********************************************************************************
234 ********************************************************************************/
236 /********************************************************************************
237 * Fill in the mly_controllerinfo and mly_controllerparam fields in the softc.
239 static int
240 mly_get_controllerinfo(struct mly_softc *sc)
242 struct mly_command_ioctl mci;
243 u_int8_t status;
244 int error;
246 debug_called(1);
248 if (sc->mly_controllerinfo != NULL)
249 kfree(sc->mly_controllerinfo, M_DEVBUF);
251 /* build the getcontrollerinfo ioctl and send it */
252 bzero(&mci, sizeof(mci));
253 sc->mly_controllerinfo = NULL;
254 mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
255 if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerinfo, sizeof(*sc->mly_controllerinfo),
256 &status, NULL, NULL)))
257 return(error);
258 if (status != 0)
259 return(EIO);
261 if (sc->mly_controllerparam != NULL)
262 kfree(sc->mly_controllerparam, M_DEVBUF);
264 /* build the getcontrollerparameter ioctl and send it */
265 bzero(&mci, sizeof(mci));
266 sc->mly_controllerparam = NULL;
267 mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
268 if ((error = mly_ioctl(sc, &mci, (void **)&sc->mly_controllerparam, sizeof(*sc->mly_controllerparam),
269 &status, NULL, NULL)))
270 return(error);
271 if (status != 0)
272 return(EIO);
274 return(0);
277 /********************************************************************************
278 * Schedule all possible devices for a rescan.
281 static void
282 mly_scan_devices(struct mly_softc *sc)
284 int bus, target, nchn;
286 debug_called(1);
289 * Clear any previous BTL information.
291 bzero(&sc->mly_btl, sizeof(sc->mly_btl));
294 * Mark all devices as requiring a rescan, and let the early periodic scan collect them.
296 nchn = sc->mly_controllerinfo->physical_channels_present +
297 sc->mly_controllerinfo->virtual_channels_present;
298 for (bus = 0; bus < nchn; bus++)
299 for (target = 0; target < MLY_MAX_TARGETS; target++)
300 sc->mly_btl[bus][target].mb_flags = MLY_BTL_RESCAN;
304 /********************************************************************************
305 * Rescan a device, possibly as a consequence of getting an event which suggests
306 * that it may have changed.
308 static void
309 mly_rescan_btl(struct mly_softc *sc, int bus, int target)
311 struct mly_command *mc;
312 struct mly_command_ioctl *mci;
314 debug_called(2);
316 /* get a command */
317 mc = NULL;
318 if (mly_alloc_command(sc, &mc))
319 return; /* we'll be retried soon */
321 /* set up the data buffer */
322 mc->mc_data = kmalloc(sizeof(union mly_devinfo), M_DEVBUF, M_INTWAIT | M_ZERO);
323 mc->mc_flags |= MLY_CMD_DATAIN;
324 mc->mc_complete = mly_complete_rescan;
326 sc->mly_btl[bus][target].mb_flags &= ~MLY_BTL_RESCAN;
329 * Build the ioctl.
331 * At this point we are committed to sending this request, as it
332 * will be the only one constructed for this particular update.
334 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
335 mci->opcode = MDACMD_IOCTL;
336 mci->addr.phys.controller = 0;
337 mci->timeout.value = 30;
338 mci->timeout.scale = MLY_TIMEOUT_SECONDS;
339 if (bus >= sc->mly_controllerinfo->physical_channels_present) {
340 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getlogdevinfovalid);
341 mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
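/*
 * Logical devices are numbered consecutively across the virtual channels,
 * MLY_MAX_TARGETS per channel, so the bus/target pair collapses into a
 * single logical device number below; mly_complete_rescan reverses the
 * mapping with MLY_LOGDEV_BUS/MLY_LOGDEV_TARGET.
 */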
342 mci->addr.log.logdev = ((bus - sc->mly_controllerinfo->physical_channels_present) * MLY_MAX_TARGETS)
343 + target;
344 debug(2, "logical device %d", mci->addr.log.logdev);
345 } else {
346 mc->mc_length = mci->data_size = sizeof(struct mly_ioctl_getphysdevinfovalid);
347 mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
348 mci->addr.phys.lun = 0;
349 mci->addr.phys.target = target;
350 mci->addr.phys.channel = bus;
351 debug(2, "physical device %d:%d", mci->addr.phys.channel, mci->addr.phys.target);
355 * Use the ready queue to get this command dispatched.
357 mly_enqueue_ready(mc);
358 mly_startio(sc);
361 /********************************************************************************
362 * Handle the completion of a rescan operation
364 static void
365 mly_complete_rescan(struct mly_command *mc)
367 struct mly_softc *sc = mc->mc_sc;
368 struct mly_ioctl_getlogdevinfovalid *ldi;
369 struct mly_ioctl_getphysdevinfovalid *pdi;
370 int bus, target;
372 debug_called(2);
374 /* iff the command completed OK, we should use the result to update our data */
375 if (mc->mc_status == 0) {
376 if (mc->mc_length == sizeof(*ldi)) {
377 ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
378 bus = MLY_LOGDEV_BUS(sc, ldi->logical_device_number);
379 target = MLY_LOGDEV_TARGET(ldi->logical_device_number);
380 sc->mly_btl[bus][target].mb_flags = MLY_BTL_LOGICAL; /* clears all other flags */
381 sc->mly_btl[bus][target].mb_type = ldi->raid_level;
382 sc->mly_btl[bus][target].mb_state = ldi->state;
383 debug(2, "BTL rescan for %d returns %s, %s", ldi->logical_device_number,
384 mly_describe_code(mly_table_device_type, ldi->raid_level),
385 mly_describe_code(mly_table_device_state, ldi->state));
386 } else if (mc->mc_length == sizeof(*pdi)) {
387 pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;
388 bus = pdi->channel;
389 target = pdi->target;
390 sc->mly_btl[bus][target].mb_flags = MLY_BTL_PHYSICAL; /* clears all other flags */
391 sc->mly_btl[bus][target].mb_type = MLY_DEVICE_TYPE_PHYSICAL;
392 sc->mly_btl[bus][target].mb_state = pdi->state;
393 sc->mly_btl[bus][target].mb_speed = pdi->speed;
394 sc->mly_btl[bus][target].mb_width = pdi->width;
395 if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
396 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_PROTECTED;
397 debug(2, "BTL rescan for %d:%d returns %s", bus, target,
398 mly_describe_code(mly_table_device_state, pdi->state));
399 } else {
400 mly_printf(sc, "BTL rescan result corrupted\n");
402 } else {
404 * A request sent for a device beyond the last device present will fail.
405 * We don't care about this, so we do nothing about it.
408 kfree(mc->mc_data, M_DEVBUF);
409 mly_release_command(mc);
412 /********************************************************************************
413 * Get the current health status and set the 'next event' counter to suit.
415 static int
416 mly_get_eventstatus(struct mly_softc *sc)
418 struct mly_command_ioctl mci;
419 struct mly_health_status *mh;
420 u_int8_t status;
421 int error;
423 /* build the gethealthstatus ioctl and send it */
424 bzero(&mci, sizeof(mci));
425 mh = NULL;
426 mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;
428 if ((error = mly_ioctl(sc, &mci, (void **)&mh, sizeof(*mh), &status, NULL, NULL)))
429 return(error);
430 if (status != 0)
431 return(EIO);
433 /* get the event counter */
434 sc->mly_event_change = mh->change_counter;
435 sc->mly_event_waiting = mh->next_event;
436 sc->mly_event_counter = mh->next_event;
438 /* save the health status into the memory mailbox */
439 bcopy(mh, &sc->mly_mmbox->mmm_health.status, sizeof(*mh));
441 debug(1, "initial change counter %d, event counter %d", mh->change_counter, mh->next_event);
443 kfree(mh, M_DEVBUF);
444 return(0);
447 /********************************************************************************
448 * Enable the memory mailbox mode.
450 static int
451 mly_enable_mmbox(struct mly_softc *sc)
453 struct mly_command_ioctl mci;
454 u_int8_t *sp, status;
455 int error;
457 debug_called(1);
459 /* build the ioctl and send it */
460 bzero(&mci, sizeof(mci));
461 mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;
462 /* set buffer addresses */
463 mci.param.setmemorymailbox.command_mailbox_physaddr =
464 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
465 mci.param.setmemorymailbox.status_mailbox_physaddr =
466 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
467 mci.param.setmemorymailbox.health_buffer_physaddr =
468 sc->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
470 /* set buffer sizes - abuse of data_size field is revolting */
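/*
 * The low byte of data_size carries the command mailbox size and the next
 * byte the status mailbox size, both in kilobytes; the health buffer size
 * travels in its own field below.
 */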
471 sp = (u_int8_t *)&mci.data_size;
472 sp[0] = ((sizeof(union mly_command_packet) * MLY_MMBOX_COMMANDS) / 1024);
473 sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) / 1024;
474 mci.param.setmemorymailbox.health_buffer_size = sizeof(union mly_health_region) / 1024;
476 debug(1, "memory mailbox at %p (0x%llx/%d 0x%llx/%d 0x%llx/%d", sc->mly_mmbox,
477 mci.param.setmemorymailbox.command_mailbox_physaddr, sp[0],
478 mci.param.setmemorymailbox.status_mailbox_physaddr, sp[1],
479 mci.param.setmemorymailbox.health_buffer_physaddr,
480 mci.param.setmemorymailbox.health_buffer_size);
482 if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
483 return(error);
484 if (status != 0)
485 return(EIO);
486 sc->mly_state |= MLY_STATE_MMBOX_ACTIVE;
487 debug(1, "memory mailbox active");
488 return(0);
491 /********************************************************************************
492 * Flush all pending I/O from the controller.
494 static int
495 mly_flush(struct mly_softc *sc)
497 struct mly_command_ioctl mci;
498 u_int8_t status;
499 int error;
501 debug_called(1);
503 /* build the ioctl */
504 bzero(&mci, sizeof(mci));
505 mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
506 mci.param.deviceoperation.operation_device = MLY_OPDEVICE_PHYSICAL_CONTROLLER;
508 /* pass it off to the controller */
509 if ((error = mly_ioctl(sc, &mci, NULL, 0, &status, NULL, NULL)))
510 return(error);
512 return((status == 0) ? 0 : EIO);
515 /********************************************************************************
516 * Perform an ioctl command.
518 * If (data) is not NULL, the command requires data transfer. If (*data) is NULL
519 * the command requires data transfer from the controller, and we will allocate
520 * a buffer for it. If (*data) is not NULL, the command requires data transfer
521 * to the controller.
523 * XXX passing in the whole ioctl structure is ugly. Better ideas?
525 * XXX we don't even try to handle the case where datasize > 4k. We should.
527 static int
528 mly_ioctl(struct mly_softc *sc, struct mly_command_ioctl *ioctl, void **data, size_t datasize,
529 u_int8_t *status, void *sense_buffer, size_t *sense_length)
531 struct mly_command *mc;
532 struct mly_command_ioctl *mci;
533 int error;
535 debug_called(1);
537 mc = NULL;
538 if (mly_alloc_command(sc, &mc)) {
539 error = ENOMEM;
540 goto out;
543 /* copy the ioctl structure, but save some important fields and then fixup */
544 mci = &mc->mc_packet->ioctl;
545 ioctl->sense_buffer_address = mci->sense_buffer_address;
546 ioctl->maximum_sense_size = mci->maximum_sense_size;
547 *mci = *ioctl;
548 mci->opcode = MDACMD_IOCTL;
549 mci->timeout.value = 30;
550 mci->timeout.scale = MLY_TIMEOUT_SECONDS;
552 /* handle the data buffer */
553 if (data != NULL) {
554 if (*data == NULL) {
555 /* allocate data buffer */
556 mc->mc_data = kmalloc(datasize, M_DEVBUF, M_INTWAIT);
557 mc->mc_flags |= MLY_CMD_DATAIN;
558 } else {
559 mc->mc_data = *data;
560 mc->mc_flags |= MLY_CMD_DATAOUT;
562 mc->mc_length = datasize;
563 mc->mc_packet->generic.data_size = datasize;
566 /* run the command */
567 if ((error = mly_immediate_command(mc)))
568 goto out;
570 /* clean up and return any data */
571 *status = mc->mc_status;
572 if ((mc->mc_sense > 0) && (sense_buffer != NULL)) {
573 bcopy(mc->mc_packet, sense_buffer, mc->mc_sense);
574 *sense_length = mc->mc_sense;
575 goto out;
578 /* should we return a data pointer? */
579 if ((data != NULL) && (*data == NULL))
580 *data = mc->mc_data;
582 /* command completed OK */
583 error = 0;
585 out:
586 if (mc != NULL) {
587 /* do we need to free a data buffer we allocated? */
588 if (error && (mc->mc_data != NULL) && (*data == NULL))
589 kfree(mc->mc_data, M_DEVBUF);
590 mly_release_command(mc);
592 return(error);
595 /********************************************************************************
596 * Fetch one event from the controller.
598 static void
599 mly_fetch_event(struct mly_softc *sc)
601 struct mly_command *mc;
602 struct mly_command_ioctl *mci;
603 u_int32_t event;
605 debug_called(2);
607 /* get a command */
608 mc = NULL;
609 if (mly_alloc_command(sc, &mc))
610 return; /* we'll get retried the next time a command completes */
612 /* set up the data buffer */
613 mc->mc_data = kmalloc(sizeof(struct mly_event), M_DEVBUF, M_INTWAIT|M_ZERO);
614 mc->mc_length = sizeof(struct mly_event);
615 mc->mc_flags |= MLY_CMD_DATAIN;
616 mc->mc_complete = mly_complete_event;
619 * Get an event number to fetch. It's possible that we've raced with another
620 * context for the last event, in which case there will be no more events.
622 crit_enter();
623 if (sc->mly_event_counter == sc->mly_event_waiting) {
624 mly_release_command(mc);
625 crit_exit();
626 return;
628 event = sc->mly_event_counter++;
629 crit_exit();
632 * Build the ioctl.
634 * At this point we are committed to sending this request, as it
635 * will be the only one constructed for this particular event number.
637 mci = (struct mly_command_ioctl *)&mc->mc_packet->ioctl;
638 mci->opcode = MDACMD_IOCTL;
639 mci->data_size = sizeof(struct mly_event);
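/*
 * The 32-bit event sequence number is scattered across the packet: the low
 * 16 bits go in param.getevent.sequence_number_low below, bits 16-23 in
 * addr.phys.lun and bits 24-31 in addr.phys.target.
 */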
640 mci->addr.phys.lun = (event >> 16) & 0xff;
641 mci->addr.phys.target = (event >> 24) & 0xff;
642 mci->addr.phys.channel = 0;
643 mci->addr.phys.controller = 0;
644 mci->timeout.value = 30;
645 mci->timeout.scale = MLY_TIMEOUT_SECONDS;
646 mci->sub_ioctl = MDACIOCTL_GETEVENT;
647 mci->param.getevent.sequence_number_low = event & 0xffff;
649 debug(2, "fetch event %u", event);
652 * Use the ready queue to get this command dispatched.
654 mly_enqueue_ready(mc);
655 mly_startio(sc);
658 /********************************************************************************
659 * Handle the completion of an event poll.
661 * Note that we don't actually have to instigate another poll; the completion of
662 * this command will trigger that if there are any more events to poll for.
664 static void
665 mly_complete_event(struct mly_command *mc)
667 struct mly_softc *sc = mc->mc_sc;
668 struct mly_event *me = (struct mly_event *)mc->mc_data;
670 debug_called(2);
673 * If the event was successfully fetched, process it.
675 if (mc->mc_status == SCSI_STATUS_OK) {
676 mly_process_event(sc, me);
677 kfree(me, M_DEVBUF);
679 mly_release_command(mc);
682 /********************************************************************************
683 * Process a controller event.
685 static void
686 mly_process_event(struct mly_softc *sc, struct mly_event *me)
688 struct scsi_sense_data *ssd = (struct scsi_sense_data *)&me->sense[0];
689 char *fp, *tp;
690 int bus, target, event, class, action;
693 * Errors can be reported using vendor-unique sense data. In this case, the
694 * event code will be 0x1c (Request sense data present), the sense key will
695 * be 0x09 (vendor specific), the MSB of the ASC will be set, and the
696 * actual event code will be a 16-bit value comprised of the ASCQ (low byte)
697 * and low seven bits of the ASC (low seven bits of the high byte).
699 if ((me->code == 0x1c) &&
700 ((ssd->flags & SSD_KEY) == SSD_KEY_Vendor_Specific) &&
701 (ssd->add_sense_code & 0x80)) {
702 event = ((int)(ssd->add_sense_code & ~0x80) << 8) + ssd->add_sense_code_qual;
703 } else {
704 event = me->code;
707 /* look up event, get codes */
708 fp = mly_describe_code(mly_table_event, event);
710 debug(2, "Event %d code 0x%x", me->sequence_number, me->code);
712 /* quiet event? */
713 class = fp[0];
714 if (isupper(class) && bootverbose)
715 class = tolower(class);
717 /* get action code, text string */
718 action = fp[1];
719 tp = &fp[2];
722 * Print some information about the event.
724 * This code uses a table derived from the corresponding portion of the Linux
725 * driver, and thus the parser is very similar.
727 switch(class) {
728 case 'p': /* error on physical device */
729 mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
730 if (action == 'r')
731 sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
732 break;
733 case 'l': /* error on logical unit */
734 case 'm': /* message about logical unit */
735 bus = MLY_LOGDEV_BUS(sc, me->lun);
736 target = MLY_LOGDEV_TARGET(me->lun);
737 mly_name_device(sc, bus, target);
738 mly_printf(sc, "logical device %d (%s) %s\n", me->lun, sc->mly_btl[bus][target].mb_name, tp);
739 if (action == 'r')
740 sc->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
741 break;
742 break;
743 case 's': /* report of sense data */
744 if (((ssd->flags & SSD_KEY) == SSD_KEY_NO_SENSE) ||
745 (((ssd->flags & SSD_KEY) == SSD_KEY_NOT_READY) &&
746 (ssd->add_sense_code == 0x04) &&
747 ((ssd->add_sense_code_qual == 0x01) || (ssd->add_sense_code_qual == 0x02))))
748 break; /* ignore NO_SENSE or NOT_READY in one case */
750 mly_printf(sc, "physical device %d:%d %s\n", me->channel, me->target, tp);
751 mly_printf(sc, " sense key %d asc %02x ascq %02x\n",
752 ssd->flags & SSD_KEY, ssd->add_sense_code, ssd->add_sense_code_qual);
753 mly_printf(sc, " info %4D csi %4D\n", ssd->info, "", ssd->cmd_spec_info, "");
754 if (action == 'r')
755 sc->mly_btl[me->channel][me->target].mb_flags |= MLY_BTL_RESCAN;
756 break;
757 case 'e':
758 mly_printf(sc, tp, me->target, me->lun);
759 break;
760 case 'c':
761 mly_printf(sc, "controller %s\n", tp);
762 break;
763 case '?':
764 mly_printf(sc, "%s - %d\n", tp, me->code);
765 break;
766 default: /* probably a 'noisy' event being ignored */
767 break;
771 /********************************************************************************
772 * Perform periodic activities.
774 static void
775 mly_periodic(void *data)
777 struct mly_softc *sc = (struct mly_softc *)data;
778 int nchn, bus, target;
780 debug_called(2);
783 * Scan devices.
785 nchn = sc->mly_controllerinfo->physical_channels_present +
786 sc->mly_controllerinfo->virtual_channels_present;
787 for (bus = 0; bus < nchn; bus++) {
788 for (target = 0; target < MLY_MAX_TARGETS; target++) {
790 /* ignore the controller in this scan */
791 if (target == sc->mly_controllerparam->initiator_id)
792 continue;
794 /* perform device rescan? */
795 if (sc->mly_btl[bus][target].mb_flags & MLY_BTL_RESCAN)
796 mly_rescan_btl(sc, bus, target);
800 callout_reset(&sc->mly_periodic, hz, mly_periodic, sc);
803 /********************************************************************************
804 ********************************************************************************
805 Command Processing
806 ********************************************************************************
807 ********************************************************************************/
809 /********************************************************************************
810 * Run a command and wait for it to complete.
813 static int
814 mly_immediate_command(struct mly_command *mc)
816 struct mly_softc *sc = mc->mc_sc;
817 int error;
819 debug_called(2);
821 /* spinning in a critical section is ugly, but we're only used during controller init */
822 crit_enter();
823 if ((error = mly_start(mc))) {
824 crit_exit();
825 return(error);
828 if (sc->mly_state & MLY_STATE_INTERRUPTS_ON) {
829 /* sleep on the command */
830 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
831 tsleep(mc, 0, "mlywait", 0);
833 } else {
834 /* spin and collect status while we do */
835 while(!(mc->mc_flags & MLY_CMD_COMPLETE)) {
836 mly_done(mc->mc_sc);
839 crit_exit();
840 return(0);
843 /********************************************************************************
844 * Start as much queued I/O as possible on the controller
846 void
847 mly_startio(struct mly_softc *sc)
849 struct mly_command *mc;
851 debug_called(2);
853 for (;;) {
855 /* try for a ready command */
856 mc = mly_dequeue_ready(sc);
858 /* try to build a command from a queued ccb */
859 if (!mc)
860 mly_cam_command(sc, &mc);
862 /* no command == nothing to do */
863 if (!mc)
864 break;
866 /* try to post the command */
867 if (mly_start(mc)) {
868 /* controller busy, or no resources - defer for later */
869 mly_requeue_ready(mc);
870 break;
875 /********************************************************************************
876 * Deliver a command to the controller; allocate controller resources at the
877 * last moment.
879 static int
880 mly_start(struct mly_command *mc)
882 struct mly_softc *sc = mc->mc_sc;
883 union mly_command_packet *pkt;
885 debug_called(2);
888 * Set the command up for delivery to the controller.
890 mly_map_command(mc);
891 mc->mc_packet->generic.command_id = mc->mc_slot;
893 crit_enter();
896 * Do we have to use the hardware mailbox?
898 if (!(sc->mly_state & MLY_STATE_MMBOX_ACTIVE)) {
900 * Check to see if the controller is ready for us.
902 if (MLY_IDBR_TRUE(sc, MLY_HM_CMDSENT)) {
903 crit_exit();
904 return(EBUSY);
906 mc->mc_flags |= MLY_CMD_BUSY;
909 * It's ready, send the command.
911 MLY_SET_MBOX(sc, sc->mly_command_mailbox, &mc->mc_packetphys);
912 MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_CMDSENT);
914 } else { /* use memory-mailbox mode */
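/*
 * In memory-mailbox mode the command is copied into the next slot of the
 * shared ring; the flag byte is written last, behind write barriers, so the
 * controller never sees a partially-copied packet, and the index wraps
 * modulo MLY_MMBOX_COMMANDS.
 */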
916 pkt = &sc->mly_mmbox->mmm_command[sc->mly_mmbox_command_index];
918 /* check to see if the next index is free yet */
919 if (pkt->mmbox.flag != 0) {
920 crit_exit();
921 return(EBUSY);
923 mc->mc_flags |= MLY_CMD_BUSY;
925 /* copy in new command */
926 bcopy(mc->mc_packet->mmbox.data, pkt->mmbox.data, sizeof(pkt->mmbox.data));
927 /* barrier to ensure completion of previous write before we write the flag */
928 bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE); /* tag/handle? */
929 /* copy flag last */
930 pkt->mmbox.flag = mc->mc_packet->mmbox.flag;
931 /* barrier to ensure completion of previous write before we notify the controller */
932 bus_space_barrier(NULL, NULL, 0, 0, BUS_SPACE_BARRIER_WRITE); /* tag/handle */
934 /* signal controller, update index */
935 MLY_SET_REG(sc, sc->mly_idbr, MLY_AM_CMDSENT);
936 sc->mly_mmbox_command_index = (sc->mly_mmbox_command_index + 1) % MLY_MMBOX_COMMANDS;
939 mly_enqueue_busy(mc);
940 crit_exit();
941 return(0);
944 /********************************************************************************
945 * Pick up command status from the controller, schedule a completion event
947 void
948 mly_done(struct mly_softc *sc)
950 struct mly_command *mc;
951 union mly_status_packet *sp;
952 u_int16_t slot;
953 int worked;
955 crit_enter();
956 worked = 0;
958 /* pick up hardware-mailbox commands */
959 if (MLY_ODBR_TRUE(sc, MLY_HM_STSREADY)) {
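/*
 * The hardware status mailbox is laid out as: bytes 0-1 command slot,
 * byte 2 status, byte 3 sense length, bytes 4-7 residual count.
 */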
960 slot = MLY_GET_REG2(sc, sc->mly_status_mailbox);
961 if (slot < MLY_SLOT_MAX) {
962 mc = &sc->mly_command[slot - MLY_SLOT_START];
963 mc->mc_status = MLY_GET_REG(sc, sc->mly_status_mailbox + 2);
964 mc->mc_sense = MLY_GET_REG(sc, sc->mly_status_mailbox + 3);
965 mc->mc_resid = MLY_GET_REG4(sc, sc->mly_status_mailbox + 4);
966 mly_remove_busy(mc);
967 mc->mc_flags &= ~MLY_CMD_BUSY;
968 mly_enqueue_complete(mc);
969 worked = 1;
970 } else {
971 /* slot 0xffff may mean "extremely bogus command" */
972 mly_printf(sc, "got HM completion for illegal slot %u\n", slot);
974 /* unconditionally acknowledge status */
975 MLY_SET_REG(sc, sc->mly_odbr, MLY_HM_STSREADY);
976 MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
979 /* pick up memory-mailbox commands */
980 if (MLY_ODBR_TRUE(sc, MLY_AM_STSREADY)) {
981 for (;;) {
982 sp = &sc->mly_mmbox->mmm_status[sc->mly_mmbox_status_index];
984 /* check for more status */
985 if (sp->mmbox.flag == 0)
986 break;
988 /* get slot number */
989 slot = sp->status.command_id;
990 if (slot < MLY_SLOT_MAX) {
991 mc = &sc->mly_command[slot - MLY_SLOT_START];
992 mc->mc_status = sp->status.status;
993 mc->mc_sense = sp->status.sense_length;
994 mc->mc_resid = sp->status.residue;
995 mly_remove_busy(mc);
996 mc->mc_flags &= ~MLY_CMD_BUSY;
997 mly_enqueue_complete(mc);
998 worked = 1;
999 } else {
1000 /* slot 0xffff may mean "extremely bogus command" */
1001 mly_printf(sc, "got AM completion for illegal slot %u at %d\n",
1002 slot, sc->mly_mmbox_status_index);
1005 /* clear and move to next index */
1006 sp->mmbox.flag = 0;
1007 sc->mly_mmbox_status_index = (sc->mly_mmbox_status_index + 1) % MLY_MMBOX_STATUS;
1009 /* acknowledge that we have collected status value(s) */
1010 MLY_SET_REG(sc, sc->mly_odbr, MLY_AM_STSREADY);
1013 crit_exit();
1014 if (worked) {
1015 #if defined(__FreeBSD__) && __FreeBSD_version >= 500005
1016 if (sc->mly_state & MLY_STATE_INTERRUPTS_ON)
1017 taskqueue_enqueue(taskqueue_swi, &sc->mly_task_complete);
1018 else
1019 #endif
1020 mly_complete(sc, 0);
1024 /********************************************************************************
1025 * Process completed commands
1027 static void
1028 mly_complete(void *context, int pending)
1030 struct mly_softc *sc = (struct mly_softc *)context;
1031 struct mly_command *mc;
1032 void (* mc_complete)(struct mly_command *mc);
1035 debug_called(2);
1038 * Spin pulling commands off the completed queue and processing them.
1040 while ((mc = mly_dequeue_complete(sc)) != NULL) {
1043 * Free controller resources, mark command complete.
1045 * Note that as soon as we mark the command complete, it may be freed
1046 * out from under us, so we need to save the mc_complete field in
1047 * order to later avoid dereferencing mc. (We would not expect to
1048 * have a polling/sleeping consumer with mc_complete != NULL).
1050 mly_unmap_command(mc);
1051 mc_complete = mc->mc_complete;
1052 mc->mc_flags |= MLY_CMD_COMPLETE;
1055 * Call completion handler or wake up sleeping consumer.
1057 if (mc_complete != NULL) {
1058 mc_complete(mc);
1059 } else {
1060 wakeup(mc);
1065 * We may have freed up controller resources which would allow us
1066 * to push more commands onto the controller, so we check here.
1068 mly_startio(sc);
1071 * The controller may have updated the health status information,
1072 * so check for it here.
1074 * Note that we only check for health status after a completed command. It
1075 * might be wise to ping the controller occasionally if it's been idle for
1076 * a while just to check up on it. While a filesystem is mounted, or I/O is
1077 * otherwise active, this isn't really an issue.
1079 if (sc->mly_mmbox->mmm_health.status.change_counter != sc->mly_event_change) {
1080 sc->mly_event_change = sc->mly_mmbox->mmm_health.status.change_counter;
1081 debug(1, "event change %d, event status update, %d -> %d", sc->mly_event_change,
1082 sc->mly_event_waiting, sc->mly_mmbox->mmm_health.status.next_event);
1083 sc->mly_event_waiting = sc->mly_mmbox->mmm_health.status.next_event;
1085 /* wake up anyone that might be interested in this */
1086 wakeup(&sc->mly_event_change);
1088 if (sc->mly_event_counter != sc->mly_event_waiting)
1089 mly_fetch_event(sc);
1092 /********************************************************************************
1093 ********************************************************************************
1094 Command Buffer Management
1095 ********************************************************************************
1096 ********************************************************************************/
1098 /********************************************************************************
1099 * Allocate a command.
1102 mly_alloc_command(struct mly_softc *sc, struct mly_command **mcp)
1104 struct mly_command *mc;
1106 debug_called(3);
1108 if ((mc = mly_dequeue_free(sc)) == NULL)
1109 return(ENOMEM);
1111 *mcp = mc;
1112 return(0);
1115 /********************************************************************************
1116 * Release a command back to the freelist.
1118 void
1119 mly_release_command(struct mly_command *mc)
1121 debug_called(3);
1124 * Clear out fields that could cause confusion if a consumer fails to
1125 * set them when the command is later reallocated.
1127 mc->mc_data = NULL;
1128 mc->mc_flags = 0;
1129 mc->mc_complete = NULL;
1130 mc->mc_private = NULL;
1133 * By default, we set up to overwrite the command packet with
1134 * sense information.
1136 mc->mc_packet->generic.sense_buffer_address = mc->mc_packetphys;
1137 mc->mc_packet->generic.maximum_sense_size = sizeof(union mly_command_packet);
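/*
 * Pointing the sense buffer at the command packet itself means a command
 * issued without a private sense buffer still captures sense data; mly_ioctl
 * copies it back out of the packet when mc_sense is nonzero.
 */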
1139 mly_enqueue_free(mc);
1142 /********************************************************************************
1143 * Map helper for command allocation.
1145 static void
1146 mly_alloc_commands_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1148 struct mly_softc *sc = (struct mly_softc *)arg;
1150 debug_called(2);
1152 sc->mly_packetphys = segs[0].ds_addr;
1155 /********************************************************************************
1156 * Allocate and initialise command and packet structures.
1158 static int
1159 mly_alloc_commands(struct mly_softc *sc)
1161 struct mly_command *mc;
1162 int i;
1165 * Allocate enough space for all the command packets in one chunk and
1166 * map them permanently into controller-visible space.
1168 if (bus_dmamem_alloc(sc->mly_packet_dmat, (void **)&sc->mly_packet,
1169 BUS_DMA_NOWAIT, &sc->mly_packetmap)) {
1170 return(ENOMEM);
1172 bus_dmamap_load(sc->mly_packet_dmat, sc->mly_packetmap, sc->mly_packet,
1173 MLY_MAXCOMMANDS * sizeof(union mly_command_packet),
1174 mly_alloc_commands_map, sc, 0);
1176 for (i = 0; i < MLY_MAXCOMMANDS; i++) {
1177 mc = &sc->mly_command[i];
1178 bzero(mc, sizeof(*mc));
1179 mc->mc_sc = sc;
1180 mc->mc_slot = MLY_SLOT_START + i;
1181 mc->mc_packet = sc->mly_packet + i;
1182 mc->mc_packetphys = sc->mly_packetphys + (i * sizeof(union mly_command_packet));
1183 if (!bus_dmamap_create(sc->mly_buffer_dmat, 0, &mc->mc_datamap))
1184 mly_release_command(mc);
1186 return(0);
1189 /********************************************************************************
1190 * Command-mapping helper function - populate this command's s/g table
1191 * with the s/g entries for its data.
1193 static void
1194 mly_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1196 struct mly_command *mc = (struct mly_command *)arg;
1197 struct mly_softc *sc = mc->mc_sc;
1198 struct mly_command_generic *gen = &(mc->mc_packet->generic);
1199 struct mly_sg_entry *sg;
1200 int i, tabofs;
1202 debug_called(3);
1204 /* can we use the transfer structure directly? */
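/*
 * Up to two segments fit in the packet's inline s/g array; larger transfers
 * use this command's slice of the preallocated mly_sg_table
 * (MLY_MAXSGENTRIES entries per slot) and set extended_sg_table.
 */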
1205 if (nseg <= 2) {
1206 sg = &gen->transfer.direct.sg[0];
1207 gen->command_control.extended_sg_table = 0;
1208 } else {
1209 tabofs = ((mc->mc_slot - MLY_SLOT_START) * MLY_MAXSGENTRIES);
1210 sg = sc->mly_sg_table + tabofs;
1211 gen->transfer.indirect.entries[0] = nseg;
1212 gen->transfer.indirect.table_physaddr[0] = sc->mly_sg_busaddr + (tabofs * sizeof(struct mly_sg_entry));
1213 gen->command_control.extended_sg_table = 1;
1216 /* copy the s/g table */
1217 for (i = 0; i < nseg; i++) {
1218 sg[i].physaddr = segs[i].ds_addr;
1219 sg[i].length = segs[i].ds_len;
1224 #if 0
1225 /********************************************************************************
1226 * Command-mapping helper function - save the cdb's physical address.
1228 * We don't support 'large' SCSI commands at this time, so this is unused.
1230 static void
1231 mly_map_command_cdb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1233 struct mly_command *mc = (struct mly_command *)arg;
1235 debug_called(3);
1237 /* XXX can we safely assume that a CDB will never cross a page boundary? */
1238 if ((segs[0].ds_addr % PAGE_SIZE) >
1239 ((segs[0].ds_addr + mc->mc_packet->scsi_large.cdb_length) % PAGE_SIZE))
1240 panic("cdb crosses page boundary");
1242 /* fix up fields in the command packet */
1243 mc->mc_packet->scsi_large.cdb_physaddr = segs[0].ds_addr;
1245 #endif
1247 /********************************************************************************
1248 * Map a command into controller-visible space
1250 static void
1251 mly_map_command(struct mly_command *mc)
1253 struct mly_softc *sc = mc->mc_sc;
1255 debug_called(2);
1257 /* don't map more than once */
1258 if (mc->mc_flags & MLY_CMD_MAPPED)
1259 return;
1261 /* does the command have a data buffer? */
1262 if (mc->mc_data != NULL)
1263 bus_dmamap_load(sc->mly_buffer_dmat, mc->mc_datamap, mc->mc_data, mc->mc_length,
1264 mly_map_command_sg, mc, 0);
1266 if (mc->mc_flags & MLY_CMD_DATAIN)
1267 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREREAD);
1268 if (mc->mc_flags & MLY_CMD_DATAOUT)
1269 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_PREWRITE);
1271 mc->mc_flags |= MLY_CMD_MAPPED;
1274 /********************************************************************************
1275 * Unmap a command from controller-visible space
1277 static void
1278 mly_unmap_command(struct mly_command *mc)
1280 struct mly_softc *sc = mc->mc_sc;
1282 debug_called(2);
1284 if (!(mc->mc_flags & MLY_CMD_MAPPED))
1285 return;
1287 if (mc->mc_flags & MLY_CMD_DATAIN)
1288 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTREAD);
1289 if (mc->mc_flags & MLY_CMD_DATAOUT)
1290 bus_dmamap_sync(sc->mly_buffer_dmat, mc->mc_datamap, BUS_DMASYNC_POSTWRITE);
1292 /* does the command have a data buffer? */
1293 if (mc->mc_data != NULL)
1294 bus_dmamap_unload(sc->mly_buffer_dmat, mc->mc_datamap);
1296 mc->mc_flags &= ~MLY_CMD_MAPPED;
1299 /********************************************************************************
1300 ********************************************************************************
1301 Hardware Control
1302 ********************************************************************************
1303 ********************************************************************************/
1305 /********************************************************************************
1306 * Handshake with the firmware while the card is being initialised.
1308 static int
1309 mly_fwhandshake(struct mly_softc *sc)
1311 u_int8_t error, param0, param1;
1312 int spinup = 0;
1314 debug_called(1);
1316 /* set HM_STSACK and let the firmware initialise */
1317 MLY_SET_REG(sc, sc->mly_idbr, MLY_HM_STSACK);
1318 DELAY(1000); /* too short? */
1320 /* if HM_STSACK is still true, the controller is initialising */
1321 if (!MLY_IDBR_TRUE(sc, MLY_HM_STSACK))
1322 return(0);
1323 mly_printf(sc, "controller initialisation started\n");
1325 /* spin waiting for initialisation to finish, or for a message to be delivered */
1326 while (MLY_IDBR_TRUE(sc, MLY_HM_STSACK)) {
1327 /* check for a message */
1328 if (MLY_ERROR_VALID(sc)) {
1329 error = MLY_GET_REG(sc, sc->mly_error_status) & ~MLY_MSG_EMPTY;
1330 param0 = MLY_GET_REG(sc, sc->mly_command_mailbox);
1331 param1 = MLY_GET_REG(sc, sc->mly_command_mailbox + 1);
1333 switch(error) {
1334 case MLY_MSG_SPINUP:
1335 if (!spinup) {
1336 mly_printf(sc, "drive spinup in progress\n");
1337 spinup = 1; /* only print this once (should print drive being spun?) */
1339 break;
1340 case MLY_MSG_RACE_RECOVERY_FAIL:
1341 mly_printf(sc, "mirror race recovery failed, one or more drives offline\n");
1342 break;
1343 case MLY_MSG_RACE_IN_PROGRESS:
1344 mly_printf(sc, "mirror race recovery in progress\n");
1345 break;
1346 case MLY_MSG_RACE_ON_CRITICAL:
1347 mly_printf(sc, "mirror race recovery on a critical drive\n");
1348 break;
1349 case MLY_MSG_PARITY_ERROR:
1350 mly_printf(sc, "FATAL MEMORY PARITY ERROR\n");
1351 return(ENXIO);
1352 default:
1353 mly_printf(sc, "unknown initialisation code 0x%x\n", error);
1357 return(0);
1360 /********************************************************************************
1361 ********************************************************************************
1362 Debugging and Diagnostics
1363 ********************************************************************************
1364 ********************************************************************************/
1366 /********************************************************************************
1367 * Print some information about the controller.
1369 static void
1370 mly_describe_controller(struct mly_softc *sc)
1372 struct mly_ioctl_getcontrollerinfo *mi = sc->mly_controllerinfo;
1374 mly_printf(sc, "%16s, %d channel%s, firmware %d.%02d-%d-%02d (%02d%02d%02d%02d), %dMB RAM\n",
1375 mi->controller_name, mi->physical_channels_present, (mi->physical_channels_present) > 1 ? "s" : "",
1376 mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build, /* XXX turn encoding? */
1377 mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
1378 mi->memory_size);
1380 if (bootverbose) {
1381 mly_printf(sc, "%s %s (%x), %dMHz %d-bit %.16s\n",
1382 mly_describe_code(mly_table_oemname, mi->oem_information),
1383 mly_describe_code(mly_table_controllertype, mi->controller_type), mi->controller_type,
1384 mi->interface_speed, mi->interface_width, mi->interface_name);
1385 mly_printf(sc, "%dMB %dMHz %d-bit %s%s%s, cache %dMB\n",
1386 mi->memory_size, mi->memory_speed, mi->memory_width,
1387 mly_describe_code(mly_table_memorytype, mi->memory_type),
1388 mi->memory_parity ? "+parity": "",mi->memory_ecc ? "+ECC": "",
1389 mi->cache_size);
1390 mly_printf(sc, "CPU: %s @ %dMHZ\n",
1391 mly_describe_code(mly_table_cputype, mi->cpu[0].type), mi->cpu[0].speed);
1392 if (mi->l2cache_size != 0)
1393 mly_printf(sc, "%dKB L2 cache\n", mi->l2cache_size);
1394 if (mi->exmemory_size != 0)
1395 mly_printf(sc, "%dMB %dMHz %d-bit private %s%s%s\n",
1396 mi->exmemory_size, mi->exmemory_speed, mi->exmemory_width,
1397 mly_describe_code(mly_table_memorytype, mi->exmemory_type),
1398 mi->exmemory_parity ? "+parity": "",mi->exmemory_ecc ? "+ECC": "");
1399 mly_printf(sc, "battery backup %s\n", mi->bbu_present ? "present" : "not installed");
1400 mly_printf(sc, "maximum data transfer %d blocks, maximum sg entries/command %d\n",
1401 mi->maximum_block_count, mi->maximum_sg_entries);
1402 mly_printf(sc, "logical devices present/critical/offline %d/%d/%d\n",
1403 mi->logical_devices_present, mi->logical_devices_critical, mi->logical_devices_offline);
1404 mly_printf(sc, "physical devices present %d\n",
1405 mi->physical_devices_present);
1406 mly_printf(sc, "physical disks present/offline %d/%d\n",
1407 mi->physical_disks_present, mi->physical_disks_offline);
1408 mly_printf(sc, "%d physical channel%s, %d virtual channel%s of %d possible\n",
1409 mi->physical_channels_present, mi->physical_channels_present == 1 ? "" : "s",
1410 mi->virtual_channels_present, mi->virtual_channels_present == 1 ? "" : "s",
1411 mi->virtual_channels_possible);
1412 mly_printf(sc, "%d parallel commands supported\n", mi->maximum_parallel_commands);
1413 mly_printf(sc, "%dMB flash ROM, %d of %d maximum cycles\n",
1414 mi->flash_size, mi->flash_age, mi->flash_maximum_age);
1418 #ifdef MLY_DEBUG
1419 /********************************************************************************
1420 * Print some controller state
1422 static void
1423 mly_printstate(struct mly_softc *sc)
1425 mly_printf(sc, "IDBR %02x ODBR %02x ERROR %02x (%x %x %x)\n",
1426 MLY_GET_REG(sc, sc->mly_idbr),
1427 MLY_GET_REG(sc, sc->mly_odbr),
1428 MLY_GET_REG(sc, sc->mly_error_status),
1429 sc->mly_idbr,
1430 sc->mly_odbr,
1431 sc->mly_error_status);
1432 mly_printf(sc, "IMASK %02x ISTATUS %02x\n",
1433 MLY_GET_REG(sc, sc->mly_interrupt_mask),
1434 MLY_GET_REG(sc, sc->mly_interrupt_status));
1435 mly_printf(sc, "COMMAND %02x %02x %02x %02x %02x %02x %02x %02x\n",
1436 MLY_GET_REG(sc, sc->mly_command_mailbox),
1437 MLY_GET_REG(sc, sc->mly_command_mailbox + 1),
1438 MLY_GET_REG(sc, sc->mly_command_mailbox + 2),
1439 MLY_GET_REG(sc, sc->mly_command_mailbox + 3),
1440 MLY_GET_REG(sc, sc->mly_command_mailbox + 4),
1441 MLY_GET_REG(sc, sc->mly_command_mailbox + 5),
1442 MLY_GET_REG(sc, sc->mly_command_mailbox + 6),
1443 MLY_GET_REG(sc, sc->mly_command_mailbox + 7));
1444 mly_printf(sc, "STATUS %02x %02x %02x %02x %02x %02x %02x %02x\n",
1445 MLY_GET_REG(sc, sc->mly_status_mailbox),
1446 MLY_GET_REG(sc, sc->mly_status_mailbox + 1),
1447 MLY_GET_REG(sc, sc->mly_status_mailbox + 2),
1448 MLY_GET_REG(sc, sc->mly_status_mailbox + 3),
1449 MLY_GET_REG(sc, sc->mly_status_mailbox + 4),
1450 MLY_GET_REG(sc, sc->mly_status_mailbox + 5),
1451 MLY_GET_REG(sc, sc->mly_status_mailbox + 6),
1452 MLY_GET_REG(sc, sc->mly_status_mailbox + 7));
1453 mly_printf(sc, " %04x %08x\n",
1454 MLY_GET_REG2(sc, sc->mly_status_mailbox),
1455 MLY_GET_REG4(sc, sc->mly_status_mailbox + 4));
1458 struct mly_softc *mly_softc0 = NULL;
1459 void
1460 mly_printstate0(void)
1462 if (mly_softc0 != NULL)
1463 mly_printstate(mly_softc0);
1466 /********************************************************************************
1467 * Print a command
1469 static void
1470 mly_print_command(struct mly_command *mc)
1472 struct mly_softc *sc = mc->mc_sc;
1474 mly_printf(sc, "COMMAND @ %p\n", mc);
1475 mly_printf(sc, " slot %d\n", mc->mc_slot);
1476 mly_printf(sc, " status 0x%x\n", mc->mc_status);
1477 mly_printf(sc, " sense len %d\n", mc->mc_sense);
1478 mly_printf(sc, " resid %d\n", mc->mc_resid);
1479 mly_printf(sc, " packet %p/0x%llx\n", mc->mc_packet, mc->mc_packetphys);
1480 if (mc->mc_packet != NULL)
1481 mly_print_packet(mc);
1482 mly_printf(sc, " data %p/%d\n", mc->mc_data, mc->mc_length);
1483 mly_printf(sc, " flags %b\n", mc->mc_flags, "\20\1busy\2complete\3slotted\4mapped\5datain\6dataout\n");
1484 mly_printf(sc, " complete %p\n", mc->mc_complete);
1485 mly_printf(sc, " private %p\n", mc->mc_private);
1488 /********************************************************************************
1489 * Print a command packet
1491 static void
1492 mly_print_packet(struct mly_command *mc)
1494 struct mly_softc *sc = mc->mc_sc;
1495 struct mly_command_generic *ge = (struct mly_command_generic *)mc->mc_packet;
1496 struct mly_command_scsi_small *ss = (struct mly_command_scsi_small *)mc->mc_packet;
1497 struct mly_command_scsi_large *sl = (struct mly_command_scsi_large *)mc->mc_packet;
1498 struct mly_command_ioctl *io = (struct mly_command_ioctl *)mc->mc_packet;
1499 int transfer;
1501 mly_printf(sc, " command_id %d\n", ge->command_id);
1502 mly_printf(sc, " opcode %d\n", ge->opcode);
1503 mly_printf(sc, " command_control fua %d dpo %d est %d dd %s nas %d ddis %d\n",
1504 ge->command_control.force_unit_access,
1505 ge->command_control.disable_page_out,
1506 ge->command_control.extended_sg_table,
1507 (ge->command_control.data_direction == MLY_CCB_WRITE) ? "WRITE" : "READ",
1508 ge->command_control.no_auto_sense,
1509 ge->command_control.disable_disconnect);
1510 mly_printf(sc, " data_size %d\n", ge->data_size);
1511 mly_printf(sc, " sense_buffer_address 0x%llx\n", ge->sense_buffer_address);
1512 mly_printf(sc, " lun %d\n", ge->addr.phys.lun);
1513 mly_printf(sc, " target %d\n", ge->addr.phys.target);
1514 mly_printf(sc, " channel %d\n", ge->addr.phys.channel);
1515 mly_printf(sc, " logical device %d\n", ge->addr.log.logdev);
1516 mly_printf(sc, " controller %d\n", ge->addr.phys.controller);
1517 mly_printf(sc, " timeout %d %s\n",
1518 ge->timeout.value,
1519 (ge->timeout.scale == MLY_TIMEOUT_SECONDS) ? "seconds" :
1520 ((ge->timeout.scale == MLY_TIMEOUT_MINUTES) ? "minutes" : "hours"));
1521 mly_printf(sc, " maximum_sense_size %d\n", ge->maximum_sense_size);
1522 switch(ge->opcode) {
1523 case MDACMD_SCSIPT:
1524 case MDACMD_SCSI:
1525 mly_printf(sc, " cdb length %d\n", ss->cdb_length);
1526 mly_printf(sc, " cdb %*D\n", ss->cdb_length, ss->cdb, " ");
1527 transfer = 1;
1528 break;
1529 case MDACMD_SCSILC:
1530 case MDACMD_SCSILCPT:
1531 mly_printf(sc, " cdb length %d\n", sl->cdb_length);
1532 mly_printf(sc, " cdb 0x%llx\n", sl->cdb_physaddr);
1533 transfer = 1;
1534 break;
1535 case MDACMD_IOCTL:
1536 mly_printf(sc, " sub_ioctl 0x%x\n", io->sub_ioctl);
1537 switch(io->sub_ioctl) {
1538 case MDACIOCTL_SETMEMORYMAILBOX:
1539 mly_printf(sc, " health_buffer_size %d\n",
1540 io->param.setmemorymailbox.health_buffer_size);
1541 mly_printf(sc, " health_buffer_phys 0x%llx\n",
1542 io->param.setmemorymailbox.health_buffer_physaddr);
1543 mly_printf(sc, " command_mailbox 0x%llx\n",
1544 io->param.setmemorymailbox.command_mailbox_physaddr);
1545 mly_printf(sc, " status_mailbox 0x%llx\n",
1546 io->param.setmemorymailbox.status_mailbox_physaddr);
1547 transfer = 0;
1548 break;
1550 case MDACIOCTL_SETREALTIMECLOCK:
1551 case MDACIOCTL_GETHEALTHSTATUS:
1552 case MDACIOCTL_GETCONTROLLERINFO:
1553 case MDACIOCTL_GETLOGDEVINFOVALID:
1554 case MDACIOCTL_GETPHYSDEVINFOVALID:
1555 case MDACIOCTL_GETPHYSDEVSTATISTICS:
1556 case MDACIOCTL_GETLOGDEVSTATISTICS:
1557 case MDACIOCTL_GETCONTROLLERSTATISTICS:
1558 case MDACIOCTL_GETBDT_FOR_SYSDRIVE:
1559 case MDACIOCTL_CREATENEWCONF:
1560 case MDACIOCTL_ADDNEWCONF:
1561 case MDACIOCTL_GETDEVCONFINFO:
1562 case MDACIOCTL_GETFREESPACELIST:
1563 case MDACIOCTL_MORE:
1564 case MDACIOCTL_SETPHYSDEVPARAMETER:
1565 case MDACIOCTL_GETPHYSDEVPARAMETER:
1566 case MDACIOCTL_GETLOGDEVPARAMETER:
1567 case MDACIOCTL_SETLOGDEVPARAMETER:
1568 mly_printf(sc, " param %10D\n", io->param.data.param, " ");
1569 transfer = 1;
1570 break;
1572 case MDACIOCTL_GETEVENT:
1573 mly_printf(sc, " event %d\n",
1574 io->param.getevent.sequence_number_low + ((u_int32_t)io->addr.log.logdev << 16));
1575 transfer = 1;
1576 break;
1578 case MDACIOCTL_SETRAIDDEVSTATE:
1579 mly_printf(sc, " state %d\n", io->param.setraiddevstate.state);
1580 transfer = 0;
1581 break;
1583 case MDACIOCTL_XLATEPHYSDEVTORAIDDEV:
1584 mly_printf(sc, " raid_device %d\n", io->param.xlatephysdevtoraiddev.raid_device);
1585 mly_printf(sc, " controller %d\n", io->param.xlatephysdevtoraiddev.controller);
1586 mly_printf(sc, " channel %d\n", io->param.xlatephysdevtoraiddev.channel);
1587 mly_printf(sc, " target %d\n", io->param.xlatephysdevtoraiddev.target);
1588 mly_printf(sc, " lun %d\n", io->param.xlatephysdevtoraiddev.lun);
1589 transfer = 0;
1590 break;
1592 case MDACIOCTL_GETGROUPCONFINFO:
1593 mly_printf(sc, " group %d\n", io->param.getgroupconfinfo.group);
1594 transfer = 1;
1595 break;
1597 case MDACIOCTL_GET_SUBSYSTEM_DATA:
1598 case MDACIOCTL_SET_SUBSYSTEM_DATA:
1599 case MDACIOCTL_STARTDISOCVERY:
1600 case MDACIOCTL_INITPHYSDEVSTART:
1601 case MDACIOCTL_INITPHYSDEVSTOP:
1602 case MDACIOCTL_INITRAIDDEVSTART:
1603 case MDACIOCTL_INITRAIDDEVSTOP:
1604 case MDACIOCTL_REBUILDRAIDDEVSTART:
1605 case MDACIOCTL_REBUILDRAIDDEVSTOP:
1606 case MDACIOCTL_MAKECONSISTENTDATASTART:
1607 case MDACIOCTL_MAKECONSISTENTDATASTOP:
1608 case MDACIOCTL_CONSISTENCYCHECKSTART:
1609 case MDACIOCTL_CONSISTENCYCHECKSTOP:
1610 case MDACIOCTL_RESETDEVICE:
1611 case MDACIOCTL_FLUSHDEVICEDATA:
1612 case MDACIOCTL_PAUSEDEVICE:
1613 case MDACIOCTL_UNPAUSEDEVICE:
1614 case MDACIOCTL_LOCATEDEVICE:
1615 case MDACIOCTL_SETMASTERSLAVEMODE:
1616 case MDACIOCTL_DELETERAIDDEV:
1617 case MDACIOCTL_REPLACEINTERNALDEV:
1618 case MDACIOCTL_CLEARCONF:
1619 case MDACIOCTL_GETCONTROLLERPARAMETER:
1620 case MDACIOCTL_SETCONTRLLERPARAMETER:
1621 case MDACIOCTL_CLEARCONFSUSPMODE:
1622 case MDACIOCTL_STOREIMAGE:
1623 case MDACIOCTL_READIMAGE:
1624 case MDACIOCTL_FLASHIMAGES:
1625 case MDACIOCTL_RENAMERAIDDEV:
1626 default: /* no idea what to print */
1627 transfer = 0;
1628 break;
1630 break;
1632 case MDACMD_IOCTLCHECK:
1633 case MDACMD_MEMCOPY:
1634 default:
1635 transfer = 0;
1636 break; /* print nothing */
1638 if (transfer) {
1639 if (ge->command_control.extended_sg_table) {
1640 mly_printf(sc, " sg table 0x%llx/%d\n",
1641 ge->transfer.indirect.table_physaddr[0], ge->transfer.indirect.entries[0]);
1642 } else {
1643 mly_printf(sc, " 0000 0x%llx/%lld\n",
1644 ge->transfer.direct.sg[0].physaddr, ge->transfer.direct.sg[0].length);
1645 mly_printf(sc, " 0001 0x%llx/%lld\n",
1646 ge->transfer.direct.sg[1].physaddr, ge->transfer.direct.sg[1].length);
1651 /********************************************************************************
1652 * Panic in a slightly informative fashion
1654 static void
1655 mly_panic(struct mly_softc *sc, char *reason)
1657 mly_printstate(sc);
1658 panic(reason);
1660 #endif
1662 /********************************************************************************
1663 * Print queue statistics, callable from DDB.
1665 void
1666 mly_print_controller(int controller)
1668 struct mly_softc *sc;
1670 if ((sc = devclass_get_softc(devclass_find("mly"), controller)) == NULL) {
1671 kprintf("mly: controller %d invalid\n", controller);
1672 } else {
1673 device_printf(sc->mly_dev, "queue curr max\n");
1674 device_printf(sc->mly_dev, "free %04d/%04d\n",
1675 sc->mly_qstat[MLYQ_FREE].q_length, sc->mly_qstat[MLYQ_FREE].q_max);
1676 device_printf(sc->mly_dev, "ready %04d/%04d\n",
1677 sc->mly_qstat[MLYQ_READY].q_length, sc->mly_qstat[MLYQ_READY].q_max);
1678 device_printf(sc->mly_dev, "busy %04d/%04d\n",
1679 sc->mly_qstat[MLYQ_BUSY].q_length, sc->mly_qstat[MLYQ_BUSY].q_max);
1680 device_printf(sc->mly_dev, "complete %04d/%04d\n",
1681 sc->mly_qstat[MLYQ_COMPLETE].q_length, sc->mly_qstat[MLYQ_COMPLETE].q_max);
1686 /********************************************************************************
1687 ********************************************************************************
1688 Control device interface
1689 ********************************************************************************
1690 ********************************************************************************/
1692 /********************************************************************************
1693 * Accept an open operation on the control device.
1695 static int
1696 mly_user_open(struct dev_open_args *ap)
1698 cdev_t dev = ap->a_head.a_dev;
1699 int unit = minor(dev);
1700 struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit);
1702 sc->mly_state |= MLY_STATE_OPEN;
1703 return(0);
1706 /********************************************************************************
1707 * Accept the last close on the control device.
1709 static int
1710 mly_user_close(struct dev_close_args *ap)
1712 cdev_t dev = ap->a_head.a_dev;
1713 int unit = minor(dev);
1714 struct mly_softc *sc = devclass_get_softc(devclass_find("mly"), unit);
1716 sc->mly_state &= ~MLY_STATE_OPEN;
1717 return (0);
1720 /********************************************************************************
1721 * Handle controller-specific control operations.
1723 static int
1724 mly_user_ioctl(struct dev_ioctl_args *ap)
1726 cdev_t dev = ap->a_head.a_dev;
1727 struct mly_softc *sc = (struct mly_softc *)dev->si_drv1;
1728 struct mly_user_command *uc = (struct mly_user_command *)ap->a_data;
1729 struct mly_user_health *uh = (struct mly_user_health *)ap->a_data;
1731 switch(ap->a_cmd) {
1732 case MLYIO_COMMAND:
1733 return(mly_user_command(sc, uc));
1734 case MLYIO_HEALTH:
1735 return(mly_user_health(sc, uh));
1736 default:
1737 return(ENOIOCTL);
1741 /********************************************************************************
1742 * Execute a command passed in from userspace.
1744 * The control structure contains the actual command for the controller, as well
1745 * as the user-space data pointer and data size, and an optional sense buffer
1746 * size/pointer. On completion, the data size is adjusted to the command
1747 * residual, and the sense buffer size to the size of the returned sense data.
1750 static int
1751 mly_user_command(struct mly_softc *sc, struct mly_user_command *uc)
1753 struct mly_command *mc = NULL;
1754 int error;
1756 /* allocate a command */
1757 if (mly_alloc_command(sc, &mc)) {
1758 error = ENOMEM;
1759 goto out; /* XXX Linux version will wait for a command */
1762 /* handle data size/direction */
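/*
 * The sign of DataTransferLength encodes the direction: a positive value
 * asks for that many bytes back from the controller (DATAIN, buffer zeroed
 * first), a negative value sends |length| bytes of user data to the
 * controller (DATAOUT).
 */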
1763 mc->mc_length = (uc->DataTransferLength >= 0) ? uc->DataTransferLength : -uc->DataTransferLength;
1764 if (mc->mc_length > 0)
1765 mc->mc_data = kmalloc(mc->mc_length, M_DEVBUF, M_INTWAIT);
1766 if (uc->DataTransferLength > 0) {
1767 mc->mc_flags |= MLY_CMD_DATAIN;
1768 bzero(mc->mc_data, mc->mc_length);
1770 if (uc->DataTransferLength < 0) {
1771 mc->mc_flags |= MLY_CMD_DATAOUT;
1772 if ((error = copyin(uc->DataTransferBuffer, mc->mc_data, mc->mc_length)) != 0)
1773 goto out;
1776 /* copy the controller command */
1777 bcopy(&uc->CommandMailbox, mc->mc_packet, sizeof(uc->CommandMailbox));
1779 /* clear command completion handler so that we get woken up */
1780 mc->mc_complete = NULL;
1782 /* execute the command */
1783 crit_enter();
1784 mly_requeue_ready(mc);
1785 mly_startio(sc);
1786 while (!(mc->mc_flags & MLY_CMD_COMPLETE))
1787 tsleep(mc, 0, "mlyioctl", 0);
1788 crit_exit();
1790 /* return the data to userspace */
1791 if (uc->DataTransferLength > 0)
1792 if ((error = copyout(mc->mc_data, uc->DataTransferBuffer, mc->mc_length)) != 0)
1793 goto out;
1795 /* return the sense buffer to userspace */
1796 if ((uc->RequestSenseLength > 0) && (mc->mc_sense > 0)) {
1797 if ((error = copyout(mc->mc_packet, uc->RequestSenseBuffer,
1798 min(uc->RequestSenseLength, mc->mc_sense))) != 0)
1799 goto out;
1802 /* return command results to userspace (caller will copy out) */
1803 uc->DataTransferLength = mc->mc_resid;
1804 uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
1805 uc->CommandStatus = mc->mc_status;
1806 error = 0;
1808 out:
1809 if ((mc != NULL) && (mc->mc_data != NULL))
1810 kfree(mc->mc_data, M_DEVBUF);
1811 if (mc != NULL)
1812 mly_release_command(mc);
1813 return(error);
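/*
 * A rough userland sketch of this path, for illustration only; the
 * authoritative struct mly_user_command layout and the MLYIO_COMMAND
 * definition live in mlyio.h, and building the CommandMailbox packet is
 * left to the caller:
 *
 *	struct mly_user_command uc;
 *	char buf[512], sense[64];
 *	int fd = open("/dev/mly0", O_RDWR);
 *
 *	bzero(&uc, sizeof(uc));
 *	... fill uc.CommandMailbox with a controller command packet ...
 *	uc.DataTransferBuffer = buf;
 *	uc.DataTransferLength = sizeof(buf);	... positive: data comes back to us
 *	uc.RequestSenseBuffer = sense;
 *	uc.RequestSenseLength = sizeof(sense);
 *	if (ioctl(fd, MLYIO_COMMAND, &uc) == 0)
 *		printf("status 0x%x, residual %d bytes\n", uc.CommandStatus,
 *		    uc.DataTransferLength);
 */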
1816 /********************************************************************************
1817 * Return health status to userspace. If the health change index in the user
1818 * structure does not match that currently exported by the controller, we
1819 * return the current status immediately. Otherwise, we block until either
1820 * interrupted or new status is delivered.
1822 static int
1823 mly_user_health(struct mly_softc *sc, struct mly_user_health *uh)
1825 struct mly_health_status mh;
1826 int error;
1828 /* fetch the current health status from userspace */
1829 if ((error = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh))) != 0)
1830 return(error);
1832 /* spin waiting for a status update */
1833 crit_enter();
1834 error = EWOULDBLOCK;
1835 while ((error != 0) && (sc->mly_event_change == mh.change_counter))
1836 error = tsleep(&sc->mly_event_change, PCATCH, "mlyhealth", 0);
1837 crit_exit();
1839 /* copy the controller's health status buffer out (there is a race here if it changes again) */
1840 error = copyout(&sc->mly_mmbox->mmm_health.status, uh->HealthStatusBuffer,
1841 sizeof(mh));
1842 return(error);