2 * Bus independent FreeBSD shim for the aic7xxx based Adaptec SCSI controllers
4 * Copyright (c) 1994-2001 Justin T. Gibbs.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * Alternatively, this software may be distributed under the terms of the
17 * GNU Public License ("GPL").
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#20 $
33 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.45 2006/09/05 20:28:28 mjacob Exp $
34 * $DragonFly: src/sys/dev/disk/aic7xxx/aic7xxx_osm.c,v 1.23 2008/05/18 20:30:21 pavalos Exp $
37 #include "aic7xxx_osm.h"
38 #include "aic7xxx_inline.h"
40 #include <sys/kthread.h>
42 #ifndef AHC_TMODE_ENABLE
43 #define AHC_TMODE_ENABLE 0
46 #include "aic_osm_lib.c"
48 #define ccb_scb_ptr spriv_ptr0
/* devclass shared by every ahc(4) front end (PCI, EISA/aic7770). */
50 devclass_t ahc_devclass
;
53 static void ahc_dump_targcmd(struct target_cmd
*cmd
);
55 static int ahc_modevent(module_t mod
, int type
, void *data
);
56 static void ahc_action(struct cam_sim
*sim
, union ccb
*ccb
);
57 static void ahc_get_tran_settings(struct ahc_softc
*ahc
,
58 int our_id
, char channel
,
59 struct ccb_trans_settings
*cts
);
60 static void ahc_async(void *callback_arg
, uint32_t code
,
61 struct cam_path
*path
, void *arg
);
62 static void ahc_execute_scb(void *arg
, bus_dma_segment_t
*dm_segs
,
63 int nsegments
, int error
);
64 static void ahc_poll(struct cam_sim
*sim
);
65 static void ahc_setup_data(struct ahc_softc
*ahc
, struct cam_sim
*sim
,
66 struct ccb_scsiio
*csio
, struct scb
*scb
);
67 static void ahc_abort_ccb(struct ahc_softc
*ahc
, struct cam_sim
*sim
,
69 static int ahc_create_path(struct ahc_softc
*ahc
,
70 char channel
, u_int target
, u_int lun
,
71 struct cam_path
**path
);
75 ahc_create_path(struct ahc_softc
*ahc
, char channel
, u_int target
,
76 u_int lun
, struct cam_path
**path
)
81 path_id
= cam_sim_path(ahc
->platform_data
->sim_b
);
83 path_id
= cam_sim_path(ahc
->platform_data
->sim
);
85 return (xpt_create_path(path
, /*periph*/NULL
,
86 path_id
, target
, lun
));
90 ahc_map_int(struct ahc_softc
*ahc
)
97 shareable
= (ahc
->flags
& AHC_EDGE_INTERRUPT
) ? 0: RF_SHAREABLE
;
98 ahc
->platform_data
->irq
=
99 bus_alloc_resource_any(ahc
->dev_softc
, SYS_RES_IRQ
, &zero
,
100 RF_ACTIVE
| shareable
);
101 if (ahc
->platform_data
->irq
== NULL
) {
102 device_printf(ahc
->dev_softc
,
103 "bus_alloc_resource() failed to allocate IRQ\n");
106 ahc
->platform_data
->irq_res_type
= SYS_RES_IRQ
;
108 /* Hook up our interrupt handler */
109 error
= bus_setup_intr(ahc
->dev_softc
, ahc
->platform_data
->irq
,
110 INTR_MPSAFE
, ahc_platform_intr
, ahc
,
111 &ahc
->platform_data
->ih
, NULL
);
114 device_printf(ahc
->dev_softc
, "bus_setup_intr() failed: %d\n",
120 aic7770_map_registers(struct ahc_softc
*ahc
, u_int unused_ioport_arg
)
122 struct resource
*regs
;
126 regs
= bus_alloc_resource_any(ahc
->dev_softc
, SYS_RES_IOPORT
, &rid
,
129 device_printf(ahc
->dev_softc
, "Unable to map I/O space?!\n");
132 ahc
->platform_data
->regs_res_type
= SYS_RES_IOPORT
;
133 ahc
->platform_data
->regs_res_id
= rid
,
134 ahc
->platform_data
->regs
= regs
;
135 ahc
->tag
= rman_get_bustag(regs
);
136 ahc
->bsh
= rman_get_bushandle(regs
);
141 * Attach all the sub-devices we can find
144 ahc_attach(struct ahc_softc
*ahc
)
147 struct ccb_setasync csa
;
151 struct cam_sim
*sim2
;
152 struct cam_path
*path
;
153 struct cam_path
*path2
;
163 * Create a thread to perform all recovery.
165 if (ahc_spawn_recovery_thread(ahc
) != 0)
168 ahc_controller_info(ahc
, ahc_info
);
169 kprintf("%s\n", ahc_info
);
173 * Attach secondary channel first if the user has
174 * declared it the primary channel.
176 if ((ahc
->features
& AHC_TWIN
) != 0
177 && (ahc
->flags
& AHC_PRIMARY_CHANNEL
) != 0) {
186 * Construct our first channel SIM entry
188 sim
= cam_sim_alloc(ahc_action
, ahc_poll
, "ahc", ahc
,
189 device_get_unit(ahc
->dev_softc
),
190 &ahc
->platform_data
->lock
, 1, AHC_MAX_QUEUE
, NULL
);
194 if (xpt_bus_register(sim
, bus_id
) != CAM_SUCCESS
) {
200 if (xpt_create_path(&path
, /*periph*/NULL
,
201 cam_sim_path(sim
), CAM_TARGET_WILDCARD
,
202 CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
203 xpt_bus_deregister(cam_sim_path(sim
));
209 xpt_setup_ccb(&csa
.ccb_h
, path
, /*priority*/5);
210 csa
.ccb_h
.func_code
= XPT_SASYNC_CB
;
211 csa
.event_enable
= AC_LOST_DEVICE
;
212 csa
.callback
= ahc_async
;
213 csa
.callback_arg
= sim
;
214 xpt_action((union ccb
*)&csa
);
217 if (ahc
->features
& AHC_TWIN
) {
218 sim2
= cam_sim_alloc(ahc_action
, ahc_poll
, "ahc",
219 ahc
, device_get_unit(ahc
->dev_softc
),
220 &ahc
->platform_data
->lock
, 1,
221 AHC_MAX_QUEUE
, NULL
);
224 kprintf("ahc_attach: Unable to attach second "
225 "bus due to resource shortage");
229 if (xpt_bus_register(sim2
, bus_id2
) != CAM_SUCCESS
) {
230 kprintf("ahc_attach: Unable to attach second "
231 "bus due to resource shortage");
233 * We do not want to destroy the device queue
234 * because the first bus is using it.
240 if (xpt_create_path(&path2
, /*periph*/NULL
,
243 CAM_LUN_WILDCARD
) != CAM_REQ_CMP
) {
244 xpt_bus_deregister(cam_sim_path(sim2
));
249 xpt_setup_ccb(&csa
.ccb_h
, path2
, /*priority*/5);
250 csa
.ccb_h
.func_code
= XPT_SASYNC_CB
;
251 csa
.event_enable
= AC_LOST_DEVICE
;
252 csa
.callback
= ahc_async
;
253 csa
.callback_arg
= sim2
;
254 xpt_action((union ccb
*)&csa
);
259 if ((ahc
->features
& AHC_TWIN
) != 0
260 && (ahc
->flags
& AHC_PRIMARY_CHANNEL
) != 0) {
261 ahc
->platform_data
->sim_b
= sim
;
262 ahc
->platform_data
->path_b
= path
;
263 ahc
->platform_data
->sim
= sim2
;
264 ahc
->platform_data
->path
= path2
;
266 ahc
->platform_data
->sim
= sim
;
267 ahc
->platform_data
->path
= path
;
268 ahc
->platform_data
->sim_b
= sim2
;
269 ahc
->platform_data
->path_b
= path2
;
274 /* We have to wait until after any system dumps... */
275 ahc
->platform_data
->eh
=
276 EVENTHANDLER_REGISTER(shutdown_post_sync
, ahc_shutdown
,
277 ahc
, SHUTDOWN_PRI_DRIVER
);
278 ahc_intr_enable(ahc
, TRUE
);
285 * Catch an interrupt from the adapter
/*
 * Interrupt entry point registered with bus_setup_intr() in ahc_map_int().
 * NOTE(review): extraction fragment — the interior lines (presumably
 * locking and the call into the core interrupt handler) were dropped;
 * only the recovery of the softc from the opaque bus argument survives.
 */
288 ahc_platform_intr(void *arg
)
290 struct ahc_softc
*ahc
;
/* The opaque interrupt argument is the controller softc. */
292 ahc
= (struct ahc_softc
*)arg
;
299 * We have an scb which has been processed by the
300 * adaptor, now we look to see how the operation
304 ahc_done(struct ahc_softc
*ahc
, struct scb
*scb
)
308 CAM_DEBUG(scb
->io_ctx
->ccb_h
.path
, CAM_DEBUG_TRACE
,
309 ("ahc_done - scb %d\n", scb
->hscb
->tag
));
312 LIST_REMOVE(scb
, pending_links
);
313 if ((scb
->flags
& SCB_TIMEDOUT
) != 0)
314 LIST_REMOVE(scb
, timedout_links
);
315 if ((scb
->flags
& SCB_UNTAGGEDQ
) != 0) {
316 struct scb_tailq
*untagged_q
;
319 target_offset
= SCB_GET_TARGET_OFFSET(ahc
, scb
);
320 untagged_q
= &ahc
->untagged_queues
[target_offset
];
321 TAILQ_REMOVE(untagged_q
, scb
, links
.tqe
);
322 scb
->flags
&= ~SCB_UNTAGGEDQ
;
323 ahc_run_untagged_queue(ahc
, untagged_q
);
326 callout_stop(&scb
->io_timer
);
328 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) != CAM_DIR_NONE
) {
331 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
)
332 op
= BUS_DMASYNC_POSTREAD
;
334 op
= BUS_DMASYNC_POSTWRITE
;
335 bus_dmamap_sync(ahc
->buffer_dmat
, scb
->dmamap
, op
);
336 bus_dmamap_unload(ahc
->buffer_dmat
, scb
->dmamap
);
339 if (ccb
->ccb_h
.func_code
== XPT_CONT_TARGET_IO
) {
340 struct cam_path
*ccb_path
;
343 * If we have finally disconnected, clean up our
344 * pending device state.
345 * XXX - There may be error states that cause where
346 * we will remain connected.
348 ccb_path
= ccb
->ccb_h
.path
;
349 if (ahc
->pending_device
!= NULL
350 && xpt_path_comp(ahc
->pending_device
->path
, ccb_path
) == 0) {
352 if ((ccb
->ccb_h
.flags
& CAM_SEND_STATUS
) != 0) {
353 ahc
->pending_device
= NULL
;
356 xpt_print_path(ccb
->ccb_h
.path
);
357 kprintf("Still connected\n");
363 if (aic_get_transaction_status(scb
) == CAM_REQ_INPROG
)
364 ccb
->ccb_h
.status
|= CAM_REQ_CMP
;
365 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
366 ahc_free_scb(ahc
, scb
);
372 * If the recovery SCB completes, we have to be
373 * out of our timeout.
375 if ((scb
->flags
& SCB_RECOVERY_SCB
) != 0) {
376 struct scb
*list_scb
;
378 ahc
->scb_data
->recovery_scbs
--;
380 if (aic_get_transaction_status(scb
) == CAM_BDR_SENT
381 || aic_get_transaction_status(scb
) == CAM_REQ_ABORTED
)
382 aic_set_transaction_status(scb
, CAM_CMD_TIMEOUT
);
384 if (ahc
->scb_data
->recovery_scbs
== 0) {
386 * All recovery actions have completed successfully,
387 * so reinstate the timeouts for all other pending
390 LIST_FOREACH(list_scb
, &ahc
->pending_scbs
,
393 aic_scb_timer_reset(list_scb
,
394 aic_get_timeout(scb
));
397 ahc_print_path(ahc
, scb
);
398 kprintf("no longer in timeout, status = %x\n",
403 /* Don't clobber any existing error state */
404 if (aic_get_transaction_status(scb
) == CAM_REQ_INPROG
) {
405 ccb
->ccb_h
.status
|= CAM_REQ_CMP
;
406 } else if ((scb
->flags
& SCB_SENSE
) != 0) {
408 * We performed autosense retrieval.
410 * Zero any sense not transferred by the
411 * device. The SCSI spec mandates that any
412 * untransfered data should be assumed to be
413 * zero. Complete the 'bounce' of sense information
414 * through buffers accessible via bus-space by
415 * copying it into the clients csio.
417 memset(&ccb
->csio
.sense_data
, 0, sizeof(ccb
->csio
.sense_data
));
418 memcpy(&ccb
->csio
.sense_data
,
419 ahc_get_sense_buf(ahc
, scb
),
420 (aic_le32toh(scb
->sg_list
->len
) & AHC_SG_LEN_MASK
)
421 - ccb
->csio
.sense_resid
);
422 scb
->io_ctx
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
424 ccb
->ccb_h
.status
&= ~CAM_SIM_QUEUED
;
425 ahc_free_scb(ahc
, scb
);
430 ahc_action(struct cam_sim
*sim
, union ccb
*ccb
)
432 struct ahc_softc
*ahc
;
433 struct ahc_tmode_lstate
*lstate
;
437 CAM_DEBUG(ccb
->ccb_h
.path
, CAM_DEBUG_TRACE
, ("ahc_action\n"));
439 ahc
= (struct ahc_softc
*)cam_sim_softc(sim
);
441 target_id
= ccb
->ccb_h
.target_id
;
442 our_id
= SIM_SCSI_ID(ahc
, sim
);
444 switch (ccb
->ccb_h
.func_code
) {
445 /* Common cases first */
446 case XPT_ACCEPT_TARGET_IO
: /* Accept Host Target Mode CDB */
447 case XPT_CONT_TARGET_IO
:/* Continue Host Target I/O Connection*/
449 struct ahc_tmode_tstate
*tstate
;
452 status
= ahc_find_tmode_devs(ahc
, sim
, ccb
, &tstate
,
455 if (status
!= CAM_REQ_CMP
) {
456 if (ccb
->ccb_h
.func_code
== XPT_CONT_TARGET_IO
) {
457 /* Response from the black hole device */
459 lstate
= ahc
->black_hole
;
461 ccb
->ccb_h
.status
= status
;
466 if (ccb
->ccb_h
.func_code
== XPT_ACCEPT_TARGET_IO
) {
468 SLIST_INSERT_HEAD(&lstate
->accept_tios
, &ccb
->ccb_h
,
470 ccb
->ccb_h
.status
= CAM_REQ_INPROG
;
471 if ((ahc
->flags
& AHC_TQINFIFO_BLOCKED
) != 0)
472 ahc_run_tqinfifo(ahc
, /*paused*/FALSE
);
477 * The target_id represents the target we attempt to
478 * select. In target mode, this is the initiator of
479 * the original command.
482 target_id
= ccb
->csio
.init_id
;
485 case XPT_SCSI_IO
: /* Execute the requested I/O operation */
486 case XPT_RESET_DEV
: /* Bus Device Reset the specified SCSI device */
489 struct hardware_scb
*hscb
;
491 if ((ahc
->flags
& AHC_INITIATORROLE
) == 0
492 && (ccb
->ccb_h
.func_code
== XPT_SCSI_IO
493 || ccb
->ccb_h
.func_code
== XPT_RESET_DEV
)) {
494 ccb
->ccb_h
.status
= CAM_PROVIDE_FAIL
;
502 if ((scb
= ahc_get_scb(ahc
)) == NULL
) {
504 xpt_freeze_simq(sim
, /*count*/1);
505 ahc
->flags
|= AHC_RESOURCE_SHORTAGE
;
506 ccb
->ccb_h
.status
= CAM_REQUEUE_REQ
;
513 CAM_DEBUG(ccb
->ccb_h
.path
, CAM_DEBUG_SUBTRACE
,
514 ("start scb(%p)\n", scb
));
517 * So we can find the SCB when an abort is requested
519 ccb
->ccb_h
.ccb_scb_ptr
= scb
;
522 * Put all the arguments for the xfer in the scb
525 hscb
->scsiid
= BUILD_SCSIID(ahc
, sim
, target_id
, our_id
);
526 hscb
->lun
= ccb
->ccb_h
.target_lun
;
527 if (ccb
->ccb_h
.func_code
== XPT_RESET_DEV
) {
529 scb
->flags
|= SCB_DEVICE_RESET
;
530 hscb
->control
|= MK_MESSAGE
;
531 ahc_execute_scb(scb
, NULL
, 0, 0);
533 if (ccb
->ccb_h
.func_code
== XPT_CONT_TARGET_IO
) {
534 struct target_data
*tdata
;
536 tdata
= &hscb
->shared_data
.tdata
;
537 if (ahc
->pending_device
== lstate
)
538 scb
->flags
|= SCB_TARGET_IMMEDIATE
;
539 hscb
->control
|= TARGET_SCB
;
540 scb
->flags
|= SCB_TARGET_SCB
;
541 tdata
->target_phases
= 0;
542 if ((ccb
->ccb_h
.flags
& CAM_SEND_STATUS
) != 0) {
543 tdata
->target_phases
|= SPHASE_PENDING
;
545 ccb
->csio
.scsi_status
;
547 if (ccb
->ccb_h
.flags
& CAM_DIS_DISCONNECT
)
548 tdata
->target_phases
|= NO_DISCONNECT
;
550 tdata
->initiator_tag
= ccb
->csio
.tag_id
;
552 if (ccb
->ccb_h
.flags
& CAM_TAG_ACTION_VALID
)
553 hscb
->control
|= ccb
->csio
.tag_action
;
555 ahc_setup_data(ahc
, sim
, &ccb
->csio
, scb
);
560 case XPT_IMMED_NOTIFY
:
562 struct ahc_tmode_tstate
*tstate
;
563 struct ahc_tmode_lstate
*lstate
;
566 status
= ahc_find_tmode_devs(ahc
, sim
, ccb
, &tstate
,
569 if (status
!= CAM_REQ_CMP
) {
570 ccb
->ccb_h
.status
= status
;
574 SLIST_INSERT_HEAD(&lstate
->immed_notifies
, &ccb
->ccb_h
,
576 ccb
->ccb_h
.status
= CAM_REQ_INPROG
;
577 ahc_send_lstate_events(ahc
, lstate
);
580 case XPT_EN_LUN
: /* Enable LUN as a target */
581 ahc_handle_en_lun(ahc
, sim
, ccb
);
584 case XPT_ABORT
: /* Abort the specified CCB */
586 ahc_abort_ccb(ahc
, sim
, ccb
);
589 case XPT_SET_TRAN_SETTINGS
:
591 struct ahc_devinfo devinfo
;
592 struct ccb_trans_settings
*cts
;
593 struct ccb_trans_settings_scsi
*scsi
;
594 struct ccb_trans_settings_spi
*spi
;
595 struct ahc_initiator_tinfo
*tinfo
;
596 struct ahc_tmode_tstate
*tstate
;
597 uint16_t *discenable
;
602 scsi
= &cts
->proto_specific
.scsi
;
603 spi
= &cts
->xport_specific
.spi
;
604 ahc_compile_devinfo(&devinfo
, SIM_SCSI_ID(ahc
, sim
),
605 cts
->ccb_h
.target_id
,
606 cts
->ccb_h
.target_lun
,
607 SIM_CHANNEL(ahc
, sim
),
609 tinfo
= ahc_fetch_transinfo(ahc
, devinfo
.channel
,
611 devinfo
.target
, &tstate
);
613 if (cts
->type
== CTS_TYPE_CURRENT_SETTINGS
) {
614 update_type
|= AHC_TRANS_GOAL
;
615 discenable
= &tstate
->discenable
;
616 tagenable
= &tstate
->tagenable
;
617 tinfo
->curr
.protocol_version
=
618 cts
->protocol_version
;
619 tinfo
->curr
.transport_version
=
620 cts
->transport_version
;
621 tinfo
->goal
.protocol_version
=
622 cts
->protocol_version
;
623 tinfo
->goal
.transport_version
=
624 cts
->transport_version
;
625 } else if (cts
->type
== CTS_TYPE_USER_SETTINGS
) {
626 update_type
|= AHC_TRANS_USER
;
627 discenable
= &ahc
->user_discenable
;
628 tagenable
= &ahc
->user_tagenable
;
629 tinfo
->user
.protocol_version
=
630 cts
->protocol_version
;
631 tinfo
->user
.transport_version
=
632 cts
->transport_version
;
634 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
639 if ((spi
->valid
& CTS_SPI_VALID_DISC
) != 0) {
640 if ((spi
->flags
& CTS_SPI_FLAGS_DISC_ENB
) != 0)
641 *discenable
|= devinfo
.target_mask
;
643 *discenable
&= ~devinfo
.target_mask
;
646 if ((scsi
->valid
& CTS_SCSI_VALID_TQ
) != 0) {
647 if ((scsi
->flags
& CTS_SCSI_FLAGS_TAG_ENB
) != 0)
648 *tagenable
|= devinfo
.target_mask
;
650 *tagenable
&= ~devinfo
.target_mask
;
653 if ((spi
->valid
& CTS_SPI_VALID_BUS_WIDTH
) != 0) {
654 ahc_validate_width(ahc
, /*tinfo limit*/NULL
,
655 &spi
->bus_width
, ROLE_UNKNOWN
);
656 ahc_set_width(ahc
, &devinfo
, spi
->bus_width
,
657 update_type
, /*paused*/FALSE
);
660 if ((spi
->valid
& CTS_SPI_VALID_PPR_OPTIONS
) == 0) {
661 if (update_type
== AHC_TRANS_USER
)
662 spi
->ppr_options
= tinfo
->user
.ppr_options
;
664 spi
->ppr_options
= tinfo
->goal
.ppr_options
;
667 if ((spi
->valid
& CTS_SPI_VALID_SYNC_OFFSET
) == 0) {
668 if (update_type
== AHC_TRANS_USER
)
669 spi
->sync_offset
= tinfo
->user
.offset
;
671 spi
->sync_offset
= tinfo
->goal
.offset
;
674 if ((spi
->valid
& CTS_SPI_VALID_SYNC_RATE
) == 0) {
675 if (update_type
== AHC_TRANS_USER
)
676 spi
->sync_period
= tinfo
->user
.period
;
678 spi
->sync_period
= tinfo
->goal
.period
;
681 if (((spi
->valid
& CTS_SPI_VALID_SYNC_RATE
) != 0)
682 || ((spi
->valid
& CTS_SPI_VALID_SYNC_OFFSET
) != 0)) {
683 struct ahc_syncrate
*syncrate
;
686 if ((ahc
->features
& AHC_ULTRA2
) != 0)
687 maxsync
= AHC_SYNCRATE_DT
;
688 else if ((ahc
->features
& AHC_ULTRA
) != 0)
689 maxsync
= AHC_SYNCRATE_ULTRA
;
691 maxsync
= AHC_SYNCRATE_FAST
;
693 if (spi
->bus_width
!= MSG_EXT_WDTR_BUS_16_BIT
)
694 spi
->ppr_options
&= ~MSG_EXT_PPR_DT_REQ
;
696 syncrate
= ahc_find_syncrate(ahc
, &spi
->sync_period
,
699 ahc_validate_offset(ahc
, /*tinfo limit*/NULL
,
700 syncrate
, &spi
->sync_offset
,
701 spi
->bus_width
, ROLE_UNKNOWN
);
703 /* We use a period of 0 to represent async */
704 if (spi
->sync_offset
== 0) {
705 spi
->sync_period
= 0;
706 spi
->ppr_options
= 0;
709 ahc_set_syncrate(ahc
, &devinfo
, syncrate
,
710 spi
->sync_period
, spi
->sync_offset
,
711 spi
->ppr_options
, update_type
,
714 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
718 case XPT_GET_TRAN_SETTINGS
:
719 /* Get default/user set transfer settings for the target */
722 ahc_get_tran_settings(ahc
, SIM_SCSI_ID(ahc
, sim
),
723 SIM_CHANNEL(ahc
, sim
), &ccb
->cts
);
727 case XPT_CALC_GEOMETRY
:
731 extended
= SIM_IS_SCSIBUS_B(ahc
, sim
)
732 ? ahc
->flags
& AHC_EXTENDED_TRANS_B
733 : ahc
->flags
& AHC_EXTENDED_TRANS_A
;
734 cam_calc_geometry(&ccb
->ccg
, extended
);
738 case XPT_RESET_BUS
: /* Reset the specified SCSI bus */
742 found
= ahc_reset_channel(ahc
, SIM_CHANNEL(ahc
, sim
),
743 /*initiate reset*/TRUE
);
745 xpt_print_path(SIM_PATH(ahc
, sim
));
746 kprintf("SCSI bus reset delivered. "
747 "%d SCBs aborted.\n", found
);
749 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
753 case XPT_TERM_IO
: /* Terminate the I/O process */
755 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
758 case XPT_PATH_INQ
: /* Path routing inquiry */
760 struct ccb_pathinq
*cpi
= &ccb
->cpi
;
762 cpi
->version_num
= 1; /* XXX??? */
763 cpi
->hba_inquiry
= PI_SDTR_ABLE
|PI_TAG_ABLE
;
764 if ((ahc
->features
& AHC_WIDE
) != 0)
765 cpi
->hba_inquiry
|= PI_WIDE_16
;
766 if ((ahc
->features
& AHC_TARGETMODE
) != 0) {
767 cpi
->target_sprt
= PIT_PROCESSOR
771 cpi
->target_sprt
= 0;
774 cpi
->hba_eng_cnt
= 0;
775 cpi
->max_target
= (ahc
->features
& AHC_WIDE
) ? 15 : 7;
776 cpi
->max_lun
= AHC_NUM_LUNS
- 1;
777 if (SIM_IS_SCSIBUS_B(ahc
, sim
)) {
778 cpi
->initiator_id
= ahc
->our_id_b
;
779 if ((ahc
->flags
& AHC_RESET_BUS_B
) == 0)
780 cpi
->hba_misc
|= PIM_NOBUSRESET
;
782 cpi
->initiator_id
= ahc
->our_id
;
783 if ((ahc
->flags
& AHC_RESET_BUS_A
) == 0)
784 cpi
->hba_misc
|= PIM_NOBUSRESET
;
786 cpi
->bus_id
= cam_sim_bus(sim
);
787 cpi
->base_transfer_speed
= 3300;
788 strncpy(cpi
->sim_vid
, "FreeBSD", SIM_IDLEN
);
789 strncpy(cpi
->hba_vid
, "Adaptec", HBA_IDLEN
);
790 strncpy(cpi
->dev_name
, cam_sim_name(sim
), DEV_IDLEN
);
791 cpi
->unit_number
= cam_sim_unit(sim
);
792 cpi
->protocol
= PROTO_SCSI
;
793 cpi
->protocol_version
= SCSI_REV_2
;
794 cpi
->transport
= XPORT_SPI
;
795 cpi
->transport_version
= 2;
796 cpi
->xport_specific
.spi
.ppr_options
= SID_SPI_CLOCK_ST
;
797 if ((ahc
->features
& AHC_DT
) != 0) {
798 cpi
->transport_version
= 3;
799 cpi
->xport_specific
.spi
.ppr_options
=
802 cpi
->ccb_h
.status
= CAM_REQ_CMP
;
807 ccb
->ccb_h
.status
= CAM_PROVIDE_FAIL
;
814 ahc_get_tran_settings(struct ahc_softc
*ahc
, int our_id
, char channel
,
815 struct ccb_trans_settings
*cts
)
817 struct ahc_devinfo devinfo
;
818 struct ccb_trans_settings_scsi
*scsi
;
819 struct ccb_trans_settings_spi
*spi
;
820 struct ahc_initiator_tinfo
*targ_info
;
821 struct ahc_tmode_tstate
*tstate
;
822 struct ahc_transinfo
*tinfo
;
824 scsi
= &cts
->proto_specific
.scsi
;
825 spi
= &cts
->xport_specific
.spi
;
826 ahc_compile_devinfo(&devinfo
, our_id
,
827 cts
->ccb_h
.target_id
,
828 cts
->ccb_h
.target_lun
,
829 channel
, ROLE_UNKNOWN
);
830 targ_info
= ahc_fetch_transinfo(ahc
, devinfo
.channel
,
832 devinfo
.target
, &tstate
);
834 if (cts
->type
== CTS_TYPE_CURRENT_SETTINGS
)
835 tinfo
= &targ_info
->curr
;
837 tinfo
= &targ_info
->user
;
839 scsi
->flags
&= ~CTS_SCSI_FLAGS_TAG_ENB
;
840 spi
->flags
&= ~CTS_SPI_FLAGS_DISC_ENB
;
841 if (cts
->type
== CTS_TYPE_USER_SETTINGS
) {
842 if ((ahc
->user_discenable
& devinfo
.target_mask
) != 0)
843 spi
->flags
|= CTS_SPI_FLAGS_DISC_ENB
;
845 if ((ahc
->user_tagenable
& devinfo
.target_mask
) != 0)
846 scsi
->flags
|= CTS_SCSI_FLAGS_TAG_ENB
;
848 if ((tstate
->discenable
& devinfo
.target_mask
) != 0)
849 spi
->flags
|= CTS_SPI_FLAGS_DISC_ENB
;
851 if ((tstate
->tagenable
& devinfo
.target_mask
) != 0)
852 scsi
->flags
|= CTS_SCSI_FLAGS_TAG_ENB
;
854 cts
->protocol_version
= tinfo
->protocol_version
;
855 cts
->transport_version
= tinfo
->transport_version
;
857 spi
->sync_period
= tinfo
->period
;
858 spi
->sync_offset
= tinfo
->offset
;
859 spi
->bus_width
= tinfo
->width
;
860 spi
->ppr_options
= tinfo
->ppr_options
;
862 cts
->protocol
= PROTO_SCSI
;
863 cts
->transport
= XPORT_SPI
;
864 spi
->valid
= CTS_SPI_VALID_SYNC_RATE
865 | CTS_SPI_VALID_SYNC_OFFSET
866 | CTS_SPI_VALID_BUS_WIDTH
867 | CTS_SPI_VALID_PPR_OPTIONS
;
869 if (cts
->ccb_h
.target_lun
!= CAM_LUN_WILDCARD
) {
870 scsi
->valid
= CTS_SCSI_VALID_TQ
;
871 spi
->valid
|= CTS_SPI_VALID_DISC
;
876 cts
->ccb_h
.status
= CAM_REQ_CMP
;
/*
 * CAM async-event callback registered via XPT_SASYNC_CB in ahc_attach().
 * NOTE(review): extraction fragment — the switch(code)/case scaffolding
 * was dropped; the surviving statements are the AC_LOST_DEVICE handling,
 * which reverts the departed device's slot to async/narrow so the next
 * device at that ID renegotiates from scratch.
 */
880 ahc_async(void *callback_arg
, uint32_t code
, struct cam_path
*path
, void *arg
)
882 struct ahc_softc
*ahc
;
/* callback_arg is the SIM this event was registered against. */
885 sim
= (struct cam_sim
*)callback_arg
;
886 ahc
= (struct ahc_softc
*)cam_sim_softc(sim
);
890 struct ahc_devinfo devinfo
;
/* Identify the departed device from the event path. */
892 ahc_compile_devinfo(&devinfo
, SIM_SCSI_ID(ahc
, sim
),
893 xpt_path_target_id(path
),
894 xpt_path_lun_id(path
),
895 SIM_CHANNEL(ahc
, sim
),
899 * Revert to async/narrow transfers
900 * for the next device.
902 ahc_set_width(ahc
, &devinfo
, MSG_EXT_WDTR_BUS_8_BIT
,
903 AHC_TRANS_GOAL
|AHC_TRANS_CUR
, /*paused*/FALSE
);
904 ahc_set_syncrate(ahc
, &devinfo
, /*syncrate*/NULL
,
905 /*period*/0, /*offset*/0, /*ppr_options*/0,
906 AHC_TRANS_GOAL
|AHC_TRANS_CUR
,
916 ahc_execute_scb(void *arg
, bus_dma_segment_t
*dm_segs
, int nsegments
,
921 struct ahc_softc
*ahc
;
922 struct ahc_initiator_tinfo
*tinfo
;
923 struct ahc_tmode_tstate
*tstate
;
926 scb
= (struct scb
*)arg
;
928 ahc
= scb
->ahc_softc
;
932 aic_set_transaction_status(scb
, CAM_REQ_TOO_BIG
);
934 aic_set_transaction_status(scb
, CAM_REQ_CMP_ERR
);
936 bus_dmamap_unload(ahc
->buffer_dmat
, scb
->dmamap
);
937 ahc_free_scb(ahc
, scb
);
941 if (nsegments
!= 0) {
942 struct ahc_dma_seg
*sg
;
943 bus_dma_segment_t
*end_seg
;
946 end_seg
= dm_segs
+ nsegments
;
948 /* Copy the segments into our SG list */
950 while (dm_segs
< end_seg
) {
953 sg
->addr
= aic_htole32(dm_segs
->ds_addr
);
954 len
= dm_segs
->ds_len
955 | ((dm_segs
->ds_addr
>> 8) & 0x7F000000);
956 sg
->len
= aic_htole32(len
);
962 * Note where to find the SG entries in bus space.
963 * We also set the full residual flag which the
964 * sequencer will clear as soon as a data transfer
967 scb
->hscb
->sgptr
= aic_htole32(scb
->sg_list_phys
|SG_FULL_RESID
);
969 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
)
970 op
= BUS_DMASYNC_PREREAD
;
972 op
= BUS_DMASYNC_PREWRITE
;
974 bus_dmamap_sync(ahc
->buffer_dmat
, scb
->dmamap
, op
);
976 if (ccb
->ccb_h
.func_code
== XPT_CONT_TARGET_IO
) {
977 struct target_data
*tdata
;
979 tdata
= &scb
->hscb
->shared_data
.tdata
;
980 tdata
->target_phases
|= DPHASE_PENDING
;
982 * CAM data direction is relative to the initiator.
984 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
)
985 tdata
->data_phase
= P_DATAOUT
;
987 tdata
->data_phase
= P_DATAIN
;
990 * If the transfer is of an odd length and in the
991 * "in" direction (scsi->HostBus), then it may
992 * trigger a bug in the 'WideODD' feature of
993 * non-Ultra2 chips. Force the total data-length
994 * to be even by adding an extra, 1 byte, SG,
995 * element. We do this even if we are not currently
996 * negotiated wide as negotiation could occur before
997 * this command is executed.
999 if ((ahc
->bugs
& AHC_TMODE_WIDEODD_BUG
) != 0
1000 && (ccb
->csio
.dxfer_len
& 0x1) != 0
1001 && (ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
) {
1004 if (nsegments
> AHC_NSEG
) {
1006 aic_set_transaction_status(scb
,
1008 bus_dmamap_unload(ahc
->buffer_dmat
,
1010 ahc_free_scb(ahc
, scb
);
1014 sg
->addr
= aic_htole32(ahc
->dma_bug_buf
);
1015 sg
->len
= aic_htole32(1);
1020 sg
->len
|= aic_htole32(AHC_DMA_LAST_SEG
);
1022 /* Copy the first SG into the "current" data pointer area */
1023 scb
->hscb
->dataptr
= scb
->sg_list
->addr
;
1024 scb
->hscb
->datacnt
= scb
->sg_list
->len
;
1026 scb
->hscb
->sgptr
= aic_htole32(SG_LIST_NULL
);
1027 scb
->hscb
->dataptr
= 0;
1028 scb
->hscb
->datacnt
= 0;
1031 scb
->sg_count
= nsegments
;
1034 * Last time we need to check if this SCB needs to
1037 if (aic_get_transaction_status(scb
) != CAM_REQ_INPROG
) {
1039 bus_dmamap_unload(ahc
->buffer_dmat
, scb
->dmamap
);
1040 ahc_free_scb(ahc
, scb
);
1045 tinfo
= ahc_fetch_transinfo(ahc
, SCSIID_CHANNEL(ahc
, scb
->hscb
->scsiid
),
1046 SCSIID_OUR_ID(scb
->hscb
->scsiid
),
1047 SCSIID_TARGET(ahc
, scb
->hscb
->scsiid
),
1050 mask
= SCB_GET_TARGET_MASK(ahc
, scb
);
1051 scb
->hscb
->scsirate
= tinfo
->scsirate
;
1052 scb
->hscb
->scsioffset
= tinfo
->curr
.offset
;
1053 if ((tstate
->ultraenb
& mask
) != 0)
1054 scb
->hscb
->control
|= ULTRAENB
;
1056 if ((tstate
->discenable
& mask
) != 0
1057 && (ccb
->ccb_h
.flags
& CAM_DIS_DISCONNECT
) == 0)
1058 scb
->hscb
->control
|= DISCENB
;
1060 if ((ccb
->ccb_h
.flags
& CAM_NEGOTIATE
) != 0
1061 && (tinfo
->goal
.width
!= 0
1062 || tinfo
->goal
.offset
!= 0
1063 || tinfo
->goal
.ppr_options
!= 0)) {
1064 scb
->flags
|= SCB_NEGOTIATE
;
1065 scb
->hscb
->control
|= MK_MESSAGE
;
1066 } else if ((tstate
->auto_negotiate
& mask
) != 0) {
1067 scb
->flags
|= SCB_AUTO_NEGOTIATE
;
1068 scb
->hscb
->control
|= MK_MESSAGE
;
1071 LIST_INSERT_HEAD(&ahc
->pending_scbs
, scb
, pending_links
);
1073 ccb
->ccb_h
.status
|= CAM_SIM_QUEUED
;
1076 * We only allow one untagged transaction
1077 * per target in the initiator role unless
1078 * we are storing a full busy target *lun*
1079 * table in SCB space.
1081 if ((scb
->hscb
->control
& (TARGET_SCB
|TAG_ENB
)) == 0
1082 && (ahc
->flags
& AHC_SCB_BTT
) == 0) {
1083 struct scb_tailq
*untagged_q
;
1086 target_offset
= SCB_GET_TARGET_OFFSET(ahc
, scb
);
1087 untagged_q
= &(ahc
->untagged_queues
[target_offset
]);
1088 TAILQ_INSERT_TAIL(untagged_q
, scb
, links
.tqe
);
1089 scb
->flags
|= SCB_UNTAGGEDQ
;
1090 if (TAILQ_FIRST(untagged_q
) != scb
) {
1094 scb
->flags
|= SCB_ACTIVE
;
1097 * Timers are disabled while recovery is in progress.
1099 aic_scb_timer_start(scb
);
1101 if ((scb
->flags
& SCB_TARGET_IMMEDIATE
) != 0) {
1102 /* Define a mapping from our tag to the SCB. */
1103 ahc
->scb_data
->scbindex
[scb
->hscb
->tag
] = scb
;
1105 if ((ahc
->flags
& AHC_PAGESCBS
) == 0)
1106 ahc_outb(ahc
, SCBPTR
, scb
->hscb
->tag
);
1107 ahc_outb(ahc
, TARG_IMMEDIATE_SCB
, scb
->hscb
->tag
);
1110 ahc_queue_scb(ahc
, scb
);
/*
 * CAM polled-mode entry point: service the controller by hand when
 * interrupts cannot be relied upon (crash dumps, early boot probing).
 */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)cam_sim_softc(sim);
	ahc_intr(ahc);
}
1124 ahc_setup_data(struct ahc_softc
*ahc
, struct cam_sim
*sim
,
1125 struct ccb_scsiio
*csio
, struct scb
*scb
)
1127 struct hardware_scb
*hscb
;
1128 struct ccb_hdr
*ccb_h
;
1131 ccb_h
= &csio
->ccb_h
;
1134 csio
->sense_resid
= 0;
1135 if (ccb_h
->func_code
== XPT_SCSI_IO
) {
1136 hscb
->cdb_len
= csio
->cdb_len
;
1137 if ((ccb_h
->flags
& CAM_CDB_POINTER
) != 0) {
1139 if (hscb
->cdb_len
> sizeof(hscb
->cdb32
)
1140 || (ccb_h
->flags
& CAM_CDB_PHYS
) != 0) {
1141 aic_set_transaction_status(scb
,
1143 ahc_free_scb(ahc
, scb
);
1144 xpt_done((union ccb
*)csio
);
1147 if (hscb
->cdb_len
> 12) {
1149 csio
->cdb_io
.cdb_ptr
,
1151 scb
->flags
|= SCB_CDB32_PTR
;
1153 memcpy(hscb
->shared_data
.cdb
,
1154 csio
->cdb_io
.cdb_ptr
,
1158 if (hscb
->cdb_len
> 12) {
1159 memcpy(hscb
->cdb32
, csio
->cdb_io
.cdb_bytes
,
1161 scb
->flags
|= SCB_CDB32_PTR
;
1163 memcpy(hscb
->shared_data
.cdb
,
1164 csio
->cdb_io
.cdb_bytes
,
1170 /* Only use S/G if there is a transfer */
1171 if ((ccb_h
->flags
& CAM_DIR_MASK
) != CAM_DIR_NONE
) {
1172 if ((ccb_h
->flags
& CAM_SCATTER_VALID
) == 0) {
1173 /* We've been given a pointer to a single buffer */
1174 if ((ccb_h
->flags
& CAM_DATA_PHYS
) == 0) {
1178 error
= bus_dmamap_load(ahc
->buffer_dmat
,
1184 if (error
== EINPROGRESS
) {
1186 * So as to maintain ordering,
1187 * freeze the controller queue
1188 * until our mapping is
1191 xpt_freeze_simq(sim
,
1193 scb
->io_ctx
->ccb_h
.status
|=
1198 struct bus_dma_segment seg
;
1200 /* Pointer to physical buffer */
1201 if (csio
->dxfer_len
> AHC_MAXTRANSFER_SIZE
)
1202 panic("ahc_setup_data - Transfer size "
1203 "larger than can device max");
1206 (bus_addr_t
)(vm_offset_t
)csio
->data_ptr
;
1207 seg
.ds_len
= csio
->dxfer_len
;
1208 ahc_execute_scb(scb
, &seg
, 1, 0);
1211 struct bus_dma_segment
*segs
;
1213 if ((ccb_h
->flags
& CAM_DATA_PHYS
) != 0)
1214 panic("ahc_setup_data - Physical segment "
1215 "pointers unsupported");
1217 if ((ccb_h
->flags
& CAM_SG_LIST_PHYS
) == 0)
1218 panic("ahc_setup_data - Virtual segment "
1219 "addresses unsupported");
1221 /* Just use the segments provided */
1222 segs
= (struct bus_dma_segment
*)csio
->data_ptr
;
1223 ahc_execute_scb(scb
, segs
, csio
->sglist_cnt
, 0);
1226 ahc_execute_scb(scb
, NULL
, 0, 0);
1231 ahc_abort_ccb(struct ahc_softc
*ahc
, struct cam_sim
*sim
, union ccb
*ccb
)
1233 union ccb
*abort_ccb
;
1235 abort_ccb
= ccb
->cab
.abort_ccb
;
1236 switch (abort_ccb
->ccb_h
.func_code
) {
1237 case XPT_ACCEPT_TARGET_IO
:
1238 case XPT_IMMED_NOTIFY
:
1239 case XPT_CONT_TARGET_IO
:
1241 struct ahc_tmode_tstate
*tstate
;
1242 struct ahc_tmode_lstate
*lstate
;
1243 struct ccb_hdr_slist
*list
;
1246 status
= ahc_find_tmode_devs(ahc
, sim
, abort_ccb
, &tstate
,
1249 if (status
!= CAM_REQ_CMP
) {
1250 ccb
->ccb_h
.status
= status
;
1254 if (abort_ccb
->ccb_h
.func_code
== XPT_ACCEPT_TARGET_IO
)
1255 list
= &lstate
->accept_tios
;
1256 else if (abort_ccb
->ccb_h
.func_code
== XPT_IMMED_NOTIFY
)
1257 list
= &lstate
->immed_notifies
;
1262 struct ccb_hdr
*curelm
;
1265 curelm
= SLIST_FIRST(list
);
1267 if (curelm
== &abort_ccb
->ccb_h
) {
1269 SLIST_REMOVE_HEAD(list
, sim_links
.sle
);
1271 while(curelm
!= NULL
) {
1272 struct ccb_hdr
*nextelm
;
1275 SLIST_NEXT(curelm
, sim_links
.sle
);
1277 if (nextelm
== &abort_ccb
->ccb_h
) {
1290 abort_ccb
->ccb_h
.status
= CAM_REQ_ABORTED
;
1291 xpt_done(abort_ccb
);
1292 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
1294 xpt_print_path(abort_ccb
->ccb_h
.path
);
1295 kprintf("Not found\n");
1296 ccb
->ccb_h
.status
= CAM_PATH_INVALID
;
1303 /* XXX Fully implement the hard ones */
1304 ccb
->ccb_h
.status
= CAM_UA_ABORT
;
1307 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
/*
 * Deliver an asynchronous event (typically a transfer-negotiation
 * update) to CAM for the given channel/target/lun.
 * NOTE(review): extraction fragment — locals, braces, the switch(code)
 * scaffolding and several statements were dropped; only the surviving
 * lines are annotated.
 */
1314 ahc_send_async(struct ahc_softc
*ahc
, char channel
, u_int target
,
1315 u_int lun
, ac_code code
, void *opt_arg
)
1317 struct ccb_trans_settings cts
;
1318 struct cam_path
*path
;
/* Build a path for the addressed device; bail if CAM refuses. */
1323 error
= ahc_create_path(ahc
, channel
, target
, lun
, &path
);
1325 if (error
!= CAM_REQ_CMP
)
/* Transfer-negotiation events carry the current settings as payload. */
1329 case AC_TRANSFER_NEG
:
1331 struct ccb_trans_settings_scsi
*scsi
;
1333 cts
.type
= CTS_TYPE_CURRENT_SETTINGS
;
1334 scsi
= &cts
.proto_specific
.scsi
;
1335 cts
.ccb_h
.path
= path
;
1336 cts
.ccb_h
.target_id
= target
;
1337 cts
.ccb_h
.target_lun
= lun
;
/* Populate cts with the channel-appropriate current settings. */
1338 ahc_get_tran_settings(ahc
, channel
== 'A' ? ahc
->our_id
/* Start with tag-queuing state marked unknown/disabled... */
1342 scsi
->valid
&= ~CTS_SCSI_VALID_TQ
;
1343 scsi
->flags
&= ~CTS_SCSI_FLAGS_TAG_ENB
;
1344 if (opt_arg
== NULL
)
/* ...then report it when the caller supplied a queue algorithm. */
1346 if (*((ahc_queue_alg
*)opt_arg
) == AHC_QUEUE_TAGGED
)
/*
 * NOTE(review): "|= ~CTS_SCSI_FLAGS_TAG_ENB" sets every flag bit
 * EXCEPT TAG_ENB (contrast the "&= ~" clears above, where ~ is
 * correct).  Almost certainly a typo for
 * "scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;" — confirm against the
 * upstream FreeBSD driver.
 */
1347 scsi
->flags
|= ~CTS_SCSI_FLAGS_TAG_ENB
;
1348 scsi
->valid
|= CTS_SCSI_VALID_TQ
;
1355 panic("ahc_send_async: Unexpected async event");
/* Hand the event to CAM and release our temporary path. */
1357 xpt_async(code
, path
, arg
);
1358 xpt_free_path(path
);
/*
 * Platform hook invoked by the core driver when a device's tagged-queuing
 * state changes.  This OSM keeps no per-device state of its own, so there
 * is nothing to do.  (NOTE(review): the return-type line was dropped from
 * the extraction; void matches the empty body — confirm against the
 * upstream OSM header.)
 */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}
1368 ahc_platform_alloc(struct ahc_softc
*ahc
, void *platform_arg
)
1370 ahc
->platform_data
= kmalloc(sizeof(struct ahc_platform_data
), M_DEVBUF
,
1371 M_INTWAIT
| M_ZERO
);
/*
 * Release every OSM-owned resource attached to this controller:
 * register and IRQ bus resources, both CAM SIMs/paths, the shutdown
 * event handler, and finally the platform_data block itself.
 * NOTE(review): extraction fragment — resource ids/handles passed to
 * bus_release_resource() and some braces were dropped.
 */
1376 ahc_platform_free(struct ahc_softc
*ahc
)
1378 struct ahc_platform_data
*pdata
;
1380 pdata
= ahc
->platform_data
;
1381 if (pdata
!= NULL
) {
/* Register window, if mapped. */
1382 if (pdata
->regs
!= NULL
)
1383 bus_release_resource(ahc
->dev_softc
,
1384 pdata
->regs_res_type
,
/* Interrupt resource, if allocated. */
1388 if (pdata
->irq
!= NULL
)
1389 bus_release_resource(ahc
->dev_softc
,
1390 pdata
->irq_res_type
,
/* Secondary (channel B) SIM: notify CAM, then tear down. */
1393 if (pdata
->sim_b
!= NULL
) {
1394 xpt_async(AC_LOST_DEVICE
, pdata
->path_b
, NULL
);
1395 xpt_free_path(pdata
->path_b
);
1396 xpt_bus_deregister(cam_sim_path(pdata
->sim_b
));
1397 cam_sim_free(pdata
->sim_b
);
/* Primary SIM: same sequence. */
1399 if (pdata
->sim
!= NULL
) {
1400 xpt_async(AC_LOST_DEVICE
, pdata
->path
, NULL
);
1401 xpt_free_path(pdata
->path
);
1402 xpt_bus_deregister(cam_sim_path(pdata
->sim
));
1403 cam_sim_free(pdata
->sim
);
/* Shutdown hook registered in ahc_attach(). */
1405 if (pdata
->eh
!= NULL
)
1406 EVENTHANDLER_DEREGISTER(shutdown_post_sync
, pdata
->eh
);
1407 kfree(ahc
->platform_data
, M_DEVBUF
);
/*
 * Ordering predicate for sorting controller softcs during attach.
 * We don't sort softcs under FreeBSD, so every pair compares equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}
/*
 * Device detach method: unlink the controller from the global list,
 * quiesce its interrupt, and tear down the interrupt hook.
 * NOTE(review): extraction fragment — interior lines (locking and the
 * final core-driver free, presumably) were dropped.
 */
1419 ahc_detach(device_t dev
)
1421 struct ahc_softc
*ahc
;
1423 device_printf(dev
, "detaching device\n");
1424 ahc
= device_get_softc(dev
);
/* Remove from the driver-wide controller list. */
1426 TAILQ_REMOVE(&ahc_tailq
, ahc
, links
);
/* Silence the chip before unhooking the handler. */
1427 ahc_intr_enable(ahc
, FALSE
);
1428 bus_teardown_intr(dev
, ahc
->platform_data
->irq
, ahc
->platform_data
->ih
);
/*
 * Debug helper: hex-dump the raw bytes of a received target-mode command
 * block.  Walks from the initiator_channel field of this command to the
 * same field of the next array slot, i.e. over one struct target_cmd.
 * NOTE(review): extraction fragment — the loop's formatting/separator
 * lines were dropped.
 */
1436 ahc_dump_targcmd(struct target_cmd
*cmd
)
1442 byte
= &cmd
->initiator_channel
;
1443 /* Debugging info for received commands */
/* &cmd[1] is one-past-the-end of this command: a valid end pointer. */
1444 last_byte
= &cmd
[1].initiator_channel
;
1447 while (byte
< last_byte
) {
1450 kprintf("%#x", *byte
++);
1463 ahc_modevent(module_t mod
, int type
, void *data
)
1465 /* XXX Deal with busy status on unload. */
1466 /* XXX Deal with unknown events */
/*
 * Kernel module glue: register the "ahc" module after the CAM layer,
 * depending on cam >= 1.  NOTE(review): extraction fragment — the
 * moduledata_t initializer members (name, event handler, private data)
 * were dropped; ahc_modevent above is presumably the event handler.
 */
1470 static moduledata_t ahc_mod
= {
1476 DECLARE_MODULE(ahc
, ahc_mod
, SI_SUB_DRIVERS
, SI_ORDER_MIDDLE
);
1477 MODULE_DEPEND(ahc
, cam
, 1, 1, 1);
1478 MODULE_VERSION(ahc
, 1);