2 * CAM SCSI interface for the Advanced Systems Inc.
3 * Second Generation SCSI controllers.
5 * Product specific probe and attach routines can be found in:
7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W
9 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * $FreeBSD: src/sys/dev/advansys/adwcam.c,v 1.7.2.2 2001/03/05 13:08:55 obrien Exp $
37 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
39 * Copyright (c) 1995-1998 Advanced System Products, Inc.
40 * All Rights Reserved.
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that redistributions of source
44 * code retain the above copyright notice and this comment without
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/malloc.h>
53 #include <sys/thread2.h>
55 #include <machine/clock.h>
59 #include <bus/cam/cam.h>
60 #include <bus/cam/cam_ccb.h>
61 #include <bus/cam/cam_sim.h>
62 #include <bus/cam/cam_xpt_sim.h>
63 #include <bus/cam/cam_debug.h>
65 #include <bus/cam/scsi/scsi_message.h>
69 /* Definitions for our use of the SIM private CCB area */
70 #define ccb_acb_ptr spriv_ptr0
71 #define ccb_adw_ptr spriv_ptr1
75 static __inline cam_status
adwccbstatus(union ccb
*);
76 static __inline
struct acb
* adwgetacb(struct adw_softc
*adw
);
77 static __inline
void adwfreeacb(struct adw_softc
*adw
,
80 static void adwmapmem(void *arg
, bus_dma_segment_t
*segs
,
82 static struct sg_map_node
*
83 adwallocsgmap(struct adw_softc
*adw
);
84 static int adwallocacbs(struct adw_softc
*adw
);
86 static void adwexecuteacb(void *arg
, bus_dma_segment_t
*dm_segs
,
88 static void adw_action(struct cam_sim
*sim
, union ccb
*ccb
);
89 static void adw_poll(struct cam_sim
*sim
);
90 static void adw_async(void *callback_arg
, u_int32_t code
,
91 struct cam_path
*path
, void *arg
);
92 static void adwprocesserror(struct adw_softc
*adw
, struct acb
*acb
);
93 static void adwtimeout(void *arg
);
94 static void adw_handle_device_reset(struct adw_softc
*adw
,
96 static void adw_handle_bus_reset(struct adw_softc
*adw
,
/*
 * adwccbstatus:
 *	Return only the CAM status bits (CAM_STATUS_MASK) of the
 *	CCB header's status field.
 *
 * NOTE(review): this chunk is a garbled extraction; lines holding
 * only braces (original lines 101/103) are missing from this view.
 * Code below is left byte-identical.
 */
99 static __inline cam_status
100 adwccbstatus(union ccb
* ccb
)
102 return (ccb
->ccb_h
.status
& CAM_STATUS_MASK
);
/*
 * adwgetacb:
 *	Pop an ACB from the softc's free list.  If the list is empty
 *	and fewer than max_acbs exist, more ACBs are presumably
 *	allocated (the allocation call, original line ~114, is not
 *	visible here) and the list is retried; the kprintf marks the
 *	allocation-failure diagnostic path.
 *
 * NOTE(review): original lines 107-110, 114, 116, 118 and 120-127
 * (braces, local declarations, the allocation call and the return)
 * are missing from this extraction -- annotated only, not
 * reconstructed.
 */
105 static __inline
struct acb
*
106 adwgetacb(struct adw_softc
*adw
)
111 if ((acb
= SLIST_FIRST(&adw
->free_acb_list
)) != NULL
) {
112 SLIST_REMOVE_HEAD(&adw
->free_acb_list
, links
);
113 } else if (adw
->num_acbs
< adw
->max_acbs
) {
115 acb
= SLIST_FIRST(&adw
->free_acb_list
);
117 kprintf("%s: Can't malloc ACB\n", adw_name(adw
));
119 SLIST_REMOVE_HEAD(&adw
->free_acb_list
, links
);
/*
 * adwfreeacb:
 *	Return an ACB to the softc's free list.  If the ACB was still
 *	marked ACB_ACTIVE its CCB is first unlinked from the pending
 *	list.  If ACB_RELEASE_SIMQ is set, or a resource shortage was
 *	flagged on the softc, the CCB is marked CAM_RELEASE_SIMQ so
 *	CAM will release the frozen SIM queue; in the shortage case
 *	the shortage flag is then cleared.  Finally the ACB is marked
 *	ACB_FREE and pushed onto the free list.
 *
 * NOTE(review): brace-only lines (e.g. original 129-130, 139, 142)
 * are missing from this extraction; code left byte-identical.
 */
128 adwfreeacb(struct adw_softc
*adw
, struct acb
*acb
)
131 if ((acb
->state
& ACB_ACTIVE
) != 0)
132 LIST_REMOVE(&acb
->ccb
->ccb_h
, sim_links
.le
);
133 if ((acb
->state
& ACB_RELEASE_SIMQ
) != 0)
134 acb
->ccb
->ccb_h
.status
|= CAM_RELEASE_SIMQ
;
135 else if ((adw
->state
& ADW_RESOURCE_SHORTAGE
) != 0
136 && (acb
->ccb
->ccb_h
.status
& CAM_RELEASE_SIMQ
) == 0) {
137 acb
->ccb
->ccb_h
.status
|= CAM_RELEASE_SIMQ
;
138 adw
->state
&= ~ADW_RESOURCE_SHORTAGE
;
140 acb
->state
= ACB_FREE
;
141 SLIST_INSERT_HEAD(&adw
->free_acb_list
, acb
, links
);
/*
 * adwmapmem:
 *	bus_dmamap_load() callback: record the bus address of the
 *	first DMA segment into the bus_addr_t pointed to by 'arg'.
 *	Only segs[0].ds_addr is stored; 'nseg' and 'error' are not
 *	examined in the visible code (the S/G tag is created with
 *	nsegments=1 later in this file, so a single segment is
 *	expected -- other callers not fully visible, confirm).
 */
146 adwmapmem(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
)
148 bus_addr_t
*busaddrp
;
150 busaddrp
= (bus_addr_t
*)arg
;
151 *busaddrp
= segs
->ds_addr
;
/*
 * adwallocsgmap:
 *	Allocate one sg_map_node: kmalloc the node, bus_dmamem_alloc()
 *	a DMA-able S/G area into it (freeing the node on failure),
 *	link the node onto the softc's sg_maps list, permanently load
 *	the map via bus_dmamap_load()/adwmapmem (which records
 *	sg_physaddr), and bzero the PAGE_SIZE area.
 *
 * NOTE(review): the failure-path return and the final return of
 * sg_map (original lines ~165-176) are missing from this
 * extraction; code left byte-identical.
 */
154 static struct sg_map_node
*
155 adwallocsgmap(struct adw_softc
*adw
)
157 struct sg_map_node
*sg_map
;
159 sg_map
= kmalloc(sizeof(*sg_map
), M_DEVBUF
, M_INTWAIT
);
161 /* Allocate S/G space for the next batch of ACBs */
162 if (bus_dmamem_alloc(adw
->sg_dmat
, (void *)&sg_map
->sg_vaddr
,
163 BUS_DMA_NOWAIT
, &sg_map
->sg_dmamap
) != 0) {
164 kfree(sg_map
, M_DEVBUF
);
168 SLIST_INSERT_HEAD(&adw
->sg_maps
, sg_map
, links
);
170 bus_dmamap_load(adw
->sg_dmat
, sg_map
->sg_dmamap
, sg_map
->sg_vaddr
,
171 PAGE_SIZE
, adwmapmem
, &sg_map
->sg_physaddr
, /*flags*/0);
173 bzero(sg_map
->sg_vaddr
, PAGE_SIZE
);
178 * Allocate another chunk of CCB's. Return count of entries added.
179 * Assumed to be called under crit_enter().
182 adwallocacbs(struct adw_softc
*adw
)
184 struct acb
*next_acb
;
185 struct sg_map_node
*sg_map
;
187 struct adw_sg_block
*blocks
;
191 next_acb
= &adw
->acbs
[adw
->num_acbs
];
192 sg_map
= adwallocsgmap(adw
);
197 blocks
= sg_map
->sg_vaddr
;
198 busaddr
= sg_map
->sg_physaddr
;
200 newcount
= (PAGE_SIZE
/ (ADW_SG_BLOCKCNT
* sizeof(*blocks
)));
201 for (i
= 0; adw
->num_acbs
< adw
->max_acbs
&& i
< newcount
; i
++) {
204 error
= bus_dmamap_create(adw
->buffer_dmat
, /*flags*/0,
208 next_acb
->queue
.scsi_req_baddr
= acbvtob(adw
, next_acb
);
209 next_acb
->queue
.scsi_req_bo
= acbvtobo(adw
, next_acb
);
210 next_acb
->queue
.sense_baddr
=
211 acbvtob(adw
, next_acb
) + offsetof(struct acb
, sense_data
);
212 next_acb
->sg_blocks
= blocks
;
213 next_acb
->sg_busaddr
= busaddr
;
214 next_acb
->state
= ACB_FREE
;
215 SLIST_INSERT_HEAD(&adw
->free_acb_list
, next_acb
, links
);
216 blocks
+= ADW_SG_BLOCKCNT
;
217 busaddr
+= ADW_SG_BLOCKCNT
* sizeof(*blocks
);
225 adwexecuteacb(void *arg
, bus_dma_segment_t
*dm_segs
, int nseg
, int error
)
229 struct adw_softc
*adw
;
231 acb
= (struct acb
*)arg
;
233 adw
= (struct adw_softc
*)ccb
->ccb_h
.ccb_adw_ptr
;
237 kprintf("%s: Unexpected error 0x%x returned from "
238 "bus_dmamap_load\n", adw_name(adw
), error
);
239 if (ccb
->ccb_h
.status
== CAM_REQ_INPROG
) {
240 xpt_freeze_devq(ccb
->ccb_h
.path
, /*count*/1);
241 ccb
->ccb_h
.status
= CAM_REQ_TOO_BIG
|CAM_DEV_QFRZN
;
243 adwfreeacb(adw
, acb
);
251 acb
->queue
.data_addr
= dm_segs
[0].ds_addr
;
252 acb
->queue
.data_cnt
= ccb
->csio
.dxfer_len
;
254 struct adw_sg_block
*sg_block
;
255 struct adw_sg_elm
*sg
;
256 bus_addr_t sg_busaddr
;
258 bus_dma_segment_t
*end_seg
;
260 end_seg
= dm_segs
+ nseg
;
262 sg_busaddr
= acb
->sg_busaddr
;
264 /* Copy the segments into our SG list */
265 for (sg_block
= acb
->sg_blocks
;; sg_block
++) {
268 sg
= sg_block
->sg_list
;
269 for (i
= 0; i
< ADW_NO_OF_SG_PER_BLOCK
; i
++) {
270 if (dm_segs
>= end_seg
)
273 sg
->sg_addr
= dm_segs
->ds_addr
;
274 sg
->sg_count
= dm_segs
->ds_len
;
278 sg_block
->sg_cnt
= i
;
280 if (dm_segs
== end_seg
) {
281 sg_block
->sg_busaddr_next
= 0;
285 sizeof(struct adw_sg_block
);
286 sg_block
->sg_busaddr_next
= sg_busaddr
;
289 acb
->queue
.sg_real_addr
= acb
->sg_busaddr
;
291 acb
->queue
.sg_real_addr
= 0;
294 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
)
295 op
= BUS_DMASYNC_PREREAD
;
297 op
= BUS_DMASYNC_PREWRITE
;
299 bus_dmamap_sync(adw
->buffer_dmat
, acb
->dmamap
, op
);
302 acb
->queue
.data_addr
= 0;
303 acb
->queue
.data_cnt
= 0;
304 acb
->queue
.sg_real_addr
= 0;
310 * Last time we need to check if this CCB needs to
313 if (ccb
->ccb_h
.status
!= CAM_REQ_INPROG
) {
315 bus_dmamap_unload(adw
->buffer_dmat
, acb
->dmamap
);
316 adwfreeacb(adw
, acb
);
322 acb
->state
|= ACB_ACTIVE
;
323 ccb
->ccb_h
.status
|= CAM_SIM_QUEUED
;
324 LIST_INSERT_HEAD(&adw
->pending_ccbs
, &ccb
->ccb_h
, sim_links
.le
);
325 callout_reset(&ccb
->ccb_h
.timeout_ch
, (ccb
->ccb_h
.timeout
* hz
) / 1000,
328 adw_send_acb(adw
, acb
, acbvtob(adw
, acb
));
334 adw_action(struct cam_sim
*sim
, union ccb
*ccb
)
336 struct adw_softc
*adw
;
338 CAM_DEBUG(ccb
->ccb_h
.path
, CAM_DEBUG_TRACE
, ("adw_action\n"));
340 adw
= (struct adw_softc
*)cam_sim_softc(sim
);
342 switch (ccb
->ccb_h
.func_code
) {
343 /* Common cases first */
344 case XPT_SCSI_IO
: /* Execute the requested I/O operation */
346 struct ccb_scsiio
*csio
;
347 struct ccb_hdr
*ccbh
;
353 /* Max supported CDB length is 12 bytes */
354 if (csio
->cdb_len
> 12) {
355 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
360 if ((acb
= adwgetacb(adw
)) == NULL
) {
362 adw
->state
|= ADW_RESOURCE_SHORTAGE
;
364 xpt_freeze_simq(sim
, /*count*/1);
365 ccb
->ccb_h
.status
= CAM_REQUEUE_REQ
;
370 /* Link acb and ccb so we can find one from the other */
372 ccb
->ccb_h
.ccb_acb_ptr
= acb
;
373 ccb
->ccb_h
.ccb_adw_ptr
= adw
;
376 acb
->queue
.target_cmd
= 0;
377 acb
->queue
.target_id
= ccb
->ccb_h
.target_id
;
378 acb
->queue
.target_lun
= ccb
->ccb_h
.target_lun
;
380 acb
->queue
.mflag
= 0;
381 acb
->queue
.sense_len
=
382 MIN(csio
->sense_len
, sizeof(acb
->sense_data
));
383 acb
->queue
.cdb_len
= csio
->cdb_len
;
384 if ((ccb
->ccb_h
.flags
& CAM_TAG_ACTION_VALID
) != 0) {
385 switch (csio
->tag_action
) {
386 case MSG_SIMPLE_Q_TAG
:
387 acb
->queue
.scsi_cntl
= ADW_QSC_SIMPLE_Q_TAG
;
389 case MSG_HEAD_OF_Q_TAG
:
390 acb
->queue
.scsi_cntl
= ADW_QSC_HEAD_OF_Q_TAG
;
392 case MSG_ORDERED_Q_TAG
:
393 acb
->queue
.scsi_cntl
= ADW_QSC_ORDERED_Q_TAG
;
396 acb
->queue
.scsi_cntl
= ADW_QSC_NO_TAGMSG
;
400 acb
->queue
.scsi_cntl
= ADW_QSC_NO_TAGMSG
;
402 if ((ccb
->ccb_h
.flags
& CAM_DIS_DISCONNECT
) != 0)
403 acb
->queue
.scsi_cntl
|= ADW_QSC_NO_DISC
;
405 acb
->queue
.done_status
= 0;
406 acb
->queue
.scsi_status
= 0;
407 acb
->queue
.host_status
= 0;
408 acb
->queue
.sg_wk_ix
= 0;
409 if ((ccb
->ccb_h
.flags
& CAM_CDB_POINTER
) != 0) {
410 if ((ccb
->ccb_h
.flags
& CAM_CDB_PHYS
) == 0) {
411 bcopy(csio
->cdb_io
.cdb_ptr
,
412 acb
->queue
.cdb
, csio
->cdb_len
);
414 /* I guess I could map it in... */
415 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
416 adwfreeacb(adw
, acb
);
421 bcopy(csio
->cdb_io
.cdb_bytes
,
422 acb
->queue
.cdb
, csio
->cdb_len
);
426 * If we have any data to send with this command,
427 * map it into bus space.
429 if ((ccbh
->flags
& CAM_DIR_MASK
) != CAM_DIR_NONE
) {
430 if ((ccbh
->flags
& CAM_SCATTER_VALID
) == 0) {
432 * We've been given a pointer
433 * to a single buffer.
435 if ((ccbh
->flags
& CAM_DATA_PHYS
) == 0) {
440 bus_dmamap_load(adw
->buffer_dmat
,
446 if (error
== EINPROGRESS
) {
448 * So as to maintain ordering,
449 * freeze the controller queue
450 * until our mapping is
453 xpt_freeze_simq(sim
, 1);
454 acb
->state
|= CAM_RELEASE_SIMQ
;
458 struct bus_dma_segment seg
;
460 /* Pointer to physical buffer */
462 (bus_addr_t
)csio
->data_ptr
;
463 seg
.ds_len
= csio
->dxfer_len
;
464 adwexecuteacb(acb
, &seg
, 1, 0);
467 struct bus_dma_segment
*segs
;
469 if ((ccbh
->flags
& CAM_DATA_PHYS
) != 0)
470 panic("adw_action - Physical "
474 if ((ccbh
->flags
&CAM_SG_LIST_PHYS
)==0)
475 panic("adw_action - Virtual "
479 /* Just use the segments provided */
480 segs
= (struct bus_dma_segment
*)csio
->data_ptr
;
481 adwexecuteacb(acb
, segs
, csio
->sglist_cnt
,
482 (csio
->sglist_cnt
< ADW_SGSIZE
)
486 adwexecuteacb(acb
, NULL
, 0, 0);
490 case XPT_RESET_DEV
: /* Bus Device Reset the specified SCSI device */
492 adw_idle_cmd_status_t status
;
494 status
= adw_idle_cmd_send(adw
, ADW_IDLE_CMD_DEVICE_RESET
,
495 ccb
->ccb_h
.target_id
);
496 if (status
== ADW_IDLE_CMD_SUCCESS
) {
497 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
499 xpt_print_path(ccb
->ccb_h
.path
);
500 kprintf("BDR Delivered\n");
503 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
507 case XPT_ABORT
: /* Abort the specified CCB */
509 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
512 case XPT_SET_TRAN_SETTINGS
:
514 struct ccb_trans_settings_scsi
*scsi
;
515 struct ccb_trans_settings_spi
*spi
;
516 struct ccb_trans_settings
*cts
;
520 target_mask
= 0x01 << ccb
->ccb_h
.target_id
;
523 scsi
= &cts
->proto_specific
.scsi
;
524 spi
= &cts
->xport_specific
.spi
;
525 if (cts
->type
== CTS_TYPE_CURRENT_SETTINGS
) {
528 sdtrdone
= adw_lram_read_16(adw
, ADW_MC_SDTR_DONE
);
529 if ((spi
->valid
& CTS_SPI_VALID_DISC
) != 0) {
533 adw_lram_read_16(adw
, ADW_MC_DISC_ENABLE
);
535 if ((spi
->flags
& CTS_SPI_FLAGS_DISC_ENB
) != 0)
536 discenb
|= target_mask
;
538 discenb
&= ~target_mask
;
540 adw_lram_write_16(adw
, ADW_MC_DISC_ENABLE
,
544 if ((scsi
->valid
& CTS_SCSI_VALID_TQ
) != 0) {
546 if ((scsi
->flags
& CTS_SCSI_FLAGS_TAG_ENB
) != 0)
547 adw
->tagenb
|= target_mask
;
549 adw
->tagenb
&= ~target_mask
;
552 if ((spi
->valid
& CTS_SPI_VALID_BUS_WIDTH
) != 0) {
558 adw_lram_read_16(adw
, ADW_MC_WDTR_ABLE
);
559 wdtrenb
= wdtrenb_orig
;
560 wdtrdone
= adw_lram_read_16(adw
,
562 switch (spi
->bus_width
) {
563 case MSG_EXT_WDTR_BUS_32_BIT
:
564 case MSG_EXT_WDTR_BUS_16_BIT
:
565 wdtrenb
|= target_mask
;
567 case MSG_EXT_WDTR_BUS_8_BIT
:
569 wdtrenb
&= ~target_mask
;
572 if (wdtrenb
!= wdtrenb_orig
) {
573 adw_lram_write_16(adw
,
576 wdtrdone
&= ~target_mask
;
577 adw_lram_write_16(adw
,
580 /* Wide negotiation forces async */
581 sdtrdone
&= ~target_mask
;
582 adw_lram_write_16(adw
,
588 if (((spi
->valid
& CTS_SPI_VALID_SYNC_RATE
) != 0)
589 || ((spi
->valid
& CTS_SPI_VALID_SYNC_OFFSET
) != 0)) {
595 sdtr
= adw_get_chip_sdtr(adw
,
596 ccb
->ccb_h
.target_id
);
598 sdtrable
= adw_lram_read_16(adw
,
600 sdtrable_orig
= sdtrable
;
603 & CTS_SPI_VALID_SYNC_RATE
) != 0) {
611 & CTS_SPI_VALID_SYNC_OFFSET
) != 0) {
612 if (spi
->sync_offset
== 0)
613 sdtr
= ADW_MC_SDTR_ASYNC
;
616 if (sdtr
== ADW_MC_SDTR_ASYNC
)
617 sdtrable
&= ~target_mask
;
619 sdtrable
|= target_mask
;
620 if (sdtr
!= sdtr_orig
621 || sdtrable
!= sdtrable_orig
) {
622 adw_set_chip_sdtr(adw
,
623 ccb
->ccb_h
.target_id
,
625 sdtrdone
&= ~target_mask
;
626 adw_lram_write_16(adw
, ADW_MC_SDTR_ABLE
,
628 adw_lram_write_16(adw
, ADW_MC_SDTR_DONE
,
635 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
639 case XPT_GET_TRAN_SETTINGS
:
640 /* Get default/user set transfer settings for the target */
642 struct ccb_trans_settings_scsi
*scsi
;
643 struct ccb_trans_settings_spi
*spi
;
644 struct ccb_trans_settings
*cts
;
648 target_mask
= 0x01 << ccb
->ccb_h
.target_id
;
649 cts
->protocol
= PROTO_SCSI
;
650 cts
->protocol_version
= SCSI_REV_2
;
651 cts
->transport
= XPORT_SPI
;
652 cts
->transport_version
= 2;
654 scsi
= &cts
->proto_specific
.scsi
;
655 spi
= &cts
->xport_specific
.spi
;
656 if (cts
->type
== CTS_TYPE_CURRENT_SETTINGS
) {
660 if ((adw
->user_discenb
& target_mask
) != 0)
661 spi
->flags
|= CTS_SPI_FLAGS_DISC_ENB
;
663 if ((adw
->user_tagenb
& target_mask
) != 0)
664 scsi
->flags
|= CTS_SCSI_FLAGS_TAG_ENB
;
666 if ((adw
->user_wdtr
& target_mask
) != 0)
667 spi
->bus_width
= MSG_EXT_WDTR_BUS_16_BIT
;
669 spi
->bus_width
= MSG_EXT_WDTR_BUS_8_BIT
;
671 mc_sdtr
= adw_get_user_sdtr(adw
, ccb
->ccb_h
.target_id
);
672 spi
->sync_period
= adw_find_period(adw
, mc_sdtr
);
673 if (spi
->sync_period
!= 0)
674 spi
->sync_offset
= 15; /* XXX ??? */
676 spi
->sync_offset
= 0;
683 if ((adw_lram_read_16(adw
, ADW_MC_DISC_ENABLE
)
685 spi
->flags
|= CTS_SPI_FLAGS_DISC_ENB
;
687 if ((adw
->tagenb
& target_mask
) != 0)
688 scsi
->flags
|= CTS_SCSI_FLAGS_TAG_ENB
;
691 adw_lram_read_16(adw
,
692 ADW_MC_DEVICE_HSHK_CFG_TABLE
693 + (2 * ccb
->ccb_h
.target_id
));
695 if ((targ_tinfo
& ADW_HSHK_CFG_WIDE_XFR
) != 0)
696 spi
->bus_width
= MSG_EXT_WDTR_BUS_16_BIT
;
698 spi
->bus_width
= MSG_EXT_WDTR_BUS_8_BIT
;
701 adw_hshk_cfg_period_factor(targ_tinfo
);
703 spi
->sync_offset
= targ_tinfo
& ADW_HSHK_CFG_OFFSET
;
704 if (spi
->sync_period
== 0)
705 spi
->sync_offset
= 0;
707 if (spi
->sync_offset
== 0)
708 spi
->sync_period
= 0;
711 spi
->valid
= CTS_SPI_VALID_SYNC_RATE
712 | CTS_SPI_VALID_SYNC_OFFSET
713 | CTS_SPI_VALID_BUS_WIDTH
714 | CTS_SPI_VALID_DISC
;
715 scsi
->valid
= CTS_SCSI_VALID_TQ
;
716 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
720 case XPT_CALC_GEOMETRY
:
722 struct ccb_calc_geometry
*ccg
;
724 u_int32_t secs_per_cylinder
;
728 * XXX Use Adaptec translation until I find out how to
729 * get this information from the card.
732 size_mb
= ccg
->volume_size
733 / ((1024L * 1024L) / ccg
->block_size
);
736 if (size_mb
> 1024 && extended
) {
738 ccg
->secs_per_track
= 63;
741 ccg
->secs_per_track
= 32;
743 secs_per_cylinder
= ccg
->heads
* ccg
->secs_per_track
;
744 ccg
->cylinders
= ccg
->volume_size
/ secs_per_cylinder
;
745 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
749 case XPT_RESET_BUS
: /* Reset the specified SCSI bus */
753 failure
= adw_reset_bus(adw
);
755 ccb
->ccb_h
.status
= CAM_REQ_CMP_ERR
;
758 xpt_print_path(adw
->path
);
759 kprintf("Bus Reset Delivered\n");
761 ccb
->ccb_h
.status
= CAM_REQ_CMP
;
766 case XPT_TERM_IO
: /* Terminate the I/O process */
768 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
771 case XPT_PATH_INQ
: /* Path routing inquiry */
773 struct ccb_pathinq
*cpi
= &ccb
->cpi
;
775 cpi
->version_num
= 1;
776 cpi
->hba_inquiry
= PI_WIDE_16
|PI_SDTR_ABLE
|PI_TAG_ABLE
;
777 cpi
->target_sprt
= 0;
779 cpi
->hba_eng_cnt
= 0;
780 cpi
->max_target
= ADW_MAX_TID
;
781 cpi
->max_lun
= ADW_MAX_LUN
;
782 cpi
->initiator_id
= adw
->initiator_id
;
783 cpi
->bus_id
= cam_sim_bus(sim
);
784 cpi
->base_transfer_speed
= 3300;
785 strncpy(cpi
->sim_vid
, "FreeBSD", SIM_IDLEN
);
786 strncpy(cpi
->hba_vid
, "AdvanSys", HBA_IDLEN
);
787 strncpy(cpi
->dev_name
, cam_sim_name(sim
), DEV_IDLEN
);
788 cpi
->unit_number
= cam_sim_unit(sim
);
789 cpi
->transport
= XPORT_SPI
;
790 cpi
->transport_version
= 2;
791 cpi
->protocol
= PROTO_SCSI
;
792 cpi
->protocol_version
= SCSI_REV_2
;
793 cpi
->ccb_h
.status
= CAM_REQ_CMP
;
798 ccb
->ccb_h
.status
= CAM_REQ_INVALID
;
/*
 * adw_poll:
 *	CAM SIM poll entry point -- invoke the interrupt handler
 *	directly on the SIM's softc for polled operation.
 */
805 adw_poll(struct cam_sim
*sim
)
807 adw_intr(cam_sim_softc(sim
));
/*
 * adw_async:
 *	CAM async-event callback, registered for AC_LOST_DEVICE in
 *	adw_attach() (csa.callback = adw_async).
 *
 * NOTE(review): only the signature is visible in this extraction;
 * the function body (original lines ~812-814) is missing -- cannot
 * state what, if anything, it does with 'code'/'path'/'arg'.
 */
811 adw_async(void *callback_arg
, u_int32_t code
, struct cam_path
*path
, void *arg
)
816 adw_alloc(device_t dev
, struct resource
*regs
, int regs_type
, int regs_id
)
818 struct adw_softc
*adw
;
822 * Allocate a storage area for us
824 adw
= kmalloc(sizeof(struct adw_softc
), M_DEVBUF
, M_INTWAIT
| M_ZERO
);
825 LIST_INIT(&adw
->pending_ccbs
);
826 SLIST_INIT(&adw
->sg_maps
);
828 adw
->unit
= device_get_unit(dev
);
829 adw
->regs_res_type
= regs_type
;
830 adw
->regs_res_id
= regs_id
;
832 adw
->tag
= rman_get_bustag(regs
);
833 adw
->bsh
= rman_get_bushandle(regs
);
834 KKASSERT(adw
->unit
>= 0 && adw
->unit
< 100);
836 adw
->name
= kmalloc(sizeof("adw") + i
+ 1, M_DEVBUF
, M_INTWAIT
);
837 ksprintf(adw
->name
, "adw%d", adw
->unit
);
842 adw_free(struct adw_softc
*adw
)
844 switch (adw
->init_level
) {
847 struct sg_map_node
*sg_map
;
849 while ((sg_map
= SLIST_FIRST(&adw
->sg_maps
)) != NULL
) {
850 SLIST_REMOVE_HEAD(&adw
->sg_maps
, links
);
851 bus_dmamap_unload(adw
->sg_dmat
,
853 bus_dmamem_free(adw
->sg_dmat
, sg_map
->sg_vaddr
,
855 kfree(sg_map
, M_DEVBUF
);
857 bus_dma_tag_destroy(adw
->sg_dmat
);
860 bus_dmamap_unload(adw
->acb_dmat
, adw
->acb_dmamap
);
862 bus_dmamem_free(adw
->acb_dmat
, adw
->acbs
,
864 bus_dmamap_destroy(adw
->acb_dmat
, adw
->acb_dmamap
);
866 bus_dma_tag_destroy(adw
->acb_dmat
);
868 bus_dmamap_unload(adw
->carrier_dmat
, adw
->carrier_dmamap
);
870 bus_dmamem_free(adw
->carrier_dmat
, adw
->carriers
,
871 adw
->carrier_dmamap
);
872 bus_dmamap_destroy(adw
->carrier_dmat
, adw
->carrier_dmamap
);
874 bus_dma_tag_destroy(adw
->carrier_dmat
);
876 bus_dma_tag_destroy(adw
->buffer_dmat
);
878 bus_dma_tag_destroy(adw
->parent_dmat
);
882 kfree(adw
->name
, M_DEVBUF
);
883 kfree(adw
, M_DEVBUF
);
887 adw_init(struct adw_softc
*adw
)
889 struct adw_eeprom eep_config
;
895 checksum
= adw_eeprom_read(adw
, &eep_config
);
896 bcopy(eep_config
.serial_number
, adw
->serial_number
,
897 sizeof(adw
->serial_number
));
898 if (checksum
!= eep_config
.checksum
) {
899 u_int16_t serial_number
[3];
901 adw
->flags
|= ADW_EEPROM_FAILED
;
902 kprintf("%s: EEPROM checksum failed. Restoring Defaults\n",
906 * Restore the default EEPROM settings.
907 * Assume the 6 byte board serial number that was read
908 * from EEPROM is correct even if the EEPROM checksum
911 bcopy(adw
->default_eeprom
, &eep_config
, sizeof(eep_config
));
912 bcopy(adw
->serial_number
, eep_config
.serial_number
,
913 sizeof(serial_number
));
914 adw_eeprom_write(adw
, &eep_config
);
917 /* Pull eeprom information into our softc. */
918 adw
->bios_ctrl
= eep_config
.bios_ctrl
;
919 adw
->user_wdtr
= eep_config
.wdtr_able
;
920 for (tid
= 0; tid
< ADW_MAX_TID
; tid
++) {
924 tid_mask
= 0x1 << tid
;
925 if ((adw
->features
& ADW_ULTRA
) != 0) {
927 * Ultra chips store sdtr and ultraenb
928 * bits in their seeprom, so we must
929 * construct valid mc_sdtr entries for
932 if (eep_config
.sync1
.sync_enable
& tid_mask
) {
933 if (eep_config
.sync2
.ultra_enable
& tid_mask
)
934 mc_sdtr
= ADW_MC_SDTR_20
;
936 mc_sdtr
= ADW_MC_SDTR_10
;
938 mc_sdtr
= ADW_MC_SDTR_ASYNC
;
940 switch (ADW_TARGET_GROUP(tid
)) {
942 mc_sdtr
= eep_config
.sync4
.sdtr4
;
945 mc_sdtr
= eep_config
.sync3
.sdtr3
;
948 mc_sdtr
= eep_config
.sync2
.sdtr2
;
950 default: /* Shut up compiler */
952 mc_sdtr
= eep_config
.sync1
.sdtr1
;
955 mc_sdtr
>>= ADW_TARGET_GROUP_SHIFT(tid
);
958 adw_set_user_sdtr(adw
, tid
, mc_sdtr
);
960 adw
->user_tagenb
= eep_config
.tagqng_able
;
961 adw
->user_discenb
= eep_config
.disc_enable
;
962 adw
->max_acbs
= eep_config
.max_host_qng
;
963 adw
->initiator_id
= (eep_config
.adapter_scsi_id
& ADW_MAX_TID
);
966 * Sanity check the number of host openings.
968 if (adw
->max_acbs
> ADW_DEF_MAX_HOST_QNG
)
969 adw
->max_acbs
= ADW_DEF_MAX_HOST_QNG
;
970 else if (adw
->max_acbs
< ADW_DEF_MIN_HOST_QNG
) {
971 /* If the value is zero, assume it is uninitialized. */
972 if (adw
->max_acbs
== 0)
973 adw
->max_acbs
= ADW_DEF_MAX_HOST_QNG
;
975 adw
->max_acbs
= ADW_DEF_MIN_HOST_QNG
;
979 if ((adw
->features
& ADW_ULTRA2
) != 0) {
980 switch (eep_config
.termination_lvd
) {
982 kprintf("%s: Invalid EEPROM LVD Termination Settings.\n",
984 kprintf("%s: Reverting to Automatic LVD Termination\n",
987 case ADW_EEPROM_TERM_AUTO
:
989 case ADW_EEPROM_TERM_BOTH_ON
:
990 scsicfg1
|= ADW2_SCSI_CFG1_TERM_LVD_LO
;
992 case ADW_EEPROM_TERM_HIGH_ON
:
993 scsicfg1
|= ADW2_SCSI_CFG1_TERM_LVD_HI
;
995 case ADW_EEPROM_TERM_OFF
:
996 scsicfg1
|= ADW2_SCSI_CFG1_DIS_TERM_DRV
;
1001 switch (eep_config
.termination_se
) {
1003 kprintf("%s: Invalid SE EEPROM Termination Settings.\n",
1005 kprintf("%s: Reverting to Automatic SE Termination\n",
1008 case ADW_EEPROM_TERM_AUTO
:
1010 case ADW_EEPROM_TERM_BOTH_ON
:
1011 scsicfg1
|= ADW_SCSI_CFG1_TERM_CTL_L
;
1013 case ADW_EEPROM_TERM_HIGH_ON
:
1014 scsicfg1
|= ADW_SCSI_CFG1_TERM_CTL_H
;
1016 case ADW_EEPROM_TERM_OFF
:
1017 scsicfg1
|= ADW_SCSI_CFG1_TERM_CTL_MANUAL
;
1020 kprintf("%s: SCSI ID %d, ", adw_name(adw
), adw
->initiator_id
);
1022 /* DMA tag for mapping buffers into device visible space. */
1023 if (bus_dma_tag_create(adw
->parent_dmat
, /*alignment*/1, /*boundary*/0,
1024 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT
,
1025 /*highaddr*/BUS_SPACE_MAXADDR
,
1026 /*filter*/NULL
, /*filterarg*/NULL
,
1027 /*maxsize*/MAXBSIZE
, /*nsegments*/ADW_SGSIZE
,
1028 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT
,
1029 /*flags*/BUS_DMA_ALLOCNOW
,
1030 &adw
->buffer_dmat
) != 0) {
1036 /* DMA tag for our ccb carrier structures */
1037 if (bus_dma_tag_create(adw
->parent_dmat
, /*alignment*/0x10,
1039 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT
,
1040 /*highaddr*/BUS_SPACE_MAXADDR
,
1041 /*filter*/NULL
, /*filterarg*/NULL
,
1042 (adw
->max_acbs
+ ADW_NUM_CARRIER_QUEUES
+ 1)
1043 * sizeof(struct adw_carrier
),
1045 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT
,
1046 /*flags*/0, &adw
->carrier_dmat
) != 0) {
1052 /* Allocation for our ccb carrier structures */
1053 if (bus_dmamem_alloc(adw
->carrier_dmat
, (void *)&adw
->carriers
,
1054 BUS_DMA_NOWAIT
, &adw
->carrier_dmamap
) != 0) {
1060 /* And permanently map them */
1061 bus_dmamap_load(adw
->carrier_dmat
, adw
->carrier_dmamap
,
1063 (adw
->max_acbs
+ ADW_NUM_CARRIER_QUEUES
+ 1)
1064 * sizeof(struct adw_carrier
),
1065 adwmapmem
, &adw
->carrier_busbase
, /*flags*/0);
1067 /* Clear them out. */
1068 bzero(adw
->carriers
, (adw
->max_acbs
+ ADW_NUM_CARRIER_QUEUES
+ 1)
1069 * sizeof(struct adw_carrier
));
1071 /* Setup our free carrier list */
1072 adw
->free_carriers
= adw
->carriers
;
1073 for (i
= 0; i
< adw
->max_acbs
+ ADW_NUM_CARRIER_QUEUES
; i
++) {
1074 adw
->carriers
[i
].carr_offset
=
1075 carriervtobo(adw
, &adw
->carriers
[i
]);
1076 adw
->carriers
[i
].carr_ba
=
1077 carriervtob(adw
, &adw
->carriers
[i
]);
1078 adw
->carriers
[i
].areq_ba
= 0;
1079 adw
->carriers
[i
].next_ba
=
1080 carriervtobo(adw
, &adw
->carriers
[i
+1]);
1082 /* Terminal carrier. Never leaves the freelist */
1083 adw
->carriers
[i
].carr_offset
=
1084 carriervtobo(adw
, &adw
->carriers
[i
]);
1085 adw
->carriers
[i
].carr_ba
=
1086 carriervtob(adw
, &adw
->carriers
[i
]);
1087 adw
->carriers
[i
].areq_ba
= 0;
1088 adw
->carriers
[i
].next_ba
= ~0;
1092 /* DMA tag for our acb structures */
1093 if (bus_dma_tag_create(adw
->parent_dmat
, /*alignment*/1, /*boundary*/0,
1094 /*lowaddr*/BUS_SPACE_MAXADDR
,
1095 /*highaddr*/BUS_SPACE_MAXADDR
,
1096 /*filter*/NULL
, /*filterarg*/NULL
,
1097 adw
->max_acbs
* sizeof(struct acb
),
1099 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT
,
1100 /*flags*/0, &adw
->acb_dmat
) != 0) {
1106 /* Allocation for our ccbs */
1107 if (bus_dmamem_alloc(adw
->acb_dmat
, (void *)&adw
->acbs
,
1108 BUS_DMA_NOWAIT
, &adw
->acb_dmamap
) != 0)
1113 /* And permanently map them */
1114 bus_dmamap_load(adw
->acb_dmat
, adw
->acb_dmamap
,
1116 adw
->max_acbs
* sizeof(struct acb
),
1117 adwmapmem
, &adw
->acb_busbase
, /*flags*/0);
1119 /* Clear them out. */
1120 bzero(adw
->acbs
, adw
->max_acbs
* sizeof(struct acb
));
1122 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1123 if (bus_dma_tag_create(adw
->parent_dmat
, /*alignment*/1, /*boundary*/0,
1124 /*lowaddr*/BUS_SPACE_MAXADDR
,
1125 /*highaddr*/BUS_SPACE_MAXADDR
,
1126 /*filter*/NULL
, /*filterarg*/NULL
,
1127 PAGE_SIZE
, /*nsegments*/1,
1128 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT
,
1129 /*flags*/0, &adw
->sg_dmat
) != 0) {
1135 /* Allocate our first batch of ccbs */
1136 if (adwallocacbs(adw
) == 0)
1139 if (adw_init_chip(adw
, scsicfg1
) != 0)
1142 kprintf("Queue Depth %d\n", adw
->max_acbs
);
1148 * Attach all the sub-devices we can find
1151 adw_attach(struct adw_softc
*adw
)
1153 struct ccb_setasync csa
;
1158 /* Hook up our interrupt handler */
1159 if ((error
= bus_setup_intr(adw
->device
, adw
->irq
, 0,
1160 adw_intr
, adw
, &adw
->ih
, NULL
)) != 0) {
1161 device_printf(adw
->device
, "bus_setup_intr() failed: %d\n",
1166 /* Start the Risc processor now that we are fully configured. */
1167 adw_outw(adw
, ADW_RISC_CSR
, ADW_RISC_CSR_RUN
);
1170 * Construct our SIM entry.
1172 adw
->sim
= cam_sim_alloc(adw_action
, adw_poll
, "adw", adw
, adw
->unit
,
1173 &sim_mplock
, 1, adw
->max_acbs
, NULL
);
1174 if (adw
->sim
== NULL
) {
1182 if (xpt_bus_register(adw
->sim
, 0) != CAM_SUCCESS
) {
1183 cam_sim_free(adw
->sim
);
1188 if (xpt_create_path(&adw
->path
, /*periph*/NULL
, cam_sim_path(adw
->sim
),
1189 CAM_TARGET_WILDCARD
, CAM_LUN_WILDCARD
)
1191 xpt_setup_ccb(&csa
.ccb_h
, adw
->path
, /*priority*/5);
1192 csa
.ccb_h
.func_code
= XPT_SASYNC_CB
;
1193 csa
.event_enable
= AC_LOST_DEVICE
;
1194 csa
.callback
= adw_async
;
1195 csa
.callback_arg
= adw
;
1196 xpt_action((union ccb
*)&csa
);
1207 struct adw_softc
*adw
;
1210 adw
= (struct adw_softc
*)arg
;
1211 if ((adw_inw(adw
, ADW_CTRL_REG
) & ADW_CTRL_REG_HOST_INTR
) == 0)
1214 /* Reading the register clears the interrupt. */
1215 int_stat
= adw_inb(adw
, ADW_INTR_STATUS_REG
);
1217 if ((int_stat
& ADW_INTR_STATUS_INTRB
) != 0) {
1220 /* Async Microcode Event */
1221 intrb_code
= adw_lram_read_8(adw
, ADW_MC_INTRB_CODE
);
1222 switch (intrb_code
) {
1223 case ADW_ASYNC_CARRIER_READY_FAILURE
:
1225 * The RISC missed our update of
1228 if (LIST_FIRST(&adw
->pending_ccbs
) != NULL
)
1229 adw_tickle_risc(adw
, ADW_TICKLE_A
);
1231 case ADW_ASYNC_SCSI_BUS_RESET_DET
:
1233 * The firmware detected a SCSI Bus reset.
1235 kprintf("Someone Reset the Bus\n");
1236 adw_handle_bus_reset(adw
, /*initiated*/FALSE
);
1238 case ADW_ASYNC_RDMA_FAILURE
:
1240 * Handle RDMA failure by resetting the
1241 * SCSI Bus and chip.
1244 AdvResetChipAndSB(adv_dvc_varp
);
1248 case ADW_ASYNC_HOST_SCSI_BUS_RESET
:
1250 * Host generated SCSI bus reset occurred.
1252 adw_handle_bus_reset(adw
, /*initiated*/TRUE
);
1255 kprintf("adw_intr: unknown async code 0x%x\n",
1262 * Run down the RequestQ.
1264 while ((adw
->responseq
->next_ba
& ADW_RQ_DONE
) != 0) {
1265 struct adw_carrier
*free_carrier
;
1270 kprintf("0x%x, 0x%x, 0x%x, 0x%x\n",
1271 adw
->responseq
->carr_offset
,
1272 adw
->responseq
->carr_ba
,
1273 adw
->responseq
->areq_ba
,
1274 adw
->responseq
->next_ba
);
1277 * The firmware copies the adw_scsi_req_q.acb_baddr
1278 * field into the areq_ba field of the carrier.
1280 acb
= acbbotov(adw
, adw
->responseq
->areq_ba
);
1283 * The least significant four bits of the next_ba
1284 * field are used as flags. Mask them out and then
1285 * advance through the list.
1287 free_carrier
= adw
->responseq
;
1289 carrierbotov(adw
, free_carrier
->next_ba
& ADW_NEXT_BA_MASK
);
1290 free_carrier
->next_ba
= adw
->free_carriers
->carr_offset
;
1291 adw
->free_carriers
= free_carrier
;
1295 callout_stop(&ccb
->ccb_h
.timeout_ch
);
1296 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) != CAM_DIR_NONE
) {
1297 bus_dmasync_op_t op
;
1299 if ((ccb
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
)
1300 op
= BUS_DMASYNC_POSTREAD
;
1302 op
= BUS_DMASYNC_POSTWRITE
;
1303 bus_dmamap_sync(adw
->buffer_dmat
, acb
->dmamap
, op
);
1304 bus_dmamap_unload(adw
->buffer_dmat
, acb
->dmamap
);
1305 ccb
->csio
.resid
= acb
->queue
.data_cnt
;
1307 ccb
->csio
.resid
= 0;
1309 /* Common Cases inline... */
1310 if (acb
->queue
.host_status
== QHSTA_NO_ERROR
1311 && (acb
->queue
.done_status
== QD_NO_ERROR
1312 || acb
->queue
.done_status
== QD_WITH_ERROR
)) {
1313 ccb
->csio
.scsi_status
= acb
->queue
.scsi_status
;
1314 ccb
->ccb_h
.status
= 0;
1315 switch (ccb
->csio
.scsi_status
) {
1316 case SCSI_STATUS_OK
:
1317 ccb
->ccb_h
.status
|= CAM_REQ_CMP
;
1319 case SCSI_STATUS_CHECK_COND
:
1320 case SCSI_STATUS_CMD_TERMINATED
:
1321 bcopy(&acb
->sense_data
, &ccb
->csio
.sense_data
,
1322 ccb
->csio
.sense_len
);
1323 ccb
->ccb_h
.status
|= CAM_AUTOSNS_VALID
;
1324 ccb
->csio
.sense_resid
= acb
->queue
.sense_len
;
1327 ccb
->ccb_h
.status
|= CAM_SCSI_STATUS_ERROR
1329 xpt_freeze_devq(ccb
->ccb_h
.path
, /*count*/1);
1332 adwfreeacb(adw
, acb
);
1335 adwprocesserror(adw
, acb
);
1341 adwprocesserror(struct adw_softc
*adw
, struct acb
*acb
)
1346 if (acb
->queue
.done_status
== QD_ABORTED_BY_HOST
) {
1347 ccb
->ccb_h
.status
= CAM_REQ_ABORTED
;
1350 switch (acb
->queue
.host_status
) {
1351 case QHSTA_M_SEL_TIMEOUT
:
1352 ccb
->ccb_h
.status
= CAM_SEL_TIMEOUT
;
1354 case QHSTA_M_SXFR_OFF_UFLW
:
1355 case QHSTA_M_SXFR_OFF_OFLW
:
1356 case QHSTA_M_DATA_OVER_RUN
:
1357 ccb
->ccb_h
.status
= CAM_DATA_RUN_ERR
;
1359 case QHSTA_M_SXFR_DESELECTED
:
1360 case QHSTA_M_UNEXPECTED_BUS_FREE
:
1361 ccb
->ccb_h
.status
= CAM_UNEXP_BUSFREE
;
1363 case QHSTA_M_SCSI_BUS_RESET
:
1364 case QHSTA_M_SCSI_BUS_RESET_UNSOL
:
1365 ccb
->ccb_h
.status
= CAM_SCSI_BUS_RESET
;
1367 case QHSTA_M_BUS_DEVICE_RESET
:
1368 ccb
->ccb_h
.status
= CAM_BDR_SENT
;
1370 case QHSTA_M_QUEUE_ABORTED
:
1371 /* BDR or Bus Reset */
1372 kprintf("Saw Queue Aborted\n");
1373 ccb
->ccb_h
.status
= adw
->last_reset
;
1375 case QHSTA_M_SXFR_SDMA_ERR
:
1376 case QHSTA_M_SXFR_SXFR_PERR
:
1377 case QHSTA_M_RDMA_PERR
:
1378 ccb
->ccb_h
.status
= CAM_UNCOR_PARITY
;
1380 case QHSTA_M_WTM_TIMEOUT
:
1381 case QHSTA_M_SXFR_WD_TMO
:
1383 /* The SCSI bus hung in a phase */
1384 xpt_print_path(adw
->path
);
1385 kprintf("Watch Dog timer expired. Reseting bus\n");
1389 case QHSTA_M_SXFR_XFR_PH_ERR
:
1390 ccb
->ccb_h
.status
= CAM_SEQUENCE_FAIL
;
1392 case QHSTA_M_SXFR_UNKNOWN_ERROR
:
1394 case QHSTA_M_BAD_CMPL_STATUS_IN
:
1395 /* No command complete after a status message */
1396 ccb
->ccb_h
.status
= CAM_SEQUENCE_FAIL
;
1398 case QHSTA_M_AUTO_REQ_SENSE_FAIL
:
1399 ccb
->ccb_h
.status
= CAM_AUTOSENSE_FAIL
;
1401 case QHSTA_M_INVALID_DEVICE
:
1402 ccb
->ccb_h
.status
= CAM_PATH_INVALID
;
1404 case QHSTA_M_NO_AUTO_REQ_SENSE
:
1406 * User didn't request sense, but we got a
1409 ccb
->csio
.scsi_status
= acb
->queue
.scsi_status
;
1410 ccb
->ccb_h
.status
= CAM_SCSI_STATUS_ERROR
;
1413 panic("%s: Unhandled Host status error %x",
1414 adw_name(adw
), acb
->queue
.host_status
);
1418 if ((acb
->state
& ACB_RECOVERY_ACB
) != 0) {
1419 if (ccb
->ccb_h
.status
== CAM_SCSI_BUS_RESET
1420 || ccb
->ccb_h
.status
== CAM_BDR_SENT
)
1421 ccb
->ccb_h
.status
= CAM_CMD_TIMEOUT
;
1423 if (ccb
->ccb_h
.status
!= CAM_REQ_CMP
) {
1424 xpt_freeze_devq(ccb
->ccb_h
.path
, /*count*/1);
1425 ccb
->ccb_h
.status
|= CAM_DEV_QFRZN
;
1427 adwfreeacb(adw
, acb
);
1432 adwtimeout(void *arg
)
1436 struct adw_softc
*adw
;
1437 adw_idle_cmd_status_t status
;
1440 acb
= (struct acb
*)arg
;
1442 adw
= (struct adw_softc
*)ccb
->ccb_h
.ccb_adw_ptr
;
1443 xpt_print_path(ccb
->ccb_h
.path
);
1444 kprintf("ACB %p - timed out\n", (void *)acb
);
1448 if ((acb
->state
& ACB_ACTIVE
) == 0) {
1449 xpt_print_path(ccb
->ccb_h
.path
);
1450 kprintf("ACB %p - timed out CCB already completed\n",
1456 acb
->state
|= ACB_RECOVERY_ACB
;
1457 target_id
= ccb
->ccb_h
.target_id
;
1459 /* Attempt a BDR first */
1460 status
= adw_idle_cmd_send(adw
, ADW_IDLE_CMD_DEVICE_RESET
,
1461 ccb
->ccb_h
.target_id
);
1463 if (status
== ADW_IDLE_CMD_SUCCESS
) {
1464 kprintf("%s: BDR Delivered. No longer in timeout\n",
1466 adw_handle_device_reset(adw
, target_id
);
1469 xpt_print_path(adw
->path
);
1470 kprintf("Bus Reset Delivered. No longer in timeout\n");
/*
 * adw_handle_device_reset:
 *	Notify CAM that a Bus Device Reset was delivered to 'target':
 *	build a wildcard-LUN path to the target, post an AC_SENT_BDR
 *	async event on it, free the path, and record CAM_BDR_SENT as
 *	the softc's last reset cause (consumed by adwprocesserror's
 *	QHSTA_M_QUEUE_ABORTED case).
 *
 * NOTE(review): brace/blank lines (original 1476, 1482, 1486, 1488)
 * are missing from this extraction; code left byte-identical.
 */
1475 adw_handle_device_reset(struct adw_softc
*adw
, u_int target
)
1477 struct cam_path
*path
;
1480 error
= xpt_create_path(&path
, /*periph*/NULL
, cam_sim_path(adw
->sim
),
1481 target
, CAM_LUN_WILDCARD
);
1483 if (error
== CAM_REQ_CMP
) {
1484 xpt_async(AC_SENT_BDR
, path
, NULL
);
1485 xpt_free_path(path
);
1487 adw
->last_reset
= CAM_BDR_SENT
;
/*
 * adw_handle_bus_reset:
 *	Complete handling of a SCSI bus reset.  The visible code
 *	re-asserts the bus reset line (RSTOUT) for
 *	ADW_SCSI_RESET_HOLD_TIME_US -- because, per the in-line
 *	comment, the microcode's own reset hold time is not
 *	deterministic and may be under the SCSI-spec minimum -- then
 *	restores SCSI_CTRL, posts AC_BUS_RESET to CAM on the softc's
 *	path, and records CAM_SCSI_BUS_RESET as the last reset cause.
 *
 * NOTE(review): the branch on 'initiated' that presumably separates
 * the reassert path from the notification path (original lines
 * ~1502-1515) is only partially visible in this extraction --
 * confirm against the full source before relying on ordering.
 */
1491 adw_handle_bus_reset(struct adw_softc
*adw
, int initiated
)
1495 * The microcode currently sets the SCSI Bus Reset signal
1496 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
1497 * command above. But the SCSI Bus Reset Hold Time in the
1498 * microcode is not deterministic (it may in fact be for less
1499 * than the SCSI Spec. minimum of 25 us). Therefore on return
1500 * the Adv Library sets the SCSI Bus Reset signal for
1501 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
1506 scsi_ctrl
= adw_inw(adw
, ADW_SCSI_CTRL
) & ~ADW_SCSI_CTRL_RSTOUT
;
1507 adw_outw(adw
, ADW_SCSI_CTRL
, scsi_ctrl
| ADW_SCSI_CTRL_RSTOUT
);
1508 DELAY(ADW_SCSI_RESET_HOLD_TIME_US
);
1509 adw_outw(adw
, ADW_SCSI_CTRL
, scsi_ctrl
);
1512 * We will perform the async notification when the
1513 * SCSI Reset interrupt occurs.
1516 xpt_async(AC_BUS_RESET
, adw
->path
, NULL
);
1517 adw
->last_reset
= CAM_SCSI_BUS_RESET
;