/*
 *********************************************************************
 *	BY:	C.L. Huang	(ching@tekram.com.tw)
 *		Erich Chen	(erich@tekram.com.tw)
 *	Description: Device Driver for the amd53c974 PCI Bus Master
 *		     SCSI Host adapter found on cards such as
 *		     the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 * $DragonFly: src/sys/dev/disk/amd/amd.c,v 1.16 2008/05/18 20:30:22 pavalos Exp $
 *********************************************************************
 *
 *	HISTORY:
 *
 *	REV#	DATE		NAME		DESCRIPTION
 *	1.00	07/02/96	CLH		First release for RELEASE-2.1.0
 *	1.01	08/20/96	CLH		Update for RELEASE-2.1.5
 *	1.02	11/06/96	CLH		Fixed more than 1 LUN scanning
 *	1.03	12/20/96	CLH		Modify to support 2.2-ALPHA
 *	1.04	12/26/97	CLH		Modify to support RELEASE-2.2.5
 *	1.05	01/01/99	ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */
/* #define AMD_DEBUG0		*/
/* #define AMD_DEBUG_SCSI_PHASE */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "amd.h"

#define PCI_DEVICE_ID_AMD53C974 	0x20201022ul
#define PCI_BASE_ADDR0	    		0x10
typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb *pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
static u_int8_t *phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void    amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}
/* CAM SIM entry points */
#define ccb_srb_ptr spriv_ptr0
#define ccb_amd_ptr spriv_ptr1
static void amd_action(struct cam_sim *sim, union ccb *ccb);
static void amd_poll(struct cam_sim *sim);
/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,	/* phase 0: DATA OUT */
	amd_DataInPhase0,	/* phase 1: DATA IN  */
	amd_CommandPhase0,	/* phase 2: COMMAND  */
	amd_StatusPhase0,	/* phase 3: STATUS   */
	amd_NopPhase,		/* phase 4: reserved */
	amd_NopPhase,		/* phase 5: reserved */
	amd_MsgOutPhase0,	/* phase 6: MSG OUT  */
	amd_MsgInPhase0		/* phase 7: MSG IN   */
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,	/* phase 0: DATA OUT */
	amd_DataInPhase1,	/* phase 1: DATA IN  */
	amd_CommandPhase1,	/* phase 2: COMMAND  */
	amd_StatusPhase1,	/* phase 3: STATUS   */
	amd_NopPhase,		/* phase 4: reserved */
	amd_NopPhase,		/* phase 5: reserved */
	amd_MsgOutPhase1,	/* phase 6: MSG OUT  */
	amd_MsgInPhase1		/* phase 7: MSG IN   */
};
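
/*
 * Dispatch sketch (grounded in amd_intr() below): the low three bits of
 * the SCSI status register encode the current bus phase, so handler
 * selection is a straight table lookup:
 *
 *	amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
 *	amd->last_phase = scsistat & SCSI_PHASE_MASK;
 *	amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
 *
 * The phase0 handlers post-process the phase just left; the phase1
 * handlers set up the phase being entered.
 */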
/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t eeprom_period[] = {
	/* ... */
};

/*
 * chip clock setting to SCSI specified sync parameter table.
 */
u_int8_t tinfo_sync_period[] = {
	/* ... */
};
static __inline struct amd_srb *
amdgetsrb(struct amd_softc * amd)
{
	struct amd_srb *    pSRB;

	crit_enter();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	crit_exit();
	return (pSRB);
}
static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	struct ccb_scsiio *csio;
	u_int8_t *cdb;
	u_int cdb_len;

	csio = &srb->pccb->csio;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = csio->sense_len;
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}
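
/*
 * For the AUTO_REQSENSE case above, the CDB written into the FIFO is a
 * plain 6-byte REQUEST SENSE.  For example, for LUN 2 and an 18-byte
 * sense buffer the bytes would be (values illustrative):
 *
 *	{ 0x03, 2 << 5, 0x00, 0x00, 18, 0x00 }
 *
 * i.e. opcode 0x03, the LUN in the top three bits of byte 1, and the
 * allocation length in byte 4.
 */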
/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd) {
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 amd_srb *srb;
	union	 ccb *ccb;
	struct	 amd_softc *amd;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			kprintf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->MsgCnt = 0;
	srb->SRBStatus = 0;
	srb->SRBFlag = 0;
	srb->SRBState = 0;
	srb->TotalXferredLen = 0;
	srb->SGPhysAddr = 0;
	srb->SGToBeXferLen = 0;

	crit_enter();

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		crit_exit();
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
	/* XXX Need a timeout handler */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      amd_timeout, srb);
#endif
	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	crit_exit();
}
static void
amd_action(struct cam_sim * psim, union ccb * pccb)
{
	struct amd_softc *    amd;
	u_int   target_id, target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;
	target_lun = pccb->ccb_h.target_lun;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb *    pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (pSRB == NULL) {
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
					int error;

					error =
					    bus_dmamap_load(amd->buffer_dmat,
							    pSRB->dmamap,
							    pcsio->data_ptr,
							    pcsio->dxfer_len,
							    amdexecutesrb,
							    pSRB, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain
						 * ordering, freeze the
						 * controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(amd->psim, 1);
						pccb->ccb_h.status |=
						    CAM_RELEASE_SIMQ;
					}
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)pcsio->data_ptr;
					seg.ds_len = pcsio->dxfer_len;
					amdexecutesrb(pSRB, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					TAILQ_INSERT_HEAD(&amd->free_srbs,
							  pSRB, links);
					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					return;
				}

				/* Just use the segments provided */
				segs =
				    (struct bus_dma_segment *)pcsio->data_ptr;
				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
			}
		} else
			amdexecutesrb(pSRB, NULL, 0, 0);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int     i;

		amd_ResetSCSIBus(amd);
		amd->ACBFlag = 0;

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_EN_LUN:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info = &amd->tinfo[target_id];
		struct amd_transinfo *tinfo;
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		crit_enter();
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->current;
		} else {
			/* default(user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->user;
		}
		spi->sync_period = tinfo->period;
		spi->sync_offset = tinfo->offset;
		crit_exit();

		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info;
		u_int  update_type = 0;
		int    last_entry;

		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;
		if (IS_CURRENT_SETTINGS(cts)) {
			update_type |= AMD_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts)) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}

		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				    != 0)
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				else
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
			}
			if (update_type & AMD_TRANS_USER) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				    != 0)
					targ_info->disc_tag |= AMD_USR_DISCENB;
				else
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
			}
		}
		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				    != 0)
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				else
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
			}
			if (update_type & AMD_TRANS_USER) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				    != 0)
					targ_info->disc_tag |= AMD_USR_TAGENB;
				else
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
			}
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_offset = targ_info->goal.offset;
			else
				spi->sync_offset = targ_info->user.offset;
		}

		if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
			spi->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_period = targ_info->goal.period;
			else
				spi->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((spi->sync_period != 0)
		 && (spi->sync_period < tinfo_sync_period[0]))
			spi->sync_period = tinfo_sync_period[0];
		if (spi->sync_period > tinfo_sync_period[last_entry])
			spi->sync_period = 0;
		if (spi->sync_offset == 0)
			spi->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = spi->sync_period;
			targ_info->user.offset = spi->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = spi->sync_period;
			targ_info->goal.offset = spi->sync_offset;
		}
		crit_exit();
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int     extended;

		ccg = &pccb->ccg;
		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
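	/*
	 * Worked example for the geometry case above (head counts of
	 * 255/64 are the conventional translations assumed here): a 4GB
	 * volume with 512-byte blocks gives size_mb = 4096, so with
	 * GREATER_1G set the extended 255-head/63-sector translation is
	 * used and cylinders = volume_size / (255 * 63).  Without it,
	 * the 64/32 translation caps addressable capacity at
	 * 1024 * 64 * 32 * 512 bytes = 1GB.
	 */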
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}
static void
amd_poll(struct cam_sim * psim)
{
	amd_intr(cam_sim_softc(psim));
}
static u_int8_t *
phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
{
	intptr_t   dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t   i;
	struct amd_sg *    pseg;

	pcsio = &pSRB->pccb->csio;

	dataPtr = (intptr_t) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (intptr_t) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (intptr_t) xferCnt;
	return ((u_int8_t *) dataPtr);
}
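
/*
 * Worked example for phystovirt(): with SGIndex == 2 and segment
 * lengths {4096, 4096, 8192}, an xferCnt of 100 into the third segment
 * yields data_ptr + 4096 + 4096 + 100.  This relies on the SG list
 * having been built (in amdexecutesrb) from the single virtually
 * contiguous buffer handed to bus_dmamap_load().
 */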
static void
ResetDevParam(struct amd_softc * amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}
static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;
		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;
		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}
static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings neg;
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;
			/* clear before setup so the header survives */
			memset(&neg, 0, sizeof (neg));
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			spi->sync_period = period;
			spi->sync_offset = offset;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				   | CTS_SPI_VALID_SYNC_OFFSET;
			xpt_async(AC_TRANSFER_NEG, path, &neg);
			xpt_free_path(path);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}
	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!\n");
}
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	u_int8_t   bval;
	u_int16_t  i;

#ifdef AMD_DEBUG0
	kprintf("DC390: RESET");
#endif

	crit_enter();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	crit_exit();
}
static void
amd_timeout(void *arg1)
{
	struct amd_srb *    pSRB;

	pSRB = (struct amd_srb *) arg1;
}
static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}
/*
 * Catch an interrupt from the adapter.
 * Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int  internstat = 0;
	u_int  scsistat;
	u_int  intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: amd NULL return......");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: scsistat = NULL ,return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat = amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}
static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}
static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t  i, residual;
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;
	u_int8_t *  ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd, CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
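
			/*
			 * The BLAST command above appears intended to
			 * flush any bytes still buffered in the chip's
			 * DMA path out to memory; only after
			 * BLAST_COMPLETE is the transfer counter read
			 * back below expected to be consistent.
			 */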
			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;

			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}
static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}
static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}
static int
amdparsemsg(struct amd_softc *amd)
{
	struct	amd_target_info *targ_info;
	int	reject;
	int	done;
	int	response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	targ_info = &amd->tinfo[amd->cur_target];

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			kprintf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				kprintf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}
static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (i == 0) {
				*period = *table_entry;
			}
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else {
		clockrate = i + 4;
	}

	return (clockrate);
}
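
/*
 * The non-zero return value above is evidently the table index plus 4:
 * it is what amdsetsync() programs into SYNCPERIOREG, and the legacy
 * handler further below uses the same convention when it indexes
 * tinfo_sync_period[pDCB->SyncPeriod - 4].
 */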
/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}
static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}
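
/*
 * The five bytes queued above form a standard SCSI extended SDTR
 * message.  For a requested period of 100ns (25 in the message's 4ns
 * units) and an offset of 15, the buffer would contain:
 *
 *	0x01 0x03 0x01 0x19 0x0f
 *	 |    |    |    |    +-- req/ack offset
 *	 |    |    |    +------- transfer period, 4ns units
 *	 |    |    +------------ MSG_EXT_SDTR
 *	 |    +----------------- remaining length (MSG_EXT_SDTR_LEN)
 *	 +---------------------- MSG_EXTENDED
 */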
static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct amd_srb *srb;
	struct amd_target_info *targ_info;
	int response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		kprintf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct ccb_trans_settings neg;
		struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;

		kprintf("amd%d:%d: refuses tagged commands.  Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		amdsettags(amd, amd->cur_target, FALSE);
		memset(&neg, 0, sizeof (neg));
		scsi->valid = CTS_SCSI_VALID_TQ;
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				   | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		kprintf("amd%d:%d: Message reject received -- ignored\n",
		       amd->unit, amd->cur_target);
	}
	return (response);
}
#if 0
	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
		if (bval == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
		} else if (bval == MSG_SAVEDATAPOINTER) {
		} else if ((bval == MSG_EXTENDED)
			|| ((bval >= MSG_SIMPLE_Q_TAG)
			 && (bval <= MSG_ORDERED_Q_TAG))) {
			pSRB->SRBState |= SRB_MSGIN_MULTI;
			pSRB->MsgInBuf[0] = bval;
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
		} else if (bval == MSG_MESSAGE_REJECT) {
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
			if (pSRB->SRBState & DO_SYNC_NEGO) {
				goto set_async;
			}
		} else if (bval == MSG_RESTOREPOINTERS) {
		} else {
		}
	} else {		/* minx: */
		*pSRB->pMsgPtr = bval;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK;	/* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {	/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					wval1--;
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

		re_prog:
				/*
				 * program SCSI control register
				 */
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
#endif
static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}
static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}
static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *    psgl;
	u_int32_t   lval;

	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}
static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}
static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}
static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}
static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}
static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}
static void
amd_Disconnect(struct amd_softc * amd)
{
	struct amd_srb *srb;
	int     target;
	int     lun;

	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and done this srb */
#if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
			/* XXX What about "done'ing" these srbs??? */
			if (pSRB->pSRBDCB == pDCB) {
				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
			}
		}
		amdrunwaiting(amd);
#endif
	} else {
		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
			goto disc1;
		} else if (srb->SRBState & SRB_DISCONNECT) {
			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
				amd->untagged_srbs[target][lun] = srb;
			amdrunwaiting(amd);
		} else if (srb->SRBState & SRB_COMPLETED) {
	disc1:
			srb->SRBState = SRB_FREE;
			SRBdone(amd, srb);
		}
	}
}
static void
amd_Reselect(struct amd_softc *amd)
{
	struct amd_target_info *tinfo;
	u_int16_t disc_count;

	amd_clear_msg_state(amd);
	if (amd->active_srb != NULL) {
		/* Requeue the SRB for our attempted Selection */
		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
		amd->active_srb = NULL;
	}

	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
	amd->cur_target ^= amd->HostID_Bit;
	amd->cur_target = ffs(amd->cur_target) - 1;
	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
	tinfo = &amd->tinfo[amd->cur_target];
	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
	if (disc_count == 0) {
		kprintf("amd%d: Unexpected reselection for target %d, "
		       "Issuing Abort\n", amd->unit, amd->cur_target);
		amd->msgout_buf[0] = MSG_ABORT;
		amd->msgout_len = 1;
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
	}
	if (amd->active_srb != NULL) {
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
	}

	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);	/* drop /ACK */
	amd->last_phase = SCSI_NOP0;
}
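
/*
 * Decoding note for the reselection path above: during reselection the
 * first FIFO byte carries two ID bits, the reselecting target's and
 * our own.  XORing with HostID_Bit clears our bit, and ffs() - 1 then
 * converts the remaining single-bit mask into the target number.  The
 * second FIFO byte is the IDENTIFY message, whose low three bits carry
 * the LUN.
 */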
static void
SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
{
	u_int8_t   bval, i, status;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_sg *ptr2;
	u_int32_t  swlval;
	u_int   target_id, target_lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target_id = pSRB->pccb->ccb_h.target_id;
	target_lun = pSRB->pccb->ccb_h.target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));

	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
	}

	status = pSRB->TargetStatus;
	pccb->ccb_h.status = CAM_REQ_CMP;
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		pSRB->SRBFlag &= ~AUTO_REQSENSE;
		pSRB->AdaptStatus = 0;
		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;

		if (status == SCSI_STATUS_CHECK_COND) {
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
			goto ckc_e;
		}
		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
		*((u_int32_t *)&(pSRB->CmdBlock[4])) = pSRB->Segment0[1];

		pcsio->sense_resid = pcsio->sense_len
				   - pSRB->TotalXferredLen;
		pSRB->TotalXferredLen = pSRB->Segment1[1];
		if (pSRB->TotalXferredLen) {
			pcsio->resid = pcsio->dxfer_len
				     - pSRB->TotalXferredLen;
			/* The resid field contains valid data	 */
			/* Flush resid bytes on complete        */
		} else {
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
		}
		pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
		goto ckc_e;
	}
	if (status) {
		if (status == SCSI_STATUS_CHECK_COND) {

			if ((pSRB->SGIndex < pSRB->SGcount)
			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
				bval = pSRB->SGcount;
				swlval = pSRB->SGToBeXferLen;
				ptr2 = pSRB->pSGlist;
				ptr2++;
				for (i = pSRB->SGIndex + 1; i < bval; i++) {
					swlval += ptr2->SGXLen;
					ptr2++;
				}
				pcsio->resid = (u_int32_t) swlval;

#ifdef AMD_DEBUG0
				kprintf("XferredLen=%8x,NotYetXferLen=%8x,",
				       pSRB->TotalXferredLen, swlval);
#endif
			}
			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
#ifdef AMD_DEBUG0
				kprintf("RequestSense..................\n");
#endif
				RequestSense(amd, pSRB);
				return;
			}
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == SCSI_STATUS_QUEUE_FULL) {
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
			pSRB->AdaptStatus = H_SEL_TIMEOUT;
			pSRB->TargetStatus = 0;

			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
		} else if (status == SCSI_STATUS_BUSY) {
#ifdef AMD_DEBUG0
			kprintf("DC390: target busy at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_BUSY;
			pccb->ccb_h.status = CAM_SCSI_BUSY;
		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
#ifdef AMD_DEBUG0
			kprintf("DC390: target reserved at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;	/* XXX */
		} else {
			pSRB->AdaptStatus = 0;
#ifdef AMD_DEBUG0
			kprintf("DC390: driver stuffup at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		}
	} else {
		status = pSRB->AdaptStatus;
		if (status & H_OVER_UNDER_RUN) {
			pSRB->TargetStatus = 0;

			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
		} else if (pSRB->SRBStatus & PARITY_ERROR) {
#ifdef AMD_DEBUG0
			kprintf("DC390: driver stuffup %s %d\n",
			       __FILE__, __LINE__);
#endif
			/* Driver failed to perform operation */
			pccb->ccb_h.status = CAM_UNCOR_PARITY;
		} else {	/* No error */
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			/* there is no error, (sense is invalid) */
		}
	}
ckc_e:
	crit_enter();
	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* CAM request not yet complete =>device_Q frozen */
		xpt_freeze_devq(pccb->ccb_h.path, 1);
		pccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
	amdrunwaiting(amd);
	crit_exit();
	xpt_done(pccb);
}
static void
amd_ResetSCSIBus(struct amd_softc * amd)
{
	crit_enter();
	amd->ACBFlag |= RESET_DEV;
	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
	crit_exit();
}
static void
amd_ScsiRstDetect(struct amd_softc * amd)
{
	u_int32_t   wlval;

#ifdef AMD_DEBUG0
	kprintf("amd_ScsiRstDetect \n");
#endif

	wlval = 1000;
	while (--wlval) {	/* delay 1 sec */
		DELAY(1000);
	}
	crit_enter();

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->ACBFlag & RESET_DEV) {
		amd->ACBFlag |= RESET_DONE;
	} else {
		amd->ACBFlag |= RESET_DETECT;

		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->running_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amd->active_srb = NULL;
		amd->ACBFlag = 0;
		amdrunwaiting(amd);
	}
	crit_exit();
}
static void
RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	pSRB->SRBFlag |= AUTO_REQSENSE;
	pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
	pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
	pSRB->Segment1[1] = pSRB->TotalXferredLen;

	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = 0;

	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;

	pSRB->pSGlist = &pSRB->Segmentx;
	pSRB->SGcount = 1;
	pSRB->SGIndex = 0;

	*((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
	*((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
	pSRB->ScsiCmdLen = 6;

	pSRB->TotalXferredLen = 0;
	pSRB->SGToBeXferLen = 0;
	if (amdstart(amd, pSRB) != 0) {
		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
	}
}
static void
amd_InvalidCmd(struct amd_softc * amd)
{
	struct amd_srb *srb;

	srb = amd->active_srb;
	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
}
void
amd_linkSRB(struct amd_softc *amd)
{
	u_int16_t count, i;
	struct amd_srb *psrb;
	int error;

	count = amd->SRBCount;

	for (i = 0; i < count; i++) {
		psrb = (struct amd_srb *)&amd->SRB_array[i];
		psrb->TagNumber = i;

		/*
		 * Create the dmamap.  This is no longer optional!
		 */
		error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
		if (error) {
			device_printf(amd->dev, "Error %d creating buffer "
				      "dmamap!\n", error);
			break;
		}
		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
	}
}
static void
amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
{
	if (mode == ENABLE_CE) {
		*regval = 0xc0;
	} else {
		*regval = 0x80;
	}
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	if (mode == DISABLE_CE) {
		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	}
	DELAY(160);
}
static void
amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
{
	u_int bval;

	bval = 0;
	if (Carry) {
		bval = 0x40;
		*regval = 0x80;
		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	}
	DELAY(160);
	bval |= 0x80;
	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	DELAY(160);
}
static int
amd_EEpromInDO(struct amd_softc *amd)
{
	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
	DELAY(160);
	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
		return (1);
	return (0);
}
static u_int16_t
EEpromGetData1(struct amd_softc *amd)
{
	u_int i;
	u_int carryFlag;
	u_int16_t wval;

	wval = 0;
	for (i = 0; i < 16; i++) {
		wval <<= 1;
		carryFlag = amd_EEpromInDO(amd);
		wval |= carryFlag;
	}
	return (wval);
}
static void
amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
{
	u_int i, j;
	int carryFlag;

	carryFlag = 1;
	j = 0x80;
	for (i = 0; i < 9; i++) {
		amd_EEpromOutDI(amd, regval, carryFlag);
		carryFlag = (EEpromCmd & j) ? 1 : 0;
		j >>= 1;
	}
}
static void
amd_ReadEEprom(struct amd_softc *amd)
{
	int	   regval;
	u_int	   i;
	u_int16_t *ptr;
	u_int8_t   cmd;

	ptr = (u_int16_t *)&amd->eepromBuf[0];
	cmd = EEPROM_READ;
	for (i = 0; i < 0x40; i++) {
		amd_EnDisableCE(amd, ENABLE_CE, &regval);
		amd_Prepare(amd, &regval, cmd);
		*ptr = EEpromGetData1(amd);
		ptr++;
		cmd++;
		amd_EnDisableCE(amd, DISABLE_CE, &regval);
	}
}
static void
amd_load_defaults(struct amd_softc *amd)
{
	int target;

	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
	for (target = 0; target < MAX_SCSI_ID; target++)
		amd->eepromBuf[target << 2] =
		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
}
static void
amd_load_eeprom_or_defaults(struct amd_softc *amd)
{
	u_int16_t  wval, *ptr;
	u_int8_t   i;

	amd_ReadEEprom(amd);
	wval = 0;
	ptr = (u_int16_t *) & amd->eepromBuf[0];
	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
		wval += *ptr;

	if (wval != EE_CHECKSUM) {
		if (bootverbose)
			kprintf("amd%d: SEEPROM data unavailable.  "
			       "Using default device parameters.\n",
			       amd->unit);
		amd_load_defaults(amd);
	}
}
/*
 **********************************************************************
 * Function : static int amd_init (device_t dev)
 * Purpose  : initialize the internal structures for a given SCSI host
 * Inputs   : dev - the host adapter's device
 **********************************************************************
 */
static int
amd_init(device_t dev)
{
	struct amd_softc *amd = device_get_softc(dev);
	struct resource	*iores;
	int	i, rid;
	u_int	bval;

	rid = PCI_BASE_ADDR0;
	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
				   RF_ACTIVE);
	if (iores == NULL) {
		if (bootverbose)
			kprintf("amd_init: bus_alloc_resource failure!\n");
		return ENXIO;
	}
	amd->tag = rman_get_bustag(iores);
	amd->bsh = rman_get_bushandle(iores);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &amd->buffer_dmat) != 0) {
		if (bootverbose)
			kprintf("amd_init: bus_dma_tag_create failure!\n");
		return ENXIO;
	}

	TAILQ_INIT(&amd->free_srbs);
	TAILQ_INIT(&amd->running_srbs);
	TAILQ_INIT(&amd->waiting_srbs);
	amd->last_phase = SCSI_BUS_FREE;
	amd->dev = dev;
	amd->unit = device_get_unit(dev);
	amd->SRBCount = MAX_SRB_CNT;

	amd_load_eeprom_or_defaults(amd);
	amd->max_id = 7;
	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
		amd->max_lun = 7;
	} else {
		amd->max_lun = 0;
	}
	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
	amd->AdaptSCSILUN = 0;
	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
	amd->ACBFlag = 0;
	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
	amd_linkSRB(amd);
	for (i = 0; i <= amd->max_id; i++) {

		if (amd->AdaptSCSIID != i) {
			struct amd_target_info *tinfo;
			PEEprom prom;

			tinfo = &amd->tinfo[i];
			prom = (PEEprom)&amd->eepromBuf[i << 2];
			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
				tinfo->disc_tag |= AMD_USR_DISCENB;
				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
					tinfo->disc_tag |= AMD_USR_TAGENB;
			}
			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
				tinfo->user.period =
				    eeprom_period[prom->EE_SPEED];
				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
			}
			tinfo->CtrlR1 = amd->AdaptSCSIID;
			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
				tinfo->CtrlR1 |= PARITY_ERR_REPO;
			tinfo->CtrlR3 = FAST_CLK;
			tinfo->CtrlR4 = EATER_25NS;
			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
		}
	}
	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
	/* Conversion factor = 0 , 40MHz clock */
	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
	/* NOP cmd - clear command register */
	amd_write8(amd, SCSICMDREG, NOP_CMD);
	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
	amd_write8(amd, CNTLREG3, FAST_CLK);
	bval = EATER_25NS;
	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
		bval |= NEGATE_REQACKDATA;
	}
	amd_write8(amd, CNTLREG4, bval);

	/* Disable SCSI bus reset interrupt */
	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);

	return 0;
}
/*
 * attach and init a host adapter
 */
static int
amd_attach(device_t dev)
{
	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
	u_int8_t	intstat;
	struct amd_softc *amd = device_get_softc(dev);
	int		unit = device_get_unit(dev);
	int		rid;
	void		*ih;
	struct resource	*irqres;

	if (amd_init(dev)) {
		if (bootverbose)
			kprintf("amd_attach: amd_init failure!\n");
		return ENXIO;
	}

	/* Reset Pending INT */
	intstat = amd_read8(amd, INTSTATREG);

	/* After setting up the adapter, map our interrupt */
	rid = 0;
	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
				    RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL)
	    != 0) {
		if (bootverbose)
			kprintf("amd%d: unable to register interrupt handler!\n",
			       unit);
		return ENXIO;
	}

	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus *  start queue to reset to the idle loop. *
	 * Create device queue of SIM(s) *  (MAX_START_JOB - 1) :
	 * max_sim_transactions
	 */
	devq = cam_simq_alloc(MAX_START_JOB);
	if (devq == NULL) {
		if (bootverbose)
			kprintf("amd_attach: cam_simq_alloc failure!\n");
		return ENXIO;
	}

	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
				  amd, amd->unit, &sim_mplock, 1,
				  MAX_TAGS_CMD_QUEUE, devq);
	cam_simq_release(devq);
	if (amd->psim == NULL) {
		if (bootverbose)
			kprintf("amd_attach: cam_sim_alloc failure!\n");
		return ENXIO;
	}

	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
		cam_sim_free(amd->psim);
		if (bootverbose)
			kprintf("amd_attach: xpt_bus_register failure!\n");
		return ENXIO;
	}

	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(amd->psim));
		cam_sim_free(amd->psim);
		if (bootverbose)
			kprintf("amd_attach: xpt_create_path failure!\n");
		return ENXIO;
	}

	return 0;
}
static int
amd_probe(device_t dev)
{
	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
		device_set_desc(dev,
			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
		return 0;
	}
	return ENXIO;
}
static device_method_t amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_probe),
	DEVMETHOD(device_attach,	amd_attach),
	{ 0, 0 }
};

static driver_t amd_driver = {
	"amd", amd_methods, sizeof(struct amd_softc)
};

static devclass_t amd_devclass;
DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);