/*
 *********************************************************************
 * FILE NAME  : amd.c
 *	     BY : C.L. Huang	(ching@tekram.com.tw)
 *		  Erich Chen	(erich@tekram.com.tw)
 * Description: Device Driver for the amd53c974 PCI Bus Master
 *		SCSI Host adapter found on cards such as
 *		the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 * $DragonFly: src/sys/dev/disk/amd/amd.c,v 1.16 2008/05/18 20:30:22 pavalos Exp $
 *********************************************************************
 * HISTORY:
 *
 *	REV#	DATE		NAME		DESCRIPTION
 *	1.00	07/02/96	CLH		First release for RELEASE-2.1.0
 *	1.01	08/20/96	CLH		Update for RELEASE-2.1.5
 *	1.02	11/06/96	CLH		Fixed more than 1 LUN scanning
 *	1.03	12/20/96	CLH		Modify to support 2.2-ALPHA
 *	1.04	12/26/97	CLH		Modify to support RELEASE-2.2.5
 *	1.05	01/01/99	ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */
/* #define AMD_DEBUG0		*/
/* #define AMD_DEBUG_SCSI_PHASE */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "amd.h"
#define PCI_DEVICE_ID_AMD53C974		0x20201022ul
#define PCI_BASE_ADDR0			0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb *pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

#if 0
static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
#endif
static u_int8_t *phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

voidamd_linkSRB(struct amd_softc *amd);
static intamd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static intamd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);
static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}
/* CAM SIM entry points */
#define ccb_srb_ptr spriv_ptr0
#define ccb_amd_ptr spriv_ptr1
static void amd_action(struct cam_sim *sim, union ccb *ccb);
static void amd_poll(struct cam_sim *sim);
/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,
	amd_DataInPhase0,
	amd_CommandPhase0,
	amd_StatusPhase0,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase0,
	amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,
	amd_DataInPhase1,
	amd_CommandPhase1,
	amd_StatusPhase1,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase1,
	amd_MsgInPhase1
};
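
/*
 * The SCSI phase code latched in SCSISTATREG (low three bits) indexes
 * both tables above.  amd_intr() first runs the phase0 ("post-process
 * the phase we are leaving") handler, then the phase1 ("set up the
 * phase the bus just entered") handler.  Roughly:
 *
 *	scsistat = amd_SCSI_phase0[last_phase](amd, srb, scsistat);
 *	last_phase = scsistat & SCSI_PHASE_MASK;
 *	amd_SCSI_phase1[last_phase](amd, srb, scsistat);
 */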
/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t eeprom_period[] = {
	25,	/* 10.0MHz */
	32,	/*  8.0MHz */
	38,	/*  6.6MHz */
	44,	/*  5.7MHz */
	50,	/*  5.0MHz */
	63,	/*  4.0MHz */
	83,	/*  3.0MHz */
	125	/*  2.0MHz */
};

/*
 * chip clock setting to SCSI specified sync parameter table.
 */
u_int8_t tinfo_sync_period[] = {
	25,	/* 10.0 */
	32,	/*  8.0 */
	38,	/*  6.6 */
	44,	/*  5.7 */
	50,	/*  5.0 */
	57,	/*  4.4 */
	63,	/*  4.0 */
	70,	/*  3.6 */
	76,	/*  3.3 */
	83	/*  3.0 */
};
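
/*
 * The values in both tables are SCSI SDTR "transfer period factors" in
 * units of 4ns, per the SCSI-2 synchronous transfer convention: an
 * entry of 25 means a 100ns period, i.e. 10.0MHz, matching the rate
 * comments above.
 */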
static __inline struct amd_srb *
amdgetsrb(struct amd_softc * amd)
{
	struct amd_srb *pSRB;

	crit_enter();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	crit_exit();
	return (pSRB);
}
static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	struct ccb_scsiio *csio;
	u_int8_t *cdb;
	u_int cdb_len;

	csio = &srb->pccb->csio;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = csio->sense_len;
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}
/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd) {
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}
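
/*
 * A sketch of the SRB life cycle implemented by the queues used above:
 * SRBs move from free_srbs to waiting_srbs in amd_action()/
 * amdexecutesrb(), from waiting_srbs to running_srbs here once
 * amdstart() wins selection, and back to free_srbs in SRBdone() or
 * amdcompletematch().
 */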
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct amd_srb *srb;
	union ccb *ccb;
	struct amd_softc *amd;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			kprintf("amd%d: Unexpected error 0x%x returned from "
				"bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->MsgCnt = 0;
	srb->SRBStatus = 0;
	srb->SRBFlag = 0;
	srb->SRBState = 0;
	srb->TotalXferredLen = 0;
	srb->SGPhysAddr = 0;
	srb->SGToBeXferLen = 0;
	srb->EndMessage = 0;

	crit_enter();

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		crit_exit();
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
	/* XXX Need a timeout handler */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      amdtimeout, srb);
#endif
	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	crit_exit();
}
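
/*
 * amdexecutesrb() doubles as the bus_dmamap_load() callback and as a
 * direct call with a caller-built segment list (or nseg == 0 for
 * dataless CCBs).  bus_dmamap_load() may run the callback immediately
 * or defer it (EINPROGRESS), which is why amd_action() below freezes
 * the SIMQ in the deferred case to preserve ordering.
 */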
static void
amd_action(struct cam_sim * psim, union ccb * pccb)
{
	struct amd_softc *amd;
	u_int target_id, target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;
	target_lun = pccb->ccb_h.target_lun;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb *pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (!pSRB) {
			/* Freeze SIMQ */
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
					int error;

					crit_enter();
					error =
					    bus_dmamap_load(amd->buffer_dmat,
							    pSRB->dmamap,
							    pcsio->data_ptr,
							    pcsio->dxfer_len,
							    amdexecutesrb,
							    pSRB, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain
						 * ordering, freeze the
						 * controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(amd->psim, 1);
						pccb->ccb_h.status |=
						    CAM_RELEASE_SIMQ;
					}
					crit_exit();
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)pcsio->data_ptr;
					seg.ds_len = pcsio->dxfer_len;
					amdexecutesrb(pSRB, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					TAILQ_INSERT_HEAD(&amd->free_srbs,
							  pSRB, links);
					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					return;
				}

				/* Just use the segments provided */
				segs =
				    (struct bus_dma_segment *)pcsio->data_ptr;
				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
			}
		} else
			amdexecutesrb(pSRB, NULL, 0, 0);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int i;

		amd_ResetSCSIBus(amd);
		amd->ACBFlag = 0;

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_DEV:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info = &amd->tinfo[target_id];
		struct amd_transinfo *tinfo;
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		crit_enter();
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->current;
		} else {
			/* default(user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->user;
		}
		spi->sync_period = tinfo->period;
		spi->sync_offset = tinfo->offset;
		crit_exit();

		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info;
		u_int update_type = 0;
		int last_entry;

		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;
		if (IS_CURRENT_SETTINGS(cts)) {
			update_type |= AMD_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts)) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}
		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				    != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				    != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				    != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				    != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_offset = targ_info->goal.offset;
			else
				spi->sync_offset = targ_info->user.offset;
		}

		if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
			spi->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_period = targ_info->goal.period;
			else
				spi->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((spi->sync_period != 0)
		 && (spi->sync_period < tinfo_sync_period[0]))
			spi->sync_period = tinfo_sync_period[0];
		if (spi->sync_period > tinfo_sync_period[last_entry])
			spi->sync_period = 0;
		if (spi->sync_offset == 0)
			spi->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = spi->sync_period;
			targ_info->user.offset = spi->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = spi->sync_period;
			targ_info->goal.offset = spi->sync_offset;
		}
		crit_exit();
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		ccg = &pccb->ccg;
		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}
static void
amd_poll(struct cam_sim * psim)
{
	amd_intr(cam_sim_softc(psim));
}
static u_int8_t *
phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
{
	int dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t i;
	struct amd_sg *pseg;

	dataPtr = 0;
	pcsio = &pSRB->pccb->csio;

	dataPtr = (int) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (int) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (int) xferCnt;
	return ((u_int8_t *) dataPtr);
}
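
/*
 * phystovirt() recovers a kernel virtual address by offsetting
 * csio->data_ptr with the bytes already transferred through the S/G
 * list.  Note the int casts assume pointers fit in 32 bits, which
 * holds on the 32-bit platforms this driver was written for.
 */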
static void
ResetDevParam(struct amd_softc * amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}
static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;

		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;

		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}
static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings neg;
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;
			memset(&neg, 0, sizeof (neg));
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			spi->sync_period = period;
			spi->sync_offset = offset;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				   | CTS_SPI_VALID_SYNC_OFFSET;
			xpt_async(AC_TRANSFER_NEG, path, &neg);
			xpt_free_path(path);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
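
/*
 * A worked example of the register math above: a 10.0MHz SDTR request
 * (period factor 25) maps through amdfindclockrate() to clockrate 4,
 * so FAST_SCSI is set in CNTLREG3; a 5.0MHz request (factor 50) maps
 * to clockrate 8 (> 7), selecting the EATER_25NS path instead.
 */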
static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!\n");
}
#if 0
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	u_int8_t bval;
	u_int16_t i;

#ifdef AMD_DEBUG0
	kprintf("DC390: RESET");
#endif

	crit_enter();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	crit_exit();
	return;
}

void
amd_timeout(void *arg1)
{
	struct amd_srb *pSRB;

	pSRB = (struct amd_srb *) arg1;
}
#endif
static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;
	int tagged;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	tagged = 0;
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
		tagged++;
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}
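
/*
 * Selection command summary for amdstart(): SEL_W_ATN_STOP is used
 * when a sync negotiation (SDTR) must be sent first, SEL_W_ATN2 when a
 * tag message pair follows the identify, and plain SEL_W_ATN
 * otherwise.  A return of 1 means the chip already had an interrupt
 * pending and selection was not started; the SRB is left in SRB_READY
 * so amdrunwaiting() can retry it.
 */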
/*
 * Catch an interrupt from the adapter.
 * Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int internstat = 0;
	u_int scsistat;
	u_int intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: amd NULL return......");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: scsistat = NULL ,return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}
static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}
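
/*
 * The residual calculation above combines the bytes still sitting in
 * the SCSI FIFO (CURRENTFIFOREG & 0x1f) with the 24-bit transfer
 * counter (CTCREG_LOW/CTCREG_MID/CURTXTCNTREG) to work out how far
 * through the current S/G element the chip got before the phase
 * changed.
 */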
static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t i, residual;
	struct amd_sg *psgl;
	u_int32_t ResidCnt, xferCnt;
	u_int8_t *ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd, CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}
static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}
static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}
static int
amdparsemsg(struct amd_softc *amd)
{
	struct amd_target_info *targ_info;
	int reject;
	int done;
	int response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	targ_info = &amd->tinfo[amd->cur_target];

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			kprintf("amd%d: Unexpected tagged reselection "
				"for target %d, Issuing Abort\n", amd->unit,
				amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				kprintf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}
static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (i == 0) {
				*period = *table_entry;
			}
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else
		clockrate = i + 4;

	return (clockrate);
}
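
/*
 * Example: a target requesting a period factor of 30 (120ns) falls
 * between table entries 25 and 32; the loop stops at index 1, the
 * period is left at 30 for the SDTR reply, and the returned clockrate
 * is 1 + 4 = 5.  Periods slower than 83 (332ns, 3.0MHz) force async
 * transfers (clockrate 0).
 */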
/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}
static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}
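
/*
 * The five bytes queued above form a standard SCSI extended SDTR
 * message: 0x01 (MSG_EXTENDED), 0x03 (MSG_EXT_SDTR_LEN), 0x01
 * (MSG_EXT_SDTR), then the period factor and offset; e.g.
 * 01 03 01 19 0f requests a 100ns period at offset 15.
 */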
static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct amd_srb *srb;
	struct amd_target_info *targ_info;
	int response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		kprintf("amd%d:%d: refuses synchronous negotiation. "
			"Using asynchronous transfers\n",
			amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct ccb_trans_settings neg;
		struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;

		kprintf("amd%d:%d: refuses tagged commands.  Performing "
			"non-tagged I/O\n", amd->unit, amd->cur_target);

		amdsettags(amd, amd->cur_target, FALSE);
		memset(&neg, 0, sizeof (neg));
		scsi->valid = CTS_SCSI_VALID_TQ;
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				   | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		kprintf("amd%d:%d: Message reject received -- ignored\n",
			amd->unit, amd->cur_target);
	}
	return (response);
}
#if 0
	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
		if (bval == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
		} else if (bval == MSG_SAVEDATAPOINTER) {
			goto min6;
		} else if ((bval == MSG_EXTENDED)
			|| ((bval >= MSG_SIMPLE_Q_TAG)
			 && (bval <= MSG_ORDERED_Q_TAG))) {
			pSRB->SRBState |= SRB_MSGIN_MULTI;
			pSRB->MsgInBuf[0] = bval;
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
		} else if (bval == MSG_MESSAGE_REJECT) {
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);

			if (pSRB->SRBState & DO_SYNC_NEGO) {
				goto set_async;
			}
		} else if (bval == MSG_RESTOREPOINTERS) {
			goto min6;
		} else {
			goto min6;
		}
	} else {		/* minx: */
		*pSRB->pMsgPtr = bval;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK;	/* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {	/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval--;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					/* Fast SCSI */
					wval1--;
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

				/*
				 * program SCSI control register
				 */
		re_prog:
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
min6:
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
#endif
static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}

static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}

static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *psgl;
	u_int32_t lval;

	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);	/* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}
static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}

static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}
static void
amd_Disconnect(struct amd_softc * amd)
{
	struct amd_srb *srb;
	int target;
	int lun;

	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and finish this srb */
#if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
			/* XXX What about "done'ing" these srbs??? */
			if (pSRB->pSRBDCB == pDCB) {
				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
			}
		}
		amdrunwaiting(amd);
#endif
	} else {
		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
			goto disc1;
		} else if (srb->SRBState & SRB_DISCONNECT) {
			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
				amd->untagged_srbs[target][lun] = srb;
			amdrunwaiting(amd);
		} else if (srb->SRBState & SRB_COMPLETED) {
	disc1:
			srb->SRBState = SRB_FREE;
			SRBdone(amd, srb);
		}
	}
	return;
}
static void
amd_Reselect(struct amd_softc *amd)
{
	struct amd_target_info *tinfo;
	u_int16_t disc_count;

	amd_clear_msg_state(amd);
	if (amd->active_srb != NULL) {
		/* Requeue the SRB for our attempted Selection */
		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
		amd->active_srb = NULL;
	}
	/* get ID */
	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
	amd->cur_target ^= amd->HostID_Bit;
	amd->cur_target = ffs(amd->cur_target) - 1;
	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
	tinfo = &amd->tinfo[amd->cur_target];
	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
	if (disc_count == 0) {
		kprintf("amd%d: Unexpected reselection for target %d, "
			"Issuing Abort\n", amd->unit, amd->cur_target);
		amd->msgout_buf[0] = MSG_ABORT;
		amd->msgout_len = 1;
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
	}
	if (amd->active_srb != NULL) {
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
	}

	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);	/* drop /ACK */
	amd->last_phase = SCSI_NOP0;
}
static void
SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
{
	u_int8_t bval, i, status;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_sg *ptr2;
	u_int32_t swlval;
	u_int target_id, target_lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target_id = pSRB->pccb->ccb_h.target_id;
	target_lun = pSRB->pccb->ccb_h.target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));

	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
	}

	status = pSRB->TargetStatus;
	pccb->ccb_h.status = CAM_REQ_CMP;
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		pSRB->SRBFlag &= ~AUTO_REQSENSE;
		pSRB->AdaptStatus = 0;
		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;

		if (status == SCSI_STATUS_CHECK_COND) {
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
			goto ckc_e;
		}
		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];

		pcsio->sense_resid = pcsio->sense_len
				   - pSRB->TotalXferredLen;
		pSRB->TotalXferredLen = pSRB->Segment1[1];
		if (pSRB->TotalXferredLen) {
			/* ???? */
			pcsio->resid = pcsio->dxfer_len
				     - pSRB->TotalXferredLen;
			/* The resid field contains valid data	*/
			/* Flush resid bytes on complete	*/
		} else {
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
		}
		pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
		goto ckc_e;
	}
	if (status) {
		if (status == SCSI_STATUS_CHECK_COND) {

			if ((pSRB->SGIndex < pSRB->SGcount)
			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
				bval = pSRB->SGcount;
				swlval = pSRB->SGToBeXferLen;
				ptr2 = pSRB->pSGlist;
				ptr2++;
				for (i = pSRB->SGIndex + 1; i < bval; i++) {
					swlval += ptr2->SGXLen;
					ptr2++;
				}
				/* ??????? */
				pcsio->resid = (u_int32_t) swlval;

#ifdef	AMD_DEBUG0
				kprintf("XferredLen=%8x,NotYetXferLen=%8x,",
					pSRB->TotalXferredLen, swlval);
#endif
			}
			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
#ifdef	AMD_DEBUG0
				kprintf("RequestSense..................\n");
#endif
				RequestSense(amd, pSRB);
				return;
			}
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == SCSI_STATUS_QUEUE_FULL) {
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
			pSRB->AdaptStatus = H_SEL_TIMEOUT;
			pSRB->TargetStatus = 0;

			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
		} else if (status == SCSI_STATUS_BUSY) {
#ifdef AMD_DEBUG0
			kprintf("DC390: target busy at %s %d\n",
				__FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_BUSY;
			pccb->ccb_h.status = CAM_SCSI_BUSY;
		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
#ifdef AMD_DEBUG0
			kprintf("DC390: target reserved at %s %d\n",
				__FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;	/* XXX */
		} else {
			pSRB->AdaptStatus = 0;
#ifdef AMD_DEBUG0
			kprintf("DC390: driver stuffup at %s %d\n",
				__FILE__, __LINE__);
#endif
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		}
	} else {
		status = pSRB->AdaptStatus;
		if (status & H_OVER_UNDER_RUN) {
			pSRB->TargetStatus = 0;

			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
		} else if (pSRB->SRBStatus & PARITY_ERROR) {
#ifdef AMD_DEBUG0
			kprintf("DC390: driver stuffup %s %d\n",
				__FILE__, __LINE__);
#endif
			/* Driver failed to perform operation */
			pccb->ccb_h.status = CAM_UNCOR_PARITY;
		} else {	/* No error */
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->resid = 0;
			/* there is no error, (sense is invalid) */
		}
	}
ckc_e:
	crit_enter();
	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* CAM request not yet complete =>device_Q frozen */
		xpt_freeze_devq(pccb->ccb_h.path, 1);
		pccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
	amdrunwaiting(amd);
	crit_exit();
	xpt_done(pccb);
}
static void
amd_ResetSCSIBus(struct amd_softc * amd)
{
	crit_enter();
	amd->ACBFlag |= RESET_DEV;
	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
	crit_exit();
	return;
}
static void
amd_ScsiRstDetect(struct amd_softc * amd)
{
	u_int32_t wlval;

#ifdef AMD_DEBUG0
	kprintf("amd_ScsiRstDetect \n");
#endif

	wlval = 1000;
	while (--wlval) {	/* delay 1 sec */
		DELAY(1000);
	}
	crit_enter();

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->ACBFlag & RESET_DEV) {
		amd->ACBFlag |= RESET_DONE;
	} else {
		amd->ACBFlag |= RESET_DETECT;
		ResetDevParam(amd);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->running_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amd->active_srb = NULL;
		amd->ACBFlag = 0;
		amdrunwaiting(amd);
	}
	crit_exit();
	return;
}
static void
RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	pSRB->SRBFlag |= AUTO_REQSENSE;
	/* Save the original CDB and transfer state for later restoration. */
	pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
	pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
	pSRB->Segment1[1] = pSRB->TotalXferredLen;

	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = 0;

	/* Point a single S/G element at the CCB's sense buffer. */
	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;

	pSRB->pSGlist = &pSRB->Segmentx;
	pSRB->SGcount = 1;
	pSRB->SGIndex = 0;

	/*
	 * Build a 6-byte REQUEST SENSE CDB (opcode 0x03).  The 32-bit
	 * store assumes a little-endian host, as the original code did.
	 */
	*((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
	*((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
	pSRB->ScsiCmdLen = 6;

	pSRB->TotalXferredLen = 0;
	pSRB->SGToBeXferLen = 0;
	if (amdstart(amd, pSRB) != 0) {
		/* The bus is busy; queue the SRB to be started later. */
		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
	}
}

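/*
 * Handle the chip's "invalid command" condition.  If a selection or
 * message-out was in progress, clear the FIFO to return the sequencer
 * to a sane state.
 */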
static void
amd_InvalidCmd(struct amd_softc *amd)
{
	struct amd_srb *srb;

	srb = amd->active_srb;
	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
}

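/*
 * Initialize the SRB array: assign tag numbers, create a DMA map for
 * each SRB, and place them all on the free list.
 */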
void
amd_linkSRB(struct amd_softc *amd)
{
	u_int16_t count, i;
	struct amd_srb *psrb;
	int error;

	count = amd->SRBCount;

	for (i = 0; i < count; i++) {
		psrb = (struct amd_srb *)&amd->SRB_array[i];
		psrb->TagNumber = i;

		/*
		 * Create the dmamap.  This is no longer optional!
		 */
		error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
		if (error) {
			device_printf(amd->dev, "Error %d creating buffer "
				      "dmamap!\n", error);
			break;
		}
		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
	}
}

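/*
 * Assert (ENABLE_CE) or deassert (DISABLE_CE) the configuration
 * EEPROM's chip-enable line.  The EEPROM (presumably a 93C46-style
 * serial part) is bit-banged through PCI config space; the write
 * target (config register 0xc0 vs. 0x80) appears to encode the
 * chip-enable state, following the original Tekram code.
 */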
void
amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
{
	if (mode == ENABLE_CE) {
		*regval = 0xc0;
	} else {
		*regval = 0x80;
	}
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	if (mode == DISABLE_CE) {
		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	}
	DELAY(160);
}

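/*
 * Clock one bit out to the EEPROM's data-in (DI) pin.  From the bit
 * patterns used, 0x40 appears to drive the data line and 0x80 the
 * clock: raise data if Carry is set, pulse the clock, then drop both.
 */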
void
amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
{
	u_int bval;

	bval = 0;
	if (Carry) {
		bval = 0x40;
		*regval = 0x80;
		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	}
	DELAY(160);
	bval |= 0x80;
	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	DELAY(160);
}

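/*
 * Pulse the clock and sample one bit from the EEPROM's data-out (DO)
 * pin, returned as 0 or 1.  The chip evidently reflects the bit
 * through a one-byte read of PCI config register 0.
 */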
static int
amd_EEpromInDO(struct amd_softc *amd)
{
	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
	DELAY(160);
	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
		return (1);
	return (0);
}

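/*
 * Shift one 16-bit word in from the EEPROM, MSB first.
 */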
static u_int16_t
EEpromGetData1(struct amd_softc *amd)
{
	u_int i;
	u_int carryFlag;
	u_int16_t wval;

	wval = 0;
	for (i = 0; i < 16; i++) {
		wval <<= 1;
		carryFlag = amd_EEpromInDO(amd);
		wval |= carryFlag;
	}
	return (wval);
}

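/*
 * Send a 9-bit EEPROM command sequence: a leading start bit followed
 * by the eight bits of EEpromCmd (opcode plus word address), MSB
 * first.
 */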
static void
amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
{
	u_int i, j;
	int carryFlag;

	carryFlag = 1;
	j = 0x80;
	for (i = 0; i < 9; i++) {
		amd_EEpromOutDI(amd, regval, carryFlag);
		carryFlag = (EEpromCmd & j) ? 1 : 0;
		j >>= 1;
	}
}

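/*
 * Read all 0x40 16-bit words of the configuration EEPROM into
 * eepromBuf, issuing one READ command per word.
 */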
static void
amd_ReadEEprom(struct amd_softc *amd)
{
	int regval;
	u_int i;
	u_int16_t *ptr;
	u_int8_t cmd;

	ptr = (u_int16_t *)&amd->eepromBuf[0];
	cmd = EEPROM_READ;
	for (i = 0; i < 0x40; i++) {
		amd_EnDisableCE(amd, ENABLE_CE, &regval);
		amd_Prepare(amd, &regval, cmd);
		*ptr = EEpromGetData1(amd);
		ptr++;
		cmd++;
		amd_EnDisableCE(amd, DISABLE_CE, &regval);
	}
}

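/*
 * Fill eepromBuf with conservative defaults: tagged queueing,
 * disconnection, sync negotiation and parity checking enabled for
 * every target, host SCSI ID 7.  Each target owns a four-byte slot,
 * hence the "target << 2" indexing.
 */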
static void
amd_load_defaults(struct amd_softc *amd)
{
	int target;

	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
	for (target = 0; target < MAX_SCSI_ID; target++)
		amd->eepromBuf[target << 2] =
		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
}

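/*
 * Read the EEPROM and validate it: the 16-bit words must sum to
 * EE_CHECKSUM.  On mismatch, fall back to the compiled-in defaults.
 */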
static void
amd_load_eeprom_or_defaults(struct amd_softc *amd)
{
	u_int16_t wval, *ptr;
	u_int8_t i;

	amd_ReadEEprom(amd);
	wval = 0;
	ptr = (u_int16_t *)&amd->eepromBuf[0];
	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
		wval += *ptr;

	if (wval != EE_CHECKSUM) {
		if (bootverbose)
			kprintf("amd%d: SEEPROM data unavailable.  "
				"Using default device parameters.\n",
				amd->unit);
		amd_load_defaults(amd);
	}
}

/*
 **********************************************************************
 * Function : static int amd_init (device_t dev)
 * Purpose  : initialize the internal structures for a given SCSI host
 * Inputs   : dev - the newbus device for this host adapter
 **********************************************************************
 */
static int
amd_init(device_t dev)
{
	struct amd_softc *amd = device_get_softc(dev);
	struct resource	*iores;
	int		i, rid;
	u_int		bval;

	rid = PCI_BASE_ADDR0;
	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
				   RF_ACTIVE);
	if (iores == NULL) {
		if (bootverbose)
			kprintf("amd_init: bus_alloc_resource failure!\n");
		return ENXIO;
	}
	amd->tag = rman_get_bustag(iores);
	amd->bsh = rman_get_bushandle(iores);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &amd->buffer_dmat) != 0) {
		if (bootverbose)
			kprintf("amd_init: bus_dma_tag_create failure!\n");
		return ENXIO;
	}
	TAILQ_INIT(&amd->free_srbs);
	TAILQ_INIT(&amd->running_srbs);
	TAILQ_INIT(&amd->waiting_srbs);
	amd->last_phase = SCSI_BUS_FREE;
	amd->dev = dev;
	amd->unit = device_get_unit(dev);
	amd->SRBCount = MAX_SRB_CNT;
	amd->status = 0;
	amd_load_eeprom_or_defaults(amd);
	amd->max_id = 7;
	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
		amd->max_lun = 7;
	} else {
		amd->max_lun = 0;
	}
	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
	amd->AdaptSCSILUN = 0;
	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
	amd->ACBFlag = 0;
	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
	amd_linkSRB(amd);
	for (i = 0; i <= amd->max_id; i++) {
		if (amd->AdaptSCSIID != i) {
			struct amd_target_info *tinfo;
			PEEprom prom;

			tinfo = &amd->tinfo[i];
			prom = (PEEprom)&amd->eepromBuf[i << 2];
			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
				tinfo->disc_tag |= AMD_USR_DISCENB;
				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
					tinfo->disc_tag |= AMD_USR_TAGENB;
			}
			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
				tinfo->user.period =
				    eeprom_period[prom->EE_SPEED];
				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
			}
			tinfo->CtrlR1 = amd->AdaptSCSIID;
			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
				tinfo->CtrlR1 |= PARITY_ERR_REPO;
			tinfo->CtrlR3 = FAST_CLK;
			tinfo->CtrlR4 = EATER_25NS;
			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
		}
	}
	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
	/* Conversion factor = 0 , 40MHz clock */
	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
	/* NOP cmd - clear command register */
	amd_write8(amd, SCSICMDREG, NOP_CMD);
	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
	amd_write8(amd, CNTLREG3, FAST_CLK);
	bval = EATER_25NS;
	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
		bval |= NEGATE_REQACKDATA;
	}
	amd_write8(amd, CNTLREG4, bval);

	/* Disable SCSI bus reset interrupt */
	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);

	return 0;
}

/*
 * Attach and initialize a host adapter.
 */
static int
amd_attach(device_t dev)
{
	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
	u_int8_t	intstat;
	struct amd_softc *amd = device_get_softc(dev);
	int		unit = device_get_unit(dev);
	int		rid;
	void		*ih;
	struct resource	*irqres;

	if (amd_init(dev)) {
		if (bootverbose)
			kprintf("amd_attach: amd_init failure!\n");
		return ENXIO;
	}

	/* Reading the status register clears any pending interrupt. */
	intstat = amd_read8(amd, INTSTATREG);

	/* After setting up the adapter, map our interrupt */
	rid = 0;
	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
				    RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL) != 0) {
		if (bootverbose)
			kprintf("amd%d: unable to register interrupt "
				"handler!\n", unit);
		return ENXIO;
	}

	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus and start the queues.  Create the device queue for
	 * our SIM; MAX_START_JOB bounds the number of simultaneous
	 * transactions.
	 */
	devq = cam_simq_alloc(MAX_START_JOB);
	if (devq == NULL) {
		if (bootverbose)
			kprintf("amd_attach: cam_simq_alloc failure!\n");
		return ENXIO;
	}

	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
				  amd, amd->unit, &sim_mplock, 1,
				  MAX_TAGS_CMD_QUEUE, devq);
	cam_simq_release(devq);
	if (amd->psim == NULL) {
		if (bootverbose)
			kprintf("amd_attach: cam_sim_alloc failure!\n");
		return ENXIO;
	}

	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
		cam_sim_free(amd->psim);
		if (bootverbose)
			kprintf("amd_attach: xpt_bus_register failure!\n");
		return ENXIO;
	}

	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(amd->psim));
		cam_sim_free(amd->psim);
		if (bootverbose)
			kprintf("amd_attach: xpt_create_path failure!\n");
		return ENXIO;
	}

	return 0;
}

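/*
 * Probe: match the AMD 53c974 PCI device ID, around which the Tekram
 * DC-390 family of adapters is built.
 */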
static int
amd_probe(device_t dev)
{
	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
		device_set_desc(dev,
			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
		return 0;
	}
	return ENXIO;
}

static device_method_t amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_probe),
	DEVMETHOD(device_attach,	amd_attach),
	{ 0, 0 }
};

static driver_t amd_driver = {
	"amd", amd_methods, sizeof(struct amd_softc)
};

static devclass_t amd_devclass;
DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);