/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
 * $DragonFly: src/sys/dev/disk/advansys/advlib.c,v 1.7 2006/10/25 20:55:52 dillon Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_da.h>
#include <bus/cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "advansys.h"
struct adv_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS  0x01
#define ADV_QUIRK_FIX_ASYN_XFER         0x02
};
static struct adv_quirk_entry adv_quirk_table[] =
{
        {
                { T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
                ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
        },
        {
                { T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
                0
        },
        {
                {
                        T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
                        "TANDBERG", " TDC 36", "*"
                },
                0
        },
        {
                { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
                0
        },
        {
                {
                        T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                        "*", "*", "*"
                },
                0
        },
        {
                {
                        T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                        "*", "*", "*"
                },
                0
        },
        {
                /* Default quirk entry */
                {
                        T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                        /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                ADV_QUIRK_FIX_ASYN_XFER,
        }
};
/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
        /* ... */
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
        /* ... */
};
struct ext_msg {
        u_int8_t msg_type;
        u_int8_t msg_len;
        u_int8_t msg_req;
        union {
                struct {
                        u_int8_t sdtr_xfer_period;
                        u_int8_t sdtr_req_ack_offset;
                } sdtr;
                struct {
                        u_int8_t wdtr_width;
                } wdtr;
                u_int8_t mdp_b3;
                u_int8_t mdp_b2;
                u_int8_t mdp_b1;
                u_int8_t mdp_b0;
        } u_ext_msg;
        u_int8_t res;
};

#define xfer_period     u_ext_msg.sdtr.sdtr_xfer_period
#define req_ack_offset  u_ext_msg.sdtr.sdtr_req_ack_offset
#define wdtr_width      u_ext_msg.wdtr.wdtr_width
#define mdp_b3          u_ext_msg.mdp_b3
#define mdp_b2          u_ext_msg.mdp_b2
#define mdp_b1          u_ext_msg.mdp_b1
#define mdp_b0          u_ext_msg.mdp_b0
/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB   0x41
static void      adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
                                        u_int16_t *buffer, int count);
static void      adv_write_lram_16_multi(struct adv_softc *adv,
                                         u_int16_t s_addr, u_int16_t *buffer,
                                         int count);
static void      adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
                                  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
                                  int count);
static int       adv_write_and_verify_lram_16(struct adv_softc *adv,
                                              u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void      adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
                                   u_int32_t value);
static void      adv_write_lram_32_multi(struct adv_softc *adv,
                                         u_int16_t s_addr, u_int32_t *buffer,
                                         int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
                                     u_int16_t value);
static int       adv_write_eeprom_cmd_reg(struct adv_softc *adv,
                                          u_int8_t cmd_reg);
static int       adv_set_eeprom_config_once(struct adv_softc *adv,
                                            struct adv_eeprom_config *eeconfig);

static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
                                    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void      adv_reinit_lram(struct adv_softc *adv);
static void      adv_init_lram(struct adv_softc *adv);
static int       adv_init_microcode_var(struct adv_softc *adv);
static void      adv_init_qlink_var(struct adv_softc *adv);

static void      adv_disable_interrupt(struct adv_softc *adv);
static void      adv_enable_interrupt(struct adv_softc *adv);
static void      adv_toggle_irq_act(struct adv_softc *adv);

static int       adv_host_req_chip_halt(struct adv_softc *adv);
static void      adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
/* Queue handling and execution */
static __inline int
adv_sgcount_to_qcount(int sgcount);

static __inline int
adv_sgcount_to_qcount(int sgcount)
{
        int     n_sg_list_qs;

        n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
        if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
                n_sg_list_qs++;
        return (n_sg_list_qs + 1);
}
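
/*
 * Worked example for the math above (illustrative only -- the real
 * ADV_SG_LIST_PER_Q value comes from the driver headers): if
 * ADV_SG_LIST_PER_Q were 7, a 15 entry S/G list would need
 * (15 - 1) / 7 = 2 continuation queues with no remainder, so
 * adv_sgcount_to_qcount() returns 2 + 1 = 3, counting the head
 * queue itself.
 */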
static void      adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
                                u_int16_t *inbuf, int words);
static u_int     adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
                                       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
                                      u_int8_t free_q_head);
static int       adv_send_scsi_queue(struct adv_softc *adv,
                                     struct adv_scsi_q *scsiq,
                                     u_int8_t n_q_required);
static void      adv_put_ready_sg_list_queue(struct adv_softc *adv,
                                             struct adv_scsi_q *scsiq,
                                             u_int q_no);
static void      adv_put_ready_queue(struct adv_softc *adv,
                                     struct adv_scsi_q *scsiq, u_int q_no);
static void      adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
                               u_int16_t *buffer, int words);

static void      adv_handle_extmsg_in(struct adv_softc *adv,
                                      u_int16_t halt_q_addr, u_int8_t q_cntl,
                                      target_bit_vector target_id,
                                      int tid);
static void      adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
                                 u_int8_t sdtr_offset);
static void      adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
                                        u_int8_t sdtr_data);
/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
        struct adv_softc *adv;

        adv = (struct adv_softc *)callback_arg;
        switch (code) {
        case AC_FOUND_DEVICE:
        {
                struct ccb_getdev *cgd;
                target_bit_vector target_mask;
                int num_entries;
                caddr_t match;
                struct adv_quirk_entry *entry;
                struct adv_target_transinfo* tinfo;

                cgd = (struct ccb_getdev *)arg;

                target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

                num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
                match = cam_quirkmatch((caddr_t)&cgd->inq_data,
                                       (caddr_t)adv_quirk_table,
                                       num_entries, sizeof(*adv_quirk_table),
                                       scsi_inquiry_match);

                if (match == NULL)
                        panic("advasync: device didn't match wildcard entry!!");

                entry = (struct adv_quirk_entry *)match;

                if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
                        if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
                                adv->fix_asyn_xfer_always |= target_mask;
                        else
                                adv->fix_asyn_xfer_always &= ~target_mask;
                        /*
                         * We start out life with all bits set and clear them
                         * after we've determined that the fix isn't necessary.
                         * It may well be that we've already cleared a target
                         * before the full inquiry session completes, so don't
                         * gratuitously set a target bit even if it has this
                         * quirk.  But, if the quirk exonerates a device, clear
                         * the bit now.
                         */
                        if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
                                adv->fix_asyn_xfer &= ~target_mask;
                }
                /*
                 * Reset our sync settings now that we've determined
                 * what quirks are in effect for the device.
                 */
                tinfo = &adv->tinfo[cgd->ccb_h.target_id];
                adv_set_syncrate(adv, cgd->ccb_h.path,
                                 cgd->ccb_h.target_id,
                                 tinfo->current.period,
                                 tinfo->current.offset,
                                 ADV_TRANS_CUR);
                break;
        }
        case AC_LOST_DEVICE:
        {
                target_bit_vector target_mask;

                if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
                        target_mask = 0x01 << xpt_path_target_id(path);
                        adv->fix_asyn_xfer |= target_mask;
                }

                /*
                 * Revert to async transfers
                 * for the next device.
                 */
                adv_set_syncrate(adv, /*path*/NULL,
                                 xpt_path_target_id(path),
                                 /*period*/0,
                                 /*offset*/0,
                                 ADV_TRANS_GOAL|ADV_TRANS_CUR);
                break;
        }
        default:
                break;
        }
}
void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
        u_int8_t control;

        /*
         * Start out with the bank reset to 0
         */
        control = ADV_INB(adv, ADV_CHIP_CTRL)
                  & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
                     | ADV_CC_DIAG | ADV_CC_SCSI_RESET
                     | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
        if (bank == 1) {
                control |= ADV_CC_BANK_ONE;
        } else if (bank == 2) {
                control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
        }
        ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}
u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
        u_int8_t   byte_data;
        u_int16_t  word_data;

        /*
         * LRAM is accessed on 16bit boundaries.
         */
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
        word_data = ADV_INW(adv, ADV_LRAM_DATA);
        if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
                byte_data = (u_int8_t)(word_data & 0xFF);
#else
                byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
        } else {
#if BYTE_ORDER == BIG_ENDIAN
                byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
                byte_data = (u_int8_t)(word_data & 0xFF);
#endif
        }
        return (byte_data);
}
void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
        u_int16_t word_data;

        word_data = adv_read_lram_16(adv, addr & 0xFFFE);
        if (addr & 1) {
                word_data &= 0x00FF;
                word_data |= (((u_int8_t)value << 8) & 0xFF00);
        } else {
                word_data &= 0xFF00;
                word_data |= ((u_int8_t)value & 0x00FF);
        }
        adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}
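
/*
 * Byte-lane note for the two routines above: an 8-bit LRAM access is
 * implemented as a 16-bit read-modify-write of the aligned word.  As
 * the code reads on a little-endian host, the even address maps to
 * the low byte of the word and the odd address to the high byte; the
 * BYTE_ORDER conditionals swap that mapping on big-endian hosts.
 * This is a reading of the code, not a statement from vendor docs.
 */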
u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        return (ADV_INW(adv, ADV_LRAM_DATA));
}
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
        u_int16_t signature;

        if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
                signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
                if ((signature == ADV_1000_ID0W)
                 || (signature == ADV_1000_ID0W_FIX))
                        return (1);
        }
        return (0);
}
void
adv_lib_init(struct adv_softc *adv)
{
        if ((adv->type & ADV_ULTRA) != 0) {
                adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
                adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
        } else {
                adv->sdtr_period_tbl = adv_sdtr_period_tbl;
                adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
        }
}
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
                      adv_eeprom_config *eeprom_config)
{
        u_int16_t       sum;
        u_int16_t       *wbuf;
        u_int8_t        s_addr;
        u_int8_t        cfg_beg;
        u_int8_t        cfg_end;

        wbuf = (u_int16_t *)eeprom_config;
        sum = 0;

        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                *wbuf = adv_read_eeprom_16(adv, s_addr);
                sum += *wbuf;
        }

        if (adv->type & ADV_VL) {
                cfg_beg = ADV_EEPROM_CFG_BEG_VL;
                cfg_end = ADV_EEPROM_MAX_ADDR_VL;
        } else {
                cfg_beg = ADV_EEPROM_CFG_BEG;
                cfg_end = ADV_EEPROM_MAX_ADDR;
        }

        for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
                *wbuf = adv_read_eeprom_16(adv, s_addr);
                sum += *wbuf;
#if ADV_DEBUG_EEPROM
                printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
        }
        *wbuf = adv_read_eeprom_16(adv, s_addr);
        return (sum);
}
int
adv_set_eeprom_config(struct adv_softc *adv,
                      struct adv_eeprom_config *eeprom_config)
{
        int     retry;

        retry = 0;
        while (1) {
                if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
                        break;
                }
                if (++retry > ADV_EEPROM_MAX_RETRY) {
                        break;
                }
        }
        return (retry > ADV_EEPROM_MAX_RETRY);
}
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
        ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
                                     | (reset_bus ? ADV_CC_SCSI_RESET : 0));

        adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
        adv_set_chip_ih(adv, ADV_INS_HALT);

        if (reset_bus)
                ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

        ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);

        ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
        ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
        return (adv_is_chip_halted(adv));
}
int
adv_test_external_lram(struct adv_softc* adv)
{
        u_int16_t q_addr;
        u_int16_t saved_value;
        int       success;

        success = FALSE;

        q_addr = ADV_QNO_TO_QADDR(241);
        saved_value = adv_read_lram_16(adv, q_addr);
        if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
                success = TRUE;
                adv_write_lram_16(adv, q_addr, saved_value);
        }
        return (success);
}
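
/*
 * The 0x55AA pattern used above alternates adjacent bits in both
 * bytes, so a successful write-and-verify at queue number 241 (well
 * beyond the on-chip queue storage) is taken as evidence that
 * external LRAM is present and addressable.
 */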
int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
        u_int32_t retval;

        adv_disable_interrupt(adv);

        adv_init_lram(adv);

        retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
                                    adv_mcode_size);
        if (retval != adv_mcode_chksum) {
                printf("adv%d: Microcode download failed checksum!\n",
                       adv->unit);
                return (1);
        }

        if (adv_init_microcode_var(adv) != 0)
                return (1);

        adv_enable_interrupt(adv);
        return (0);
}
u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
        u_int16_t cfg_lsw;
        u_int8_t  chip_irq;

        cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

        if ((adv->type & ADV_VL) != 0) {
                chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
                if ((chip_irq == 0) ||
                    (chip_irq == 4) ||
                    (chip_irq == 7)) {
                        return (0);
                }
                return (chip_irq + (ADV_MIN_IRQ_NO - 1));
        }
        chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
        if (chip_irq == 3)
                chip_irq += 2;
        return (chip_irq + ADV_MIN_IRQ_NO);
}
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
        u_int16_t cfg_lsw;

        if ((adv->type & ADV_VL) != 0) {
                if (irq_no != 0) {
                        if ((irq_no < ADV_MIN_IRQ_NO)
                         || (irq_no > ADV_MAX_IRQ_NO)) {
                                irq_no = 0;
                        } else {
                                irq_no -= ADV_MIN_IRQ_NO - 1;
                        }
                }
                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
                adv_toggle_irq_act(adv);

                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
                cfg_lsw |= (irq_no & 0x07) << 2;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
                adv_toggle_irq_act(adv);
        } else if ((adv->type & ADV_ISA) != 0) {
                if (irq_no == 15)
                        irq_no -= 2;
                irq_no -= ADV_MIN_IRQ_NO;
                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
                cfg_lsw |= (irq_no & 0x03) << 2;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
        }
        return (adv_get_chip_irq(adv));
}
void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
        u_int16_t cfg_lsw;

        cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
        if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
                return;

        cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
        cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
        ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                       u_int32_t datalen)
{
        struct          adv_target_transinfo* tinfo;
        u_int32_t       *p_data_addr;
        u_int32_t       *p_data_bcount;
        int             disable_syn_offset_one_fix;
        int             retval;
        u_int           n_q_required;
        u_int32_t       addr;
        u_int8_t        sg_entry_cnt;
        u_int8_t        target_ix;
        u_int8_t        sg_entry_cnt_minus_one;
        u_int8_t        tid_no;

        retval = 1;     /* Default to error case */
        target_ix = scsiq->q2.target_ix;
        tid_no = ADV_TIX_TO_TID(target_ix);
        tinfo = &adv->tinfo[tid_no];

        if (scsiq->cdbptr[0] == REQUEST_SENSE) {
                /* Renegotiate if appropriate. */
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_CUR);
                if (tinfo->current.period != tinfo->goal.period) {
                        adv_msgout_sdtr(adv, tinfo->goal.period,
                                        tinfo->goal.offset);
                        scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
                }
        }

        if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
                sg_entry_cnt = scsiq->sg_head->entry_cnt;
                sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
                if (sg_entry_cnt <= 1)
                        panic("adv_execute_scsi_queue: Queue "
                              "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

                if (sg_entry_cnt > ADV_MAX_SG_LIST)
                        panic("adv_execute_scsi_queue: "
                              "Queue with too many segs.");

                if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
                        int i;

                        for (i = 0; i < sg_entry_cnt_minus_one; i++) {
                                addr = scsiq->sg_head->sg_list[i].addr +
                                       scsiq->sg_head->sg_list[i].bytes;

                                if ((addr & 0x0003) != 0)
                                        panic("adv_execute_scsi_queue: SG "
                                              "with odd address or byte count");
                        }
                }
#endif
                p_data_addr =
                    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
                p_data_bcount =
                    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

                n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
                scsiq->sg_head->queue_cnt = n_q_required - 1;
        } else {
                p_data_addr = &scsiq->q1.data_addr;
                p_data_bcount = &scsiq->q1.data_cnt;
                n_q_required = 1;
        }

        disable_syn_offset_one_fix = FALSE;

        if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
         && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

                if (datalen != 0) {
                        if (datalen < 512) {
                                disable_syn_offset_one_fix = TRUE;
                        } else {
                                if (scsiq->cdbptr[0] == INQUIRY
                                 || scsiq->cdbptr[0] == REQUEST_SENSE
                                 || scsiq->cdbptr[0] == READ_CAPACITY
                                 || scsiq->cdbptr[0] == MODE_SELECT_6
                                 || scsiq->cdbptr[0] == MODE_SENSE_6
                                 || scsiq->cdbptr[0] == MODE_SENSE_10
                                 || scsiq->cdbptr[0] == MODE_SELECT_10
                                 || scsiq->cdbptr[0] == READ_TOC) {
                                        disable_syn_offset_one_fix = TRUE;
                                }
                        }
                }
        }

        if (disable_syn_offset_one_fix) {
                scsiq->q2.tag_code &=
                    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
                scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
                                     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
        }

        if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
         && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
                u_int8_t extra_bytes;

                addr = *p_data_addr + *p_data_bcount;
                extra_bytes = addr & 0x0003;
                if (extra_bytes != 0
                 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
                  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
                        scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
                        scsiq->q1.extra_bytes = extra_bytes;
                        *p_data_bcount -= extra_bytes;
                }
        }

        if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
         || ((scsiq->q1.cntl & QC_URGENT) != 0))
                retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

        return (retval);
}
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
                    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
        u_int16_t val;
        u_int8_t  sg_queue_cnt;

        adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
                       (u_int16_t *)&scsiq->d2,
                       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
        adv_adj_endian_qdone_info(scsiq);
#endif

        val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
        scsiq->q_status = val & 0xFF;
        scsiq->q_no = (val >> 8) & 0XFF;

        val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
        scsiq->cntl = val & 0xFF;
        sg_queue_cnt = (val >> 8) & 0xFF;

        val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
        scsiq->sense_len = val & 0xFF;
        scsiq->extra_bytes = (val >> 8) & 0xFF;

        /*
         * Due to a bug in accessing LRAM on the 940UA, the residual
         * is split into separate high and low 16bit quantities.
         */
        scsiq->remain_bytes =
            adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
        scsiq->remain_bytes |=
            adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

        /*
         * XXX Is this just a safeguard or will the counter really
         * have bogus upper bits?
         */
        scsiq->remain_bytes &= max_dma_count;

        return (sg_queue_cnt);
}
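
/*
 * Residual assembly example for the split reads above: if the
 * microcode left 0x0001 in the ALT_DC1 word and 0x2000 in the
 * REMAIN_XFER_CNT word, the two 16-bit reads combine into a residual
 * of (0x0001 << 16) | 0x2000 = 0x12000 bytes before the
 * max_dma_count mask is applied.  The values are made up purely for
 * illustration.
 */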
int
adv_start_chip(struct adv_softc *adv)
{
        ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
        if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
                return (0);
        return (1);
}
int
adv_stop_execution(struct adv_softc *adv)
{
        int count;

        count = 0;
        if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
                adv_write_lram_8(adv, ADV_STOP_CODE_B,
                                 ADV_STOP_REQ_RISC_STOP);
                do {
                        if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
                                ADV_STOP_ACK_RISC_STOP) {
                                return (1);
                        }
                        DELAY(1000);
                } while (count++ < 20);
        }
        return (0);
}
int
adv_is_chip_halted(struct adv_softc *adv)
{
        if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
                if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
                        return (1);
                }
        }
        return (0);
}
/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
        u_int8_t        host_flag;
        u_int8_t        risc_flag;
        int             loop;

        loop = 0;
        do {
                risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
                if (loop++ > 0x7FFF) {
                        break;
                }
        } while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

        host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
        adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
                         host_flag | ADV_HOST_FLAG_ACK_INT);

        ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
        loop = 0;
        while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
                ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
                if (loop++ > 3) {
                        break;
                }
        }

        adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
        u_int16_t         int_halt_code;
        u_int16_t         halt_q_addr;
        target_bit_vector target_mask;
        target_bit_vector scsi_busy;
        u_int8_t          halt_qp;
        u_int8_t          target_ix;
        u_int8_t          q_cntl;
        u_int8_t          tid_no;

        int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
        halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
        halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
        target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
        q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
        tid_no = ADV_TIX_TO_TID(target_ix);
        target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
        if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
                /*
                 * Temporarily disable the async fix by removing
                 * this target from the list of affected targets,
                 * setting our async rate, and then putting us
                 * back into the mask.
                 */
                adv->fix_asyn_xfer &= ~target_mask;
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_ACTIVE);
                adv->fix_asyn_xfer |= target_mask;
        } else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_ACTIVE);
        } else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
                adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
                                     target_mask, tid_no);
        } else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
                struct adv_target_transinfo* tinfo;
                union ccb *ccb;
                u_int32_t cinfo_index;
                u_int8_t  tag_code;
                u_int8_t  q_status;

                tinfo = &adv->tinfo[tid_no];
                q_cntl |= QC_REQ_SENSE;

                /* Renegotiate if appropriate. */
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_CUR);
                if (tinfo->current.period != tinfo->goal.period) {
                        adv_msgout_sdtr(adv, tinfo->goal.period,
                                        tinfo->goal.offset);
                        q_cntl |= QC_MSG_OUT;
                }
                adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

                /* Don't tag request sense commands */
                tag_code = adv_read_lram_8(adv,
                                           halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
                tag_code &=
                    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

                if ((adv->fix_asyn_xfer & target_mask) != 0
                 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
                        tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
                                   | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
                }
                adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
                                 tag_code);

                q_status = adv_read_lram_8(adv,
                                           halt_q_addr + ADV_SCSIQ_B_STATUS);
                q_status |= (QS_READY | QS_BUSY);
                adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
                                 q_status);

                /*
                 * Freeze the devq until we can handle the sense condition.
                 */
                cinfo_index =
                    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
                ccb = adv->ccb_infos[cinfo_index].ccb;
                xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                ccb->ccb_h.status |= CAM_DEV_QFRZN;
                adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
                              /*ccb*/NULL, CAM_REQUEUE_REQ,
                              /*queued_only*/TRUE);
                scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
                scsi_busy &= ~target_mask;
                adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);

                /*
                 * Ensure we have enough time to actually
                 * retrieve the sense.
                 */
                callout_reset(&ccb->ccb_h.timeout_ch, 5 * hz, adv_timeout, ccb);
        } else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
                struct ext_msg out_msg;

                adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                       (u_int16_t *) &out_msg,
                                       sizeof(out_msg) >> 1);

                if ((out_msg.msg_type == MSG_EXTENDED)
                 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
                 && (out_msg.msg_req == MSG_EXT_SDTR)) {

                        /* Revert to Async */
                        adv_set_syncrate(adv, /*struct cam_path */NULL,
                                         tid_no, /*period*/0, /*offset*/0,
                                         ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
                }
                q_cntl &= ~QC_MSG_OUT;
                adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
        } else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
                u_int8_t  scsi_status;
                union ccb *ccb;
                u_int32_t cinfo_index;

                scsi_status = adv_read_lram_8(adv, halt_q_addr
                                              + ADV_SCSIQ_SCSI_STATUS);
                cinfo_index =
                    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
                ccb = adv->ccb_infos[cinfo_index].ccb;
                xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
                ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
                adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
                              /*ccb*/NULL, CAM_REQUEUE_REQ,
                              /*queued_only*/TRUE);
                scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
                scsi_busy &= ~target_mask;
                adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
        } else {
                printf("Unhandled Halt Code %x\n", int_halt_code);
        }
        adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
void
adv_sdtr_to_period_offset(struct adv_softc *adv,
                          u_int8_t sync_data, u_int8_t *period,
                          u_int8_t *offset, int tid)
{
        if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
         && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
                *period = *offset = 0;
        } else {
                *period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
                *offset = sync_data & 0xF;
        }
}
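
/*
 * Decoding example for the lookup above (illustrative values only):
 * sync_data 0x32 selects sdtr_period_tbl[3] as the period and a
 * REQ/ACK offset of 2 -- the high nibble indexes the period table
 * and the low nibble carries the offset.
 */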
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
                 u_int tid, u_int period, u_int offset, u_int type)
{
        struct adv_target_transinfo* tinfo;
        u_int    old_period;
        u_int    old_offset;
        u_int8_t sdtr_data;

        tinfo = &adv->tinfo[tid];

        /* Filter our input */
        sdtr_data = adv_period_offset_to_sdtr(adv, &period,
                                              &offset, tid);

        old_period = tinfo->current.period;
        old_offset = tinfo->current.offset;

        if ((type & ADV_TRANS_CUR) != 0
         && ((old_period != period || old_offset != offset)
          || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
                int halted;

                halted = adv_is_chip_halted(adv);
                if (halted == 0)
                        /* Must halt the chip first */
                        adv_host_req_chip_halt(adv);

                /* Update current hardware settings */
                adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

                /*
                 * If a target can run in sync mode, we don't need
                 * to check it for sync problems.
                 */
                if (offset != 0)
                        adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

                if (halted == 0)
                        /* Start the chip again */
                        adv_start_chip(adv);

                tinfo->current.period = period;
                tinfo->current.offset = offset;

                if (path != NULL) {
                        /*
                         * Tell the SCSI layer about the
                         * new transfer parameters.
                         */
                        struct ccb_trans_settings neg;

                        neg.sync_period = period;
                        neg.sync_offset = offset;
                        neg.valid = CCB_TRANS_SYNC_RATE_VALID
                                  | CCB_TRANS_SYNC_OFFSET_VALID;
                        xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
                        xpt_async(AC_TRANSFER_NEG, path, &neg);
                }
        }

        if ((type & ADV_TRANS_GOAL) != 0) {
                tinfo->goal.period = period;
                tinfo->goal.offset = offset;
        }

        if ((type & ADV_TRANS_USER) != 0) {
                tinfo->user.period = period;
                tinfo->user.offset = offset;
        }
}
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
                          u_int *offset, int tid)
{
        u_int i;
        u_int dummy_offset;
        u_int dummy_period;

        if (offset == NULL) {
                dummy_offset = 0;
                offset = &dummy_offset;
        }

        if (period == NULL) {
                dummy_period = 0;
                period = &dummy_period;
        }

        *offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
        if (*period != 0 && *offset != 0) {
                for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
                        if (*period <= adv->sdtr_period_tbl[i]) {
                                /*
                                 * When responding to a target that requests
                                 * sync, the requested rate may fall between
                                 * two rates that we can output, but still be
                                 * a rate that we can receive.  Because of this,
                                 * we want to respond to the target with
                                 * the same rate that it sent to us even
                                 * if the period we use to send data to it
                                 * is lower.  Only lower the response period
                                 * if we must.
                                 */
                                if (i == 0 /* Our maximum rate */)
                                        *period = adv->sdtr_period_tbl[0];
                                return ((i << 4) | *offset);
                        }
                }
        }

        *period = 0;
        *offset = 0;
        if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
                return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
        return (0);
}
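
/*
 * Encoding example for the return above (illustrative table values):
 * with a period table beginning {25, 30, 35, ...}, a requested
 * period of 32 matches index 2, the first entry that is >= the
 * request, so the routine returns (2 << 4) | offset and the caller
 * responds at the target's requested rate per the comment above.
 */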
/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
                       u_int16_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
                        u_int16_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
                 u_int16_t set_value, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
                              set_value, count);
}
static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
        u_int32_t sum;
        int       i;

        sum = 0;
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < count; i++)
                sum += ADV_INW(adv, ADV_LRAM_DATA);
        return (sum);
}
static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
                             u_int16_t value)
{
        int     retval;

        retval = 0;
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        ADV_OUTW(adv, ADV_LRAM_DATA, value);
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        if (value != ADV_INW(adv, ADV_LRAM_DATA))
                retval = -1;
        return (retval);
}
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
        u_int16_t val_low, val_high;

        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
        val_high = ADV_INW(adv, ADV_LRAM_DATA);
        val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
        val_low = ADV_INW(adv, ADV_LRAM_DATA);
        val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

        return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
                        u_int32_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
        u_int16_t read_wval;
        u_int8_t  cmd_reg;

        adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
        DELAY(1000);
        cmd_reg = addr | ADV_EEPROM_CMD_READ;
        adv_write_eeprom_cmd_reg(adv, cmd_reg);
        DELAY(1000);
        read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
        DELAY(1000);
        return (read_wval);
}
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
        u_int16_t read_value;

        read_value = adv_read_eeprom_16(adv, addr);
        if (read_value != value) {
                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
                DELAY(1000);

                ADV_OUTW(adv, ADV_EEPROM_DATA, value);
                DELAY(1000);

                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
                DELAY(20 * 1000);

                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
                DELAY(1000);
                read_value = adv_read_eeprom_16(adv, addr);
        }
        return (read_value);
}
static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
        u_int8_t read_back;
        int      retry;

        retry = 0;
        while (1) {
                ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
                DELAY(1000);
                read_back = ADV_INB(adv, ADV_EEPROM_CMD);
                if (read_back == cmd_reg) {
                        return (1);
                }
                if (retry++ > ADV_EEPROM_MAX_RETRY) {
                        return (0);
                }
        }
}
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
                           struct adv_eeprom_config *eeprom_config)
{
        int             retval;
        u_int16_t       sum;
        u_int16_t       *wbuf;
        u_int8_t        s_addr;
        u_int8_t        cfg_beg;
        u_int8_t        cfg_end;

        wbuf = (u_int16_t *)eeprom_config;
        retval = 0;
        sum = 0;
        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                sum += *wbuf;
                if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
                        retval = -1;
                }
        }
        if (adv->type & ADV_VL) {
                cfg_beg = ADV_EEPROM_CFG_BEG_VL;
                cfg_end = ADV_EEPROM_MAX_ADDR_VL;
        } else {
                cfg_beg = ADV_EEPROM_CFG_BEG;
                cfg_end = ADV_EEPROM_MAX_ADDR;
        }

        for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
                sum += *wbuf;
                if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
                        retval = -1;
                }
        }
        *wbuf = sum;
        if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
                retval = -1;
        }

        wbuf = (u_int16_t *)eeprom_config;
        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
                        retval = -1;
                }
        }
        for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
                if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
                        retval = -1;
                }
        }
        return (retval);
}
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
                   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
        u_int32_t chksum;
        u_int16_t mcode_lram_size;
        u_int16_t mcode_chksum;

        mcode_lram_size = mcode_size >> 1;
        /* XXX Why zero the memory just before you write the whole thing?? */
        adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
        adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

        chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
        mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
                                                   ((mcode_size - s_addr
                                                     - ADV_CODE_SEC_BEG) >> 1));
        adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
        adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
        return (chksum);
}
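
/*
 * Checksum flow as inferred from the code above: the sum over the
 * whole freshly written image (chksum) goes back to the caller,
 * which compares it against the expected value for the microcode
 * blob, while a second sum that skips everything before
 * ADV_CODE_SEC_BEG is stored in LRAM so the microcode can validate
 * its own code section.
 */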
static void
adv_reinit_lram(struct adv_softc *adv) {
        adv_init_lram(adv);
        adv_init_qlink_var(adv);
}
static void
adv_init_lram(struct adv_softc *adv)
{
        u_int8_t  i;
        u_int16_t s_addr;

        adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
                         (((adv->max_openings + 2 + 1) * 64) >> 1));

        i = ADV_MIN_ACTIVE_QNO;
        s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        i++;
        s_addr += ADV_QBLK_SIZE;
        for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        }

        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
        i++;
        s_addr += ADV_QBLK_SIZE;

        for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        }
}
static int
adv_init_microcode_var(struct adv_softc *adv)
{
        int i;

        for (i = 0; i <= ADV_MAX_TID; i++) {

                /* Start out async all around */
                adv_set_syncrate(adv, /*path*/NULL,
                                 i, /*period*/0, /*offset*/0,
                                 ADV_TRANS_GOAL|ADV_TRANS_CUR);
        }

        adv_init_qlink_var(adv);

        adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
        adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

        adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

        adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

        ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
        if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
                printf("adv%d: Unable to set program counter. Aborting.\n",
                       adv->unit);
                return (1);
        }
        return (0);
}
static void
adv_init_qlink_var(struct adv_softc *adv)
{
        int       i;
        u_int16_t lram_addr;

        adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
        adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

        adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
        adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

        adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
                         (u_int8_t)((int) adv->max_openings + 1));
        adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
                         (u_int8_t)((int) adv->max_openings + 2));

        adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

        adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
        adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
        adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
        adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
        adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
        adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

        lram_addr = ADV_QADR_BEG;
        for (i = 0; i < 32; i++, lram_addr += 2)
                adv_write_lram_16(adv, lram_addr, 0);
}
static void
adv_disable_interrupt(struct adv_softc *adv)
{
        u_int16_t cfg;

        cfg = ADV_INW(adv, ADV_CONFIG_LSW);
        ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}
static void
adv_enable_interrupt(struct adv_softc *adv)
{
        u_int16_t cfg;

        cfg = ADV_INW(adv, ADV_CONFIG_LSW);
        ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
        ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
        ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
void
adv_start_execution(struct adv_softc *adv)
{
        if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
                adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
        }
}
int
adv_stop_chip(struct adv_softc *adv)
{
        u_int8_t cc_val;

        cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
                 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
        ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
        adv_set_chip_ih(adv, ADV_INS_HALT);
        adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
        if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
                return (0);
        }
        return (1);
}
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
        int      count;
        u_int8_t saved_stop_code;

        if (adv_is_chip_halted(adv))
                return (1);

        count = 0;
        saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
        adv_write_lram_8(adv, ADVV_STOP_CODE_B,
                         ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
        while (adv_is_chip_halted(adv) == 0
            && count++ < 2000)
                ;
        adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
        return (count < 2000);
}
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
        adv_set_bank(adv, 1);
        ADV_OUTW(adv, ADV_REG_IH, ins_code);
        adv_set_bank(adv, 0);
}
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
        u_int8_t scsi_ctrl;

        adv_set_bank(adv, 1);
        scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
        adv_set_bank(adv, 0);
        return (scsi_ctrl);
}
/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
               u_int16_t *inbuf, int words)
{
        int i;

        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < words; i++, inbuf++) {
                if (i == 5) {
                        continue;
                }
                *inbuf = ADV_INW(adv, ADV_LRAM_DATA);
        }
}
static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
        u_int cur_used_qs;
        u_int cur_free_qs;

        cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

        if ((cur_used_qs + n_qs) <= adv->max_openings) {
                cur_free_qs = adv->max_openings - cur_used_qs;
                return (cur_free_qs);
        }
        adv->openings_needed = n_qs;
        return (0);
}
static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
                      u_int8_t n_free_q)
{
        int i;

        for (i = 0; i < n_free_q; i++) {
                free_q_head = adv_alloc_free_queue(adv, free_q_head);
                if (free_q_head == ADV_QLINK_END)
                        break;
        }
        return (free_q_head);
}
static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
        u_int16_t q_addr;
        u_int8_t  next_qp;
        u_int8_t  q_status;

        next_qp = ADV_QLINK_END;
        q_addr = ADV_QNO_TO_QADDR(free_q_head);
        q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

        if ((q_status & QS_READY) == 0)
                next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

        return (next_qp);
}
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                    u_int8_t n_q_required)
{
        u_int8_t  free_q_head;
        u_int8_t  next_qp;
        u_int8_t  tid_no;
        u_int8_t  target_ix;
        int       retval;

        retval = 1;
        target_ix = scsiq->q2.target_ix;
        tid_no = ADV_TIX_TO_TID(target_ix);
        free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
        if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
            != ADV_QLINK_END) {
                scsiq->q1.q_no = free_q_head;

                /*
                 * Now that we know our Q number, point our sense
                 * buffer pointer to a bus dma mapped area where
                 * we can dma the data to.
                 */
                scsiq->q1.sense_addr = adv->sense_physbase
                    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
                adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
                adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
                adv->cur_active += n_q_required;
                retval = 0;
        }
        return (retval);
}
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                            u_int q_no)
{
        u_int8_t        sg_list_dwords;
        u_int8_t        sg_index, i;
        u_int8_t        sg_entry_cnt;
        u_int8_t        next_qp;
        u_int16_t       q_addr;
        struct          adv_sg_head *sg_head;
        struct          adv_sg_list_q scsi_sg_q;

        sg_head = scsiq->sg_head;

        if (sg_head) {
                sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
                if (sg_entry_cnt == 0)
                        panic("adv_put_ready_sg_list_queue: ScsiQ with "
                              "a SG list but only one element");

                if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
                        panic("adv_put_ready_sg_list_queue: ScsiQ with "
                              "a SG list but QC_SG_HEAD not set");
#endif
                q_addr = ADV_QNO_TO_QADDR(q_no);
                sg_index = 1;
                scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
                scsi_sg_q.sg_head_qp = q_no;
                scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
                for (i = 0; i < sg_head->queue_cnt; i++) {
                        u_int8_t segs_this_q;

                        if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
                                segs_this_q = ADV_SG_LIST_PER_Q;
                        else {
                                /* This will be the last segment then */
                                segs_this_q = sg_entry_cnt;
                                scsi_sg_q.cntl |= QCSG_SG_XFER_END;
                        }
                        scsi_sg_q.seq_no = i + 1;
                        sg_list_dwords = segs_this_q << 1;
                        if (i == 0) {
                                scsi_sg_q.sg_list_cnt = segs_this_q;
                                scsi_sg_q.sg_cur_list_cnt = segs_this_q;
                        } else {
                                scsi_sg_q.sg_list_cnt = segs_this_q - 1;
                                scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
                        }
                        next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
                        scsi_sg_q.q_no = next_qp;
                        q_addr = ADV_QNO_TO_QADDR(next_qp);

                        adv_write_lram_16_multi(adv,
                                                q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
                                                (u_int16_t *)&scsi_sg_q,
                                                sizeof(scsi_sg_q) >> 1);
                        adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
                                                (u_int32_t *)&sg_head->sg_list[sg_index],
                                                sg_list_dwords);
                        sg_entry_cnt -= segs_this_q;
                        sg_index += ADV_SG_LIST_PER_Q;
                }
        }
        adv_put_ready_queue(adv, scsiq, q_no);
}
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                    u_int q_no)
{
        struct adv_target_transinfo* tinfo;
        u_int  q_addr;
        u_int  tid_no;

        tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
        tinfo = &adv->tinfo[tid_no];
        if ((tinfo->current.period != tinfo->goal.period)
         || (tinfo->current.offset != tinfo->goal.offset)) {

                adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
                scsiq->q1.cntl |= QC_MSG_OUT;
        }
        q_addr = ADV_QNO_TO_QADDR(q_no);

        scsiq->q1.status = QS_FREE;

        adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
                                (u_int16_t *)scsiq->cdbptr,
                                scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
        adv_adj_scsiq_endian(scsiq);
#endif

        adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
                      (u_int16_t *) &scsiq->q1.cntl,
                      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
        adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
                          adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

        adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
        adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

        adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
                          (scsiq->q1.q_no << 8) | QS_READY);
}
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
              u_int16_t *buffer, int words)
{
        int     i;

        /*
         * XXX This routine makes *gross* assumptions
         * about padding in the data structures.
         * Either the data structures should have explicit
         * padding members added, or they should have padding
         * turned off via compiler attributes depending on
         * which yields better overall performance.  My hunch
         * would be that turning off padding would be the
         * faster approach as an outsw is much faster than
         * this crude loop and accessing un-aligned data
         * members isn't *that* expensive.  The other choice
         * would be to modify the ASC script so that the
         * the adv_scsiq_1 structure can be re-arranged so
         * padding isn't required.
         */
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < words; i++, buffer++) {
                if (i == 2 || i == 10) {
                        continue;
                }
                ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
        }
}
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
                     u_int8_t q_cntl, target_bit_vector target_mask,
                     int tid_no)
{
        struct ext_msg  ext_msg;

        adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
                               sizeof(ext_msg) >> 1);
        if ((ext_msg.msg_type == MSG_EXTENDED)
         && (ext_msg.msg_req == MSG_EXT_SDTR)
         && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
                int             sdtr_accept;
                struct          adv_target_transinfo* tinfo;
                u_int32_t       cinfo_index;
                union ccb      *ccb;
                u_int8_t        orig_offset;
                u_int           period;
                u_int           offset;

                cinfo_index =
                    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
                ccb = adv->ccb_infos[cinfo_index].ccb;
                tinfo = &adv->tinfo[tid_no];
                sdtr_accept = TRUE;

                orig_offset = ext_msg.req_ack_offset;
                if (ext_msg.xfer_period < tinfo->goal.period) {
                        sdtr_accept = FALSE;
                        ext_msg.xfer_period = tinfo->goal.period;
                }

                /* Perform range checking */
                period = ext_msg.xfer_period;
                offset = ext_msg.req_ack_offset;
                adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
                ext_msg.xfer_period = period;
                ext_msg.req_ack_offset = offset;

                /* Record our current sync settings */
                adv_set_syncrate(adv, ccb->ccb_h.path,
                                 tid_no, ext_msg.xfer_period,
                                 ext_msg.req_ack_offset,
                                 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

                /* Offset too high or large period forced async */
                if (orig_offset != ext_msg.req_ack_offset)
                        sdtr_accept = FALSE;

                if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
                        /* Valid response to our requested negotiation */
                        q_cntl &= ~QC_MSG_OUT;
                } else {
                        q_cntl |= QC_MSG_OUT;
                        adv_msgout_sdtr(adv, ext_msg.xfer_period,
                                        ext_msg.req_ack_offset);
                }
        } else if (ext_msg.msg_type == MSG_EXTENDED
                && ext_msg.msg_req == MSG_EXT_WDTR
                && ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

                ext_msg.wdtr_width = 0;
                adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                        (u_int16_t *)&ext_msg,
                                        sizeof(ext_msg) >> 1);
                q_cntl |= QC_MSG_OUT;
        } else {

                ext_msg.msg_type = MSG_MESSAGE_REJECT;
                adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                        (u_int16_t *)&ext_msg,
                                        sizeof(ext_msg) >> 1);
                q_cntl |= QC_MSG_OUT;
        }
        adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
                u_int8_t sdtr_offset)
{
        struct ext_msg sdtr_buf;

        sdtr_buf.msg_type = MSG_EXTENDED;
        sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
        sdtr_buf.msg_req = MSG_EXT_SDTR;
        sdtr_buf.xfer_period = sdtr_period;
        sdtr_offset &= ADV_SYN_MAX_OFFSET;
        sdtr_buf.req_ack_offset = sdtr_offset;
        adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                (u_int16_t *) &sdtr_buf,
                                sizeof(sdtr_buf) / 2);
}
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
              u_int32_t status, int queued_only)
{
        u_int16_t q_addr;
        u_int8_t  q_no;
        struct adv_q_done_info scsiq_buf;
        struct adv_q_done_info *scsiq;
        u_int8_t  target_ix;
        int       count;

        scsiq = &scsiq_buf;
        target_ix = ADV_TIDLUN_TO_IX(target, lun);
        count = 0;
        for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
                struct adv_ccb_info *ccb_info;
                q_addr = ADV_QNO_TO_QADDR(q_no);

                adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
                ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
                if (((scsiq->q_status & QS_READY) != 0)
                 && ((scsiq->q_status & QS_ABORTED) == 0)
                 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
                 && (scsiq->d2.target_ix == target_ix)
                 && (queued_only == 0
                  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
                 && (ccb == NULL || (ccb == ccb_info->ccb))) {
                        union ccb *aborted_ccb;
                        struct adv_ccb_info *cinfo;

                        scsiq->q_status |= QS_ABORTED;
                        adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
                                         scsiq->q_status);
                        aborted_ccb = ccb_info->ccb;
                        /* Don't clobber earlier error codes */
                        if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
                         == CAM_REQ_INPROG)
                                aborted_ccb->ccb_h.status |= status;
                        cinfo = (struct adv_ccb_info *)
                            aborted_ccb->ccb_h.ccb_cinfo_ptr;
                        cinfo->state |= ACCB_ABORT_QUEUED;
                        count++;
                }
        }
        return (count);
}
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
        int       count;
        int       i;
        union ccb *ccb;

        count = 2000;
        while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
            && count-- > 0)
                DELAY(1000);

        adv_reset_chip(adv, initiate_bus_reset);
        adv_reinit_lram(adv);
        for (i = 0; i <= ADV_MAX_TID; i++)
                adv_set_syncrate(adv, NULL, i, /*period*/0,
                                 /*offset*/0, ADV_TRANS_CUR);
        ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

        /* Tell the XPT layer that a bus reset occurred */
        if (adv->path != NULL)
                xpt_async(AC_BUS_RESET, adv->path, NULL);

        while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
                if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
                        ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
                adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
        }

        adv_start_chip(adv);
        return (0);
}
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
        int orig_id;

        adv_set_bank(adv, 1);
        orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
        ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
        if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
                adv_set_bank(adv, 0);
                ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
        }
        adv_set_bank(adv, 1);
        ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
        adv_set_bank(adv, 0);
}