/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
 * $DragonFly: src/sys/dev/disk/advansys/advlib.c,v 1.7 2006/10/25 20:55:52 dillon Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/thread2.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_da.h>
#include <bus/cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "advansys.h"
#include "advmcode.h"

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};

struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
#define	mdp_b3		u_ext_msg.mdp.mdp[3]
#define	mdp_b2		u_ext_msg.mdp.mdp[2]
#define	mdp_b1		u_ext_msg.mdp.mdp[1]
#define	mdp_b0		u_ext_msg.mdp.mdp[0]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB	0x41
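
/*
 * Note: under the nibble packing used by adv_sdtr_to_period_offset()
 * and adv_period_offset_to_sdtr() below (high nibble = period table
 * index, low nibble = REQ/ACK offset), 0x41 selects the fifth period
 * table entry with a REQ/ACK offset of one.
 */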

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if UNUSED
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int adv_sgcount_to_qcount(int sgcount);

static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}
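
/*
 * Worked example: one queue block carries the command itself, and each
 * continuation block holds up to ADV_SG_LIST_PER_Q of the remaining S/G
 * entries (adv_put_ready_sg_list_queue() copies entries starting at
 * index 1 into the continuation blocks).  If ADV_SG_LIST_PER_Q were 7
 * (an illustrative value only), a 15-entry S/G list would need
 * (15 - 1) / 7 = 2 continuation blocks plus the head block, so this
 * routine would return 3.
 */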

static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);

/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
	default:
		break;
	}
}

void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
		     | ADV_CC_DIAG | ADV_CC_SCSI_RESET
		     | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t byte_data;
	u_int16_t word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}

u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
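
/*
 * Implementation note (an assumption drawn from the access patterns in
 * this file, not from chip documentation): LRAM is reached through an
 * address latch (ADV_LRAM_ADDR) and a data port (ADV_LRAM_DATA), and the
 * latch appears to advance on every word transfer.  That is why the
 * multi-word helpers later in this file write the start address once and
 * then stream words through the data port.
 */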

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
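
/*
 * Note that the sum returned above covers every configuration word
 * except the final one, which is read into the last buffer slot after
 * the loop but deliberately left out of the sum.
 * adv_set_eeprom_config_once() stores the running sum in that final
 * word, so a caller can validate the EEPROM by comparing the returned
 * sum against the stored word.
 */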

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}

int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}
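
/*
 * Worked example (assuming ADV_MIN_IRQ_NO is 10, which the ISA/VL IRQ
 * routing above suggests): on a VL card, cfg_lsw bits 2-4 hold an IRQ
 * code; codes 0, 4 and 7 are invalid, and e.g. code 2 would decode to
 * 2 + (10 - 1) = IRQ 11.  On other cards only bits 2-3 are used and
 * code 3 is bumped by 2 to skip IRQ 13, mapping codes 0-3 to IRQs
 * 10, 11, 12 and 15.
 */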

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;
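
	/*
	 * The block below decides when to suppress the "asynchronous
	 * transfer with offset one" workaround for this command: targets
	 * currently flagged in fix_asyn_xfer (but not flagged "always")
	 * presumably only need the workaround on large transfers, so
	 * transfers shorter than 512 bytes and commands that by nature
	 * move little data run with the fix disabled and disconnection
	 * suppressed instead.
	 */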
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}

u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t	val;
	u_int8_t	sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	adv_target_transinfo* tinfo;
		union	ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t tag_code;
		u_int8_t q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				   | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		callout_reset(&ccb->ccb_h.timeout_ch, 5 * hz, adv_timeout, ccb);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}

void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int halted;

		crit_enter();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		crit_exit();
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
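
/*
 * Summary of the type flags handled above: ADV_TRANS_CUR pushes the
 * filtered settings into the chip's SDTR register and, when a path is
 * supplied, broadcasts an AC_TRANSFER_NEG so the SCSI layer sees the
 * new rate; ADV_TRANS_GOAL and ADV_TRANS_USER merely record the values
 * as the negotiation goal and the user-requested limits, respectively.
 */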

u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
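
/*
 * Packing example (assuming ADV_SYN_MAX_OFFSET is 0x0F, which the
 * nibble packing implies): a request for a 45ns period at offset 8
 * against the non-ultra table {25, 30, 35, 40, 50, ...} first matches
 * index 4, since 50 is the first entry >= 45, so the routine returns
 * (4 << 4) | 8 = 0x48.  *period is left at 45 for the response; only a
 * match at index 0 forces the period up to our maximum rate.
 */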

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
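
/*
 * Two different checksums are computed above: the 32-bit sum over every
 * word just written, which is returned so the caller can compare it
 * against the expected sum for the image (see adv_init_lram_and_mcode()),
 * and a 16-bit sum over just the code section starting at
 * ADV_CODE_SEC_BEG, which is stored in LRAM at ADVV_MCODE_CHKSUM_W,
 * presumably so the microcode can verify itself at run time.
 */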

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
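
/*
 * Resulting LRAM layout, as built above: queue blocks of ADV_QBLK_SIZE
 * bytes starting at ADV_QADR_BEG form a doubly linked list for queue
 * numbers 1 through max_openings, with ADV_QLINK_END terminating the
 * forward link of the last block.  Three additional self-linked blocks
 * follow; adv_init_qlink_var() points the busy and disconnect queue
 * heads at the first two of them (max_openings + 1 and + 2).
 */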

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if UNUSED
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}
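
/*
 * Note on the i == 5 skip above: the LRAM data port only advances when
 * it is actually read, so skipping the read for word 5 leaves inbuf[5],
 * apparently a compiler padding slot in the destination structure,
 * untouched while the remaining LRAM words land in the following slots.
 * adv_put_scsiq() below relies on the same trick in the other direction.
 */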

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int cur_used_qs;
	u_int cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}

static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}

static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct adv_target_transinfo* tinfo;
	u_int q_addr;
	u_int tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	 ccb *ccb;
		struct	 adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
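
/*
 * To summarize the SDTR branch above: the incoming request is clipped to
 * our own limits, recorded via adv_set_syncrate(), and then either
 * accepted silently, when it answers a negotiation we initiated and
 * survived the clipping intact, or answered with our counter-proposal.
 * WDTR requests are answered with a width of zero, and any other
 * extended message is rejected outright.
 */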

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}

int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}