/*
 * Inline routines shareable across OS platforms.
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2000-2001 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#43 $
 */

#ifndef _AIC7XXX_INLINE_H_
#define _AIC7XXX_INLINE_H_

/************************* Sequencer Execution Control ************************/
static __inline void	ahc_pause_bug_fix(struct ahc_softc *ahc);
static __inline int	ahc_is_paused(struct ahc_softc *ahc);
static __inline void	ahc_pause(struct ahc_softc *ahc);
static __inline void	ahc_unpause(struct ahc_softc *ahc);

/*
 * Work around any chip bugs related to halting sequencer execution.
 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
 * reading a register that will set this signal and deassert it.
 * Without this workaround, if the chip is paused, by an interrupt or
 * manual pause while accessing scb ram, accesses to certain registers
 * will hang the system (infinite pci retries).
 */
static __inline void
ahc_pause_bug_fix(struct ahc_softc *ahc)
{
	if ((ahc->features & AHC_ULTRA2) != 0)
		(void)ahc_inb(ahc, CCSCBCTL);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
static __inline int
ahc_is_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
static __inline void
ahc_pause(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahc_is_paused(ahc) == 0)
		;

	ahc_pause_bug_fix(ahc);
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
static __inline void
ahc_unpause(struct ahc_softc *ahc)
{
	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}
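
/*
 * Illustrative sketch (not part of the driver): callers typically use
 * the helpers above to bracket direct register access.  The register
 * read here (SCSISIGI) and the local variable are only an example.
 *
 *	ahc_pause(ahc);
 *	sigi = ahc_inb(ahc, SCSISIGI);
 *	ahc_unpause(ahc);
 *
 * Note that ahc_unpause() refuses to restart the sequencer while a
 * pausing interrupt is still pending, so the caller need not recheck
 * INTSTAT itself.
 */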

/*********************** Untagged Transaction Routines ************************/
static __inline void	ahc_freeze_untagged_queues(struct ahc_softc *ahc);
static __inline void	ahc_release_untagged_queues(struct ahc_softc *ahc);

/*
 * Block our completion routine from starting the next untagged
 * transaction for this target or target lun.
 */
static __inline void
ahc_freeze_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0)
		ahc->untagged_queue_lock++;
}

/*
 * Allow the next untagged transaction for this target or target lun
 * to be executed.  We use a counting semaphore to allow the lock
 * to be acquired recursively.  Once the count drops to zero, the
 * transaction queues will be run.
 */
static __inline void
ahc_release_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0) {
		ahc->untagged_queue_lock--;
		if (ahc->untagged_queue_lock == 0)
			ahc_run_untagged_queues(ahc);
	}
}
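
/*
 * Illustrative sketch (not part of the driver): because the lock is a
 * counting semaphore, freeze/release pairs may nest.  The untagged
 * queues only run again when the outermost release drops the count
 * back to zero:
 *
 *	ahc_freeze_untagged_queues(ahc);	(count 0 -> 1)
 *	ahc_freeze_untagged_queues(ahc);	(count 1 -> 2)
 *	ahc_release_untagged_queues(ahc);	(count 2 -> 1, queues not run)
 *	ahc_release_untagged_queues(ahc);	(count 1 -> 0, queues run)
 */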

/************************** Memory mapping routines ***************************/
static __inline struct ahc_dma_seg *
			ahc_sg_bus_to_virt(struct scb *scb,
					   uint32_t sg_busaddr);
static __inline uint32_t
			ahc_sg_virt_to_bus(struct scb *scb,
					   struct ahc_dma_seg *sg);
static __inline uint32_t
			ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
static __inline void	ahc_sync_scb(struct ahc_softc *ahc,
				     struct scb *scb, int op);
static __inline void	ahc_sync_sglist(struct ahc_softc *ahc,
					struct scb *scb, int op);
static __inline uint32_t
			ahc_targetcmd_offset(struct ahc_softc *ahc,
					     u_int index);

static __inline struct ahc_dma_seg *
ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
{
	int sg_index;

	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
	/* sg_list_phys points to entry 1, not 0 */
	sg_index++;

	return (&scb->sg_list[sg_index]);
}

static __inline uint32_t
ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
{
	int sg_index;

	/* sg_list_phys points to entry 1, not 0 */
	sg_index = sg - &scb->sg_list[1];

	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
}
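
/*
 * Illustrative example (not part of the driver): sg_list_phys is the bus
 * address of sg_list[1], so the two conversions above are inverses of
 * each other.  Assuming an 8-byte ahc_dma_seg, a bus address of
 * sg_list_phys + 8 maps to &sg_list[2], and converting &sg_list[2] back
 * yields sg_list_phys + (1 * 8) = sg_list_phys + 8 again.
 */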

static __inline uint32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
		+ (sizeof(struct hardware_scb) * index));
}

static __inline void
ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
{
	ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
			ahc->scb_data->hscb_dmamap,
			/*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
			/*len*/sizeof(*scb->hscb), op);
}

static __inline void
ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
			/*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
				* sizeof(struct ahc_dma_seg),
			/*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
}

static __inline uint32_t
ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
{
	return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
}

/******************************** Debugging ***********************************/
static __inline char *ahc_name(struct ahc_softc *ahc);

static __inline char *
ahc_name(struct ahc_softc *ahc)
{
	return (ahc->name);
}

/*********************** Miscellaneous Support Functions **********************/

static __inline void	ahc_update_residual(struct ahc_softc *ahc,
					    struct scb *scb);
static __inline struct ahc_initiator_tinfo *
			ahc_fetch_transinfo(struct ahc_softc *ahc,
					    char channel, u_int our_id,
					    u_int remote_id,
					    struct ahc_tmode_tstate **tstate);
static __inline uint16_t
			ahc_inw(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outw(struct ahc_softc *ahc, u_int port,
				 u_int value);
static __inline uint32_t
			ahc_inl(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outl(struct ahc_softc *ahc, u_int port,
				 uint32_t value);
static __inline uint64_t
			ahc_inq(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outq(struct ahc_softc *ahc, u_int port,
				 uint64_t value);
static __inline struct scb *
			ahc_get_scb(struct ahc_softc *ahc);
static __inline void	ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline void	ahc_swap_with_next_hscb(struct ahc_softc *ahc,
						struct scb *scb);
static __inline void	ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline struct scsi_sense_data *
			ahc_get_sense_buf(struct ahc_softc *ahc,
					  struct scb *scb);
static __inline uint32_t
			ahc_get_sense_bufaddr(struct ahc_softc *ahc,
					      struct scb *scb);

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static __inline void
ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahc_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) != 0)
		ahc_calc_residual(ahc, scb);
}

/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
static __inline struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
		    u_int remote_id, struct ahc_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahc->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}
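
/*
 * Illustrative sketch (not part of the driver): a caller interested in
 * the negotiated parameters for target "targ" on channel 'A' would do
 * something like the following (the local variable names are
 * hypothetical).
 *
 *	struct ahc_initiator_tinfo *tinfo;
 *	struct ahc_tmode_tstate *tstate;
 *
 *	tinfo = ahc_fetch_transinfo(ahc, 'A', ahc->our_id, targ, &tstate);
 *	period = tinfo->curr.period;
 *
 * The lookup is indexed from the target role's point of view, as the
 * comment inside the function explains.
 */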

static __inline uint16_t
ahc_inw(struct ahc_softc *ahc, u_int port)
{
	uint16_t r = ahc_inb(ahc, port+1) << 8;

	return r | ahc_inb(ahc, port);
}

static __inline void
ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
}

static __inline uint32_t
ahc_inl(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24));
}

static __inline void
ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
{
	ahc_outb(ahc, port, (value) & 0xFF);
	ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
	ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
	ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
}
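
/*
 * Illustrative example (not part of the driver): these multi-byte
 * accessors assemble and split values least-significant byte first.
 * For instance, ahc_outl(ahc, port, 0x11223344) writes 0x44 to port,
 * 0x33 to port+1, 0x22 to port+2 and 0x11 to port+3, and a subsequent
 * ahc_inl(ahc, port) reassembles 0x11223344 from those same four bytes.
 */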

static __inline uint64_t
ahc_inq(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24)
	      | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
	      | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
	      | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
	      | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
}

static __inline void
ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
	ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
	ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
	ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
	ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
	ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
	ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
}

/*
 * Get a free scb.  If there are none, see if we can allocate a new SCB.
 */
static __inline struct scb *
ahc_get_scb(struct ahc_softc *ahc)
{
	struct scb *scb;

	if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
		ahc_alloc_scbs(ahc);
		scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
		if (scb == NULL)
			return (NULL);
	}
	SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
static __inline void
ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;

	/* Clean up for the next user */
	ahc->scb_data->scbindex[hscb->tag] = NULL;
	scb->flags = SCB_FREE;
	hscb->control = 0;

	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);

	/* Notify the OSM that a resource is now available. */
	ahc_platform_scb_free(ahc, scb);
}

static __inline struct scb *
ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
{
	struct scb *scb;

	scb = ahc->scb_data->scbindex[tag];
	if (scb != NULL)
		ahc_sync_scb(ahc, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	return (scb);
}

static __inline void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	u_int  saved_tag;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB to download, and we
	 * can't disappoint it.  To achieve this, the next
	 * SCB to download is saved off in ahc->next_queued_scb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahc->next_queued_scb->hscb;
	saved_tag = q_hscb->tag;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	if ((scb->flags & SCB_CDB32_PTR) != 0) {
		q_hscb->shared_data.cdb_ptr =
		    ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
			      + offsetof(struct hardware_scb, cdb32));
	}
	q_hscb->tag = saved_tag;
	q_hscb->next = scb->hscb->tag;

	/* Now swap HSCB pointers. */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}
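
/*
 * Illustrative walkthrough (not part of the driver), with made-up tag
 * values: suppose the sequencer expects to download the HSCB with tag 5
 * next (ahc->next_queued_scb) and we are asked to queue an SCB whose own
 * HSCB carries tag 9.  The routine above copies the tag 9 contents into
 * the tag 5 HSCB, restores tag 5 in the copy, records tag 9 in its
 * "next" field, and hands the now-spare tag 9 HSCB back to
 * ahc->next_queued_scb for the following submission.  The sequencer
 * therefore always finds the HSCB it was told to expect, while
 * ahc_lookup_scb() still resolves tag 5 to the queued transaction.
 */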

/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_swap_with_next_hscb(ahc, scb);

	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);

	/*
	 * Setup data "oddness".
	 */
	scb->hscb->lun &= LID;
	if (ahc_get_transfer_length(scb) & 0x1)
		scb->hscb->lun |= SCB_XFERLEN_ODD;

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the adapter about the newly queued SCB */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_pause(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_unpause(ahc);
	}
}
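
/*
 * Illustrative sketch (not part of the driver): the normal submission
 * path pairs ahc_get_scb() with ahc_queue_scb().  Error handling and
 * hardware_scb setup are elided; "setup_hscb" stands in for the
 * OSM-specific code that fills in the command.
 *
 *	scb = ahc_get_scb(ahc);
 *	if (scb == NULL)
 *		return;			(no SCBs available; retry later)
 *	setup_hscb(scb->hscb);		(hypothetical helper)
 *	ahc_queue_scb(ahc, scb);
 */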

static __inline struct scsi_sense_data *
ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (&ahc->scb_data->sense[offset]);
}

static __inline uint32_t
ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (ahc->scb_data->sense_busaddr
	      + (offset * sizeof(struct scsi_sense_data)));
}

/************************** Interrupt Processing ******************************/
static __inline void	ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
static __inline void	ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
static __inline u_int	ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
static __inline int	ahc_intr(struct ahc_softc *ahc);

static __inline void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/0, /*len*/256, op);
}

static __inline void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, 0),
				sizeof(struct target_cmd) * AHC_TMODE_CMDS,
				op);
	}
#endif
}

/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHC_RUN_QOUTFIFO 0x1
#define AHC_RUN_TQINFIFO 0x2
static __inline u_int
ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
{
	u_int retval;

	retval = 0;
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/ahc->qoutfifonext, /*len*/1,
			BUS_DMASYNC_POSTREAD);
	if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
		retval |= AHC_RUN_QOUTFIFO;
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0
	 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
			retval |= AHC_RUN_TQINFIFO;
	}
#endif
	return (retval);
}
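
/*
 * Illustrative note (not part of the driver): the return value is a
 * bitmask, so a caller could in principle test each flag separately:
 *
 *	queues = ahc_check_cmdcmpltqueues(ahc);
 *	if (queues & AHC_RUN_QOUTFIFO)
 *		ahc_run_qoutfifo(ahc);
 *	if (queues & AHC_RUN_TQINFIFO)
 *		ahc_run_tqinfifo(ahc, FALSE);
 *
 * ahc_intr() below only uses a non-zero result as a cheap hint that the
 * interrupt is ours before it reads INTSTAT.
 */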

/*
 * Catch an interrupt from the adapter
 */
static __inline int
ahc_intr(struct ahc_softc *ahc)
{
	u_int	intstat;

	if ((ahc->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}

	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahc_inb(ahc, INTSTAT);

	if ((intstat & INT_PEND) == 0) {
#if AHC_PCI_CONFIG > 0
		if (ahc->unsolicited_ints > 500) {
			ahc->unsolicited_ints = 0;
			if ((ahc->chip & AHC_PCI) != 0
			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc->bus_intr(ahc);
		}
#endif
		ahc->unsolicited_ints++;
		return (0);
	}
	ahc->unsolicited_ints = 0;

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
	} else if ((intstat & (SEQINT|SCSIINT)) != 0) {

		ahc_pause_bug_fix(ahc);

		if ((intstat & SEQINT) != 0)
			ahc_handle_seqint(ahc, intstat);

		if ((intstat & SCSIINT) != 0)
			ahc_handle_scsiint(ahc, intstat);
	}
	return (1);
}

#endif  /* _AIC7XXX_INLINE_H_ */