Remove bogus checks after kmalloc(M_WAITOK) which never returns NULL.
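
kmalloc() sleeps until the allocation succeeds when M_WAITOK is given, so
checking its result for NULL is dead code.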
sys/dev/disk/isp/isp_pci.c
/* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
/* $DragonFly: src/sys/dev/disk/isp/isp_pci.c,v 1.13 2008/01/06 16:55:49 swildner Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "isp_freebsd.h"

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);
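
/*
 * Per-chip method vectors. Each table hands the core driver the register
 * accessors, interrupt status reader and DMA routines appropriate for
 * that family of ISP chips.
 */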
static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
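
/*
 * Each PCI_QLOGIC_* constant packs the product ID into the upper 16 bits
 * and the vendor ID into the lower 16 bits, which is the same layout
 * isp_pci_probe() builds from pci_get_device() and pci_get_vendor().
 */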

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);

struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	 isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		kprintf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	quad_t wwn;
	bus_size_t lim;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	unit = device_get_unit(dev);
	if (kgetenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			device_printf(dev, "not configuring\n");
			/*
			 * But return '0' to preserve HBA numbering.
			 */
			return (0);
		}
	}
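
	/*
	 * kmalloc() with M_WAITOK sleeps until it succeeds and never
	 * returns NULL, so the allocations below need no NULL checks.
	 */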
	pcs = kmalloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
	bitmap = 0;
	if (kgetenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_MEMEN;
			m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (kgetenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_PORTEN;
			m2 = PCIM_CMD_MEMEN;
		}
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}

	isp = &pcs->pci_isp;
	isp->isp_param = kmalloc(psize, M_DEVBUF, M_WAITOK | M_ZERO);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
#ifdef	ISP_TARGET_MODE
	isp->isp_role = ISP_ROLE_BOTH;
#else
	isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	isp->isp_dev = dev;

	/*
	 * Try and find firmware for this device.
	 */
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (kgetenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (kgetenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (kgetenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (kgetenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (kgetenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (kgetenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (kgetenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (kgetenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (kgetenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp_debug = 0;
	(void) kgetenv_int("isp_debug", &isp_debug);
	if (bus_setup_intr(dev, irq, 0, isp_pci_intr,
	    isp, &pcs->ih, NULL)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

#ifdef	ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (kgetenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    kmalloc(amt, M_DEVBUF, M_WAITOK);
				bzero(FCPARAM(isp)->isp_dump_data, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif

	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:
	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}
	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}
	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}
	if (pcs) {
		if (pcs->pci_isp.isp_param)
			kfree(pcs->pci_isp.isp_param, M_DEVBUF);
		kfree(pcs, M_DEVBUF);
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}

#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
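
/*
 * IspVirt2Off() converts one of the core driver's "virtual" register
 * offsets into a real PCI register offset: the block bits index the
 * pci_poff[] table filled in at attach time, and the low byte is the
 * offset within that register block.
 */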

static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}
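
/*
 * Read and decode interrupt status for SCSI and 2100/2200 chips: returns
 * nonzero with *isrp, *semap and *mbp filled in if an interrupt is
 * pending, zero otherwise. Only the 2100 takes the debounced path above,
 * evidently because single reads are not reliable on that chip.
 */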
static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}
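
/*
 * On the 1080-family chips the SXP and DMA register banks are selected
 * by bits in BIU_CONF1, so the accessors above flip the bank-select bits
 * around each access and then restore the previous configuration.
 */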

struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define	ISP_NSEGS	((MAXPHYS / PAGE_SIZE) + 1)

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
#else
	alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}
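
	/*
	 * These M_WAITOK allocations cannot fail, so their results are
	 * not checked.
	 */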
	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) kmalloc(len, M_DEVBUF, M_WAITOK);

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		kfree(pcs->dmaps, M_DEVBUF);
		kfree(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		kfree(isp->isp_xflist, M_DEVBUF);
		kfree(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	kfree(isp->isp_xflist, M_DEVBUF);
	kfree(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}
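
/*
 * State carried into the bus_dmamap_load() callbacks below. error is set
 * to MUSHERR_NOQENTRIES when the request queue fills up, which the caller
 * maps to CMD_EAGAIN rather than a hard failure.
 */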
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
		    cto->ct_seg_count, dm_segs[segcnt].ds_addr,
		    dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%x:%u",
			    cto->ct_header.rqs_entry_count-1, seg,
			    dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			crit_enter();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			crit_exit();
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];

	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}

static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;

	if (msg)
		kprintf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		kprintf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		kprintf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		kprintf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	kprintf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	kprintf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		kprintf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		kprintf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		kprintf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	kprintf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	kprintf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}