/* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
/* $DragonFly: src/sys/dev/disk/isp/isp_pci.c,v 1.13 2008/01/06 16:55:49 swildner Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "isp_freebsd.h"
static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *,
    u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);
static struct ispmdvec mdvec = {
	isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs, NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs, NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr, isp_pci_rd_reg_1080, isp_pci_wr_reg_1080,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs, NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr, isp_pci_rd_reg, isp_pci_wr_reg,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300, isp_pci_rd_reg, isp_pci_wr_reg,
	isp_pci_mbxdma, isp_pci_dmasetup, isp_pci_dmateardown,
	NULL, isp_pci_reset1, isp_pci_dumpregs
};
#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif
#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif
#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif
#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif
#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif
#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif
#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
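
/*
 * These composite IDs match the value isp_pci_probe() builds by hand from
 * pci_get_device() and pci_get_vendor(): the config-space device ID goes
 * in the high 16 bits and the vendor ID in the low 16 bits.
 */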
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10
static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;
static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);
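
/*
 * The DRIVER_MODULE() glue above registers this driver with the PCI bus
 * code, so isp_pci_probe() and isp_pci_attach() are invoked through the
 * device_probe/device_attach methods for each candidate PCI device.
 */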
static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		kprintf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}
static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	quad_t wwn;
	bus_size_t lim;
	/*
	 * Figure out if we're supposed to skip this one.
	 */
	unit = device_get_unit(dev);
	if (kgetenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			device_printf(dev, "not configuring\n");
			/*
			 * But return '0' to preserve HBA numbering.
			 */
			return (0);
		}
	}
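
	/*
	 * Illustration (not from the original source): the tunable is a
	 * per-unit bitmap, so setting isp_disable=0x6 in the kernel
	 * environment would skip units 1 and 2 while still consuming
	 * their unit numbers.
	 */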
	pcs = kmalloc(sizeof (struct isp_pcisoftc), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	/*
	 * Figure out which we should try first - memory mapping or
	 * i/o mapping?
	 */
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
	if (kgetenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_MEMEN;
			m2 = PCIM_CMD_PORTEN;
		}
	}
	if (kgetenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_PORTEN;
			m2 = PCIM_CMD_MEMEN;
		}
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;
	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
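
	/*
	 * These defaults seed the register-offset table that the
	 * IspVirt2Off() macro (below) uses to translate virtual register
	 * offsets into bus-space offsets; the chip-specific cases that
	 * follow adjust individual blocks as needed.
	 */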
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = kmalloc(psize, M_DEVBUF, M_WAITOK | M_ZERO);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
#ifdef	ISP_TARGET_MODE
	isp->isp_role = ISP_ROLE_BOTH;
#else
	isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	isp->isp_dev = dev;
	/*
	 * Try and find firmware for this device.
	 */
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}
	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);
	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}
	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}
	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);
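
	/*
	 * Clearing the low bit of the expansion ROM base address register
	 * is what actually disables the ROM decode: bit 0 is the PCI
	 * expansion ROM enable bit.
	 */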
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}
	if (kgetenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (kgetenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (kgetenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (kgetenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (kgetenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (kgetenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (kgetenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}
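
	/*
	 * Summary (not in the original source): each of the tunables above
	 * is a per-unit bitmap, so e.g. isp_no_fwload=0x1 would suppress
	 * firmware reload only on unit 0.
	 */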
	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (kgetenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (kgetenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}
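
	/*
	 * Hypothetical example: setting isp_portwwn="w50000000aaaa0001" in
	 * the loader environment would be parsed by kgetenv_quad() and land
	 * here as a 64-bit WWN with ISP_CFG_OWNWWPN set.
	 */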
	isp_debug = 0;
	(void) kgetenv_int("isp_debug", &isp_debug);
	if (bus_setup_intr(dev, irq, 0, isp_pci_intr,
	    isp, &pcs->ih, NULL)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}
#ifdef	ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (kgetenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    kmalloc(amt, M_DEVBUF, M_WAITOK);
				bzero(FCPARAM(isp)->isp_dump_data, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif
	isp->isp_port = pci_get_function(dev);

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:
	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}
	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}
	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}
	if (pcs) {
		if (pcs->pci_isp.isp_param)
			kfree(pcs->pci_isp.isp_param, M_DEVBUF);
		kfree(pcs, M_DEVBUF);
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}
static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}
#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
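
/*
 * Illustrative note: IspVirt2Off() takes the block bits of a virtual
 * register offset, indexes the per-chip pci_poff[] table filled in at
 * attach time, and adds the low byte as the offset within that block;
 * BXR2/BXW2 are simply 16-bit bus-space accessors on the mapped BAR.
 */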
static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}
static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}
static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oldconf = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}
static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	u_int16_t oldconf = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}
static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	u_int16_t oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}
struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}
/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
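
/*
 * The +1 covers the case where a MAXPHYS-sized transfer does not start on
 * a page boundary and therefore straddles one extra page, needing one more
 * S/G segment than MAXPHYS/PAGE_SIZE alone would suggest.
 */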
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
#else
	alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) kmalloc(len, M_DEVBUF, M_WAITOK);

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		kfree(pcs->dmaps, M_DEVBUF);
		kfree(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		kfree(isp->isp_xflist, M_DEVBUF);
		kfree(pcs->dmaps, M_DEVBUF);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	kfree(isp->isp_xflist, M_DEVBUF);
	kfree(pcs->dmaps, M_DEVBUF);
	isp->isp_rquest = NULL;
	return (1);
}
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;	/* original request */
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2
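
/*
 * mush_t is the cookie passed through the "virtual grapevine" to the
 * bus_dma callbacks (dma2/tdma_mk/tdma_mkfc): isp_pci_dmasetup() stuffs
 * the command, request entry, and queue pointers in here so the callback
 * can finish building queue entries and report errors back.
 */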
#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1
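
/*
 * With STATUS_WITH_DATA defined, status may ride along on the final data
 * CTIO; without it, tdma_mk() sets CT_NO_DATA and synthesizes one extra
 * status-only CTIO at the end of the sequence.
 */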
static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = (ct_entry_t *) mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count =
				    dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base =
				    dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags, cto->ct_scsi_status,
				    cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO.
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * it for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}
/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = (ct2_entry_t *) mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
		    cto->ct_seg_count, dm_segs[segcnt].ds_addr,
		    dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%x:%u",
			    cto->ct_header.rqs_entry_count-1, seg,
			    dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif
static void dma2(void *, bus_dma_segment_t *, int, int);
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	} else {
		eptr = dma2;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
				mp->error = error;
			}
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	default:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}
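
/*
 * Callers interpret the return value of isp_pci_dmasetup() as follows:
 * CMD_QUEUED means the entry was built and swizzled into the queue,
 * CMD_EAGAIN means the request queue was full (MUSHERR_NOQENTRIES) and
 * the command should be retried, and CMD_COMPLETE means the command
 * failed outright with the CAM error already set on the CCB.
 */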
static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];

	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}
static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}
static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;

	if (msg)
		kprintf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		kprintf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		kprintf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		kprintf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	kprintf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	kprintf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		kprintf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		kprintf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		kprintf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	kprintf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	kprintf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}