2 * Copyright (c) 1997-2008 by Matthew Jacob
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.159 2011/11/16 02:52:24 mjacob Exp $
29 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/linker.h>
38 #include <sys/firmware.h>
40 #include <sys/stdint.h>
41 #include <bus/pci/pcireg.h>
42 #include <bus/pci/pcivar.h>
44 #include <sys/malloc.h>
47 #include <dev/disk/isp/isp_freebsd.h>
49 static uint32_t isp_pci_rd_reg(ispsoftc_t
*, int);
50 static void isp_pci_wr_reg(ispsoftc_t
*, int, uint32_t);
51 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t
*, int);
52 static void isp_pci_wr_reg_1080(ispsoftc_t
*, int, uint32_t);
53 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t
*, int);
54 static void isp_pci_wr_reg_2400(ispsoftc_t
*, int, uint32_t);
55 static int isp_pci_rd_isr(ispsoftc_t
*, uint32_t *, uint16_t *, uint16_t *);
56 static int isp_pci_rd_isr_2300(ispsoftc_t
*, uint32_t *, uint16_t *, uint16_t *);
57 static int isp_pci_rd_isr_2400(ispsoftc_t
*, uint32_t *, uint16_t *, uint16_t *);
58 static int isp_pci_mbxdma(ispsoftc_t
*);
59 static int isp_pci_dmasetup(ispsoftc_t
*, XS_T
*, void *);
62 static void isp_pci_reset0(ispsoftc_t
*);
63 static void isp_pci_reset1(ispsoftc_t
*);
64 static void isp_pci_dumpregs(ispsoftc_t
*, const char *);
66 static struct ispmdvec mdvec
= {
72 isp_common_dmateardown
,
77 BIU_BURST_ENABLE
|BIU_PCI_CONF1_FIFO_64
80 static struct ispmdvec mdvec_1080
= {
86 isp_common_dmateardown
,
91 BIU_BURST_ENABLE
|BIU_PCI_CONF1_FIFO_64
94 static struct ispmdvec mdvec_12160
= {
100 isp_common_dmateardown
,
105 BIU_BURST_ENABLE
|BIU_PCI_CONF1_FIFO_64
108 static struct ispmdvec mdvec_2100
= {
114 isp_common_dmateardown
,
120 static struct ispmdvec mdvec_2200
= {
126 isp_common_dmateardown
,
132 static struct ispmdvec mdvec_2300
= {
138 isp_common_dmateardown
,
144 static struct ispmdvec mdvec_2400
= {
150 isp_common_dmateardown
,
156 static struct ispmdvec mdvec_2500
= {
162 isp_common_dmateardown
,
168 #ifndef PCIM_CMD_INVEN
169 #define PCIM_CMD_INVEN 0x10
171 #ifndef PCIM_CMD_BUSMASTEREN
172 #define PCIM_CMD_BUSMASTEREN 0x0004
174 #ifndef PCIM_CMD_PERRESPEN
175 #define PCIM_CMD_PERRESPEN 0x0040
177 #ifndef PCIM_CMD_SEREN
178 #define PCIM_CMD_SEREN 0x0100
180 #ifndef PCIM_CMD_INTX_DISABLE
181 #define PCIM_CMD_INTX_DISABLE 0x0400
185 #define PCIR_COMMAND 0x04
188 #ifndef PCIR_CACHELNSZ
189 #define PCIR_CACHELNSZ 0x0c
192 #ifndef PCIR_LATTIMER
193 #define PCIR_LATTIMER 0x0d
197 #define PCIR_ROMADDR 0x30
200 #ifndef PCI_VENDOR_QLOGIC
201 #define PCI_VENDOR_QLOGIC 0x1077
204 #ifndef PCI_PRODUCT_QLOGIC_ISP1020
205 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
208 #ifndef PCI_PRODUCT_QLOGIC_ISP1080
209 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
212 #ifndef PCI_PRODUCT_QLOGIC_ISP10160
213 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
216 #ifndef PCI_PRODUCT_QLOGIC_ISP12160
217 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
220 #ifndef PCI_PRODUCT_QLOGIC_ISP1240
221 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
224 #ifndef PCI_PRODUCT_QLOGIC_ISP1280
225 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
228 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
229 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
232 #ifndef PCI_PRODUCT_QLOGIC_ISP2200
233 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
236 #ifndef PCI_PRODUCT_QLOGIC_ISP2300
237 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
240 #ifndef PCI_PRODUCT_QLOGIC_ISP2312
241 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
244 #ifndef PCI_PRODUCT_QLOGIC_ISP2322
245 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
248 #ifndef PCI_PRODUCT_QLOGIC_ISP2422
249 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
252 #ifndef PCI_PRODUCT_QLOGIC_ISP2432
253 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432
256 #ifndef PCI_PRODUCT_QLOGIC_ISP2532
257 #define PCI_PRODUCT_QLOGIC_ISP2532 0x2532
260 #ifndef PCI_PRODUCT_QLOGIC_ISP6312
261 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
264 #ifndef PCI_PRODUCT_QLOGIC_ISP6322
265 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
268 #ifndef PCI_PRODUCT_QLOGIC_ISP5432
269 #define PCI_PRODUCT_QLOGIC_ISP5432 0x5432
/*
 * Composite PCI device IDs: (product id << 16) | vendor id.  This is the
 * same layout isp_pci_probe() builds from pci_get_device()/pci_get_vendor()
 * before switching on the result, and the layout pci_get_devid() returns
 * in isp_pci_attach().
 * NOTE(review): the leading decimal on each line below is a stray original
 * line number left over from a lossy extraction of this file.
 */
272 #define PCI_QLOGIC_ISP5432 \
273 ((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)
275 #define PCI_QLOGIC_ISP1020 \
276 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
278 #define PCI_QLOGIC_ISP1080 \
279 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
281 #define PCI_QLOGIC_ISP10160 \
282 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
284 #define PCI_QLOGIC_ISP12160 \
285 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
287 #define PCI_QLOGIC_ISP1240 \
288 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
290 #define PCI_QLOGIC_ISP1280 \
291 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
293 #define PCI_QLOGIC_ISP2100 \
294 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
296 #define PCI_QLOGIC_ISP2200 \
297 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
299 #define PCI_QLOGIC_ISP2300 \
300 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
302 #define PCI_QLOGIC_ISP2312 \
303 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
305 #define PCI_QLOGIC_ISP2322 \
306 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
308 #define PCI_QLOGIC_ISP2422 \
309 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
311 #define PCI_QLOGIC_ISP2432 \
312 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
314 #define PCI_QLOGIC_ISP2532 \
315 ((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)
317 #define PCI_QLOGIC_ISP6312 \
318 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
320 #define PCI_QLOGIC_ISP6322 \
321 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
324 * Odd case for some AMI raid cards... We need to *not* attach to this.
326 #define AMI_RAID_SUBVENDOR_ID 0x101e
/*
 * PCI BAR config-space offsets used when mapping the chip's register
 * window: isp_pci_attach() picks MEM_MAP_REG or IO_MAP_REG according to
 * whether memory-space or I/O-space mapping is preferred.
 */
328 #define IO_MAP_REG 0x10
329 #define MEM_MAP_REG 0x14
/*
 * Defaults written back to PCI config space by attach: the minimum
 * latency timer value (see the PCIR_LATTIMER fixup) and the default
 * cache line size (compared against PCIR_CACHELNSZ).
 */
331 #define PCI_DFLT_LTNCY 0x40
332 #define PCI_DFLT_LNSZ 0x10
334 static int isp_pci_probe (device_t
);
335 static int isp_pci_attach (device_t
);
336 static int isp_pci_detach (device_t
);
/*
 * Recover the owning PCI device_t from a generic ispsoftc_t pointer.
 * NOTE(review): the cast assumes the ispsoftc is the first member of
 * struct isp_pcisoftc -- the struct body is not fully visible in this
 * chunk, so confirm the member layout before relying on this.
 */
339 #define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev
340 struct isp_pcisoftc
{
343 struct resource
* regs
;
350 int16_t pci_poff
[_NREG_BLKS
];
/*
 * MSI is enabled by default; the "hw.isp.msi.enable" loader tunable lets
 * the administrator force legacy INTx.  The value is passed to
 * pci_alloc_1intr() in isp_pci_attach().
 */
354 static int isp_msi_enable
= 1;
355 TUNABLE_INT("hw.isp.msi.enable", &isp_msi_enable
);
357 static device_method_t isp_pci_methods
[] = {
358 /* Device interface */
359 DEVMETHOD(device_probe
, isp_pci_probe
),
360 DEVMETHOD(device_attach
, isp_pci_attach
),
361 DEVMETHOD(device_detach
, isp_pci_detach
),
365 static driver_t isp_pci_driver
= {
366 "isp", isp_pci_methods
, sizeof (struct isp_pcisoftc
)
368 static devclass_t isp_devclass
;
/*
 * Hook the "isp" driver into the PCI bus.  The two trailing NULLs mean
 * no module event handler and no handler argument are registered.
 */
369 DRIVER_MODULE(isp
, pci
, isp_pci_driver
, isp_devclass
, NULL
, NULL
);
372 isp_pci_probe(device_t dev
)
374 switch ((pci_get_device(dev
) << 16) | (pci_get_vendor(dev
))) {
375 case PCI_QLOGIC_ISP1020
:
376 device_set_desc(dev
, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
378 case PCI_QLOGIC_ISP1080
:
379 device_set_desc(dev
, "Qlogic ISP 1080 PCI SCSI Adapter");
381 case PCI_QLOGIC_ISP1240
:
382 device_set_desc(dev
, "Qlogic ISP 1240 PCI SCSI Adapter");
384 case PCI_QLOGIC_ISP1280
:
385 device_set_desc(dev
, "Qlogic ISP 1280 PCI SCSI Adapter");
387 case PCI_QLOGIC_ISP10160
:
388 device_set_desc(dev
, "Qlogic ISP 10160 PCI SCSI Adapter");
390 case PCI_QLOGIC_ISP12160
:
391 if (pci_get_subvendor(dev
) == AMI_RAID_SUBVENDOR_ID
) {
394 device_set_desc(dev
, "Qlogic ISP 12160 PCI SCSI Adapter");
396 case PCI_QLOGIC_ISP2100
:
397 device_set_desc(dev
, "Qlogic ISP 2100 PCI FC-AL Adapter");
399 case PCI_QLOGIC_ISP2200
:
400 device_set_desc(dev
, "Qlogic ISP 2200 PCI FC-AL Adapter");
402 case PCI_QLOGIC_ISP2300
:
403 device_set_desc(dev
, "Qlogic ISP 2300 PCI FC-AL Adapter");
405 case PCI_QLOGIC_ISP2312
:
406 device_set_desc(dev
, "Qlogic ISP 2312 PCI FC-AL Adapter");
408 case PCI_QLOGIC_ISP2322
:
409 device_set_desc(dev
, "Qlogic ISP 2322 PCI FC-AL Adapter");
411 case PCI_QLOGIC_ISP2422
:
412 device_set_desc(dev
, "Qlogic ISP 2422 PCI FC-AL Adapter");
414 case PCI_QLOGIC_ISP2432
:
415 device_set_desc(dev
, "Qlogic ISP 2432 PCI FC-AL Adapter");
417 case PCI_QLOGIC_ISP2532
:
418 device_set_desc(dev
, "Qlogic ISP 2532 PCI FC-AL Adapter");
420 case PCI_QLOGIC_ISP5432
:
421 device_set_desc(dev
, "Qlogic ISP 5432 PCI FC-AL Adapter");
423 case PCI_QLOGIC_ISP6312
:
424 device_set_desc(dev
, "Qlogic ISP 6312 PCI FC-AL Adapter");
426 case PCI_QLOGIC_ISP6322
:
427 device_set_desc(dev
, "Qlogic ISP 6322 PCI FC-AL Adapter");
432 if (isp_announced
== 0 && bootverbose
) {
433 kprintf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
434 "Core Version %d.%d\n",
435 ISP_PLATFORM_VERSION_MAJOR
, ISP_PLATFORM_VERSION_MINOR
,
436 ISP_CORE_VERSION_MAJOR
, ISP_CORE_VERSION_MINOR
);
440 * XXXX: Here is where we might load the f/w module
441 * XXXX: (or increase a reference count to it).
443 return (BUS_PROBE_DEFAULT
);
447 isp_get_generic_options(device_t dev
, ispsoftc_t
*isp
, int *nvp
)
452 * Figure out if we're supposed to skip this one.
455 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "disable", &tval
) == 0 && tval
) {
456 device_printf(dev
, "disabled at user request\n");
457 isp
->isp_osinfo
.disabled
= 1;
462 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "fwload_disable", &tval
) == 0 && tval
!= 0) {
463 isp
->isp_confopts
|= ISP_CFG_NORELOAD
;
466 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "ignore_nvram", &tval
) == 0 && tval
!= 0) {
467 isp
->isp_confopts
|= ISP_CFG_NONVRAM
;
470 (void) resource_int_value(device_get_name(dev
), device_get_unit(dev
), "debug", &tval
);
472 isp
->isp_dblev
= tval
;
474 isp
->isp_dblev
= ISP_LOGWARN
|ISP_LOGERR
;
477 isp
->isp_dblev
|= ISP_LOGCONFIG
|ISP_LOGINFO
;
479 (void) resource_int_value(device_get_name(dev
), device_get_unit(dev
), "vports", &tval
);
480 if (tval
> 0 && tval
< 127) {
486 (void) resource_int_value(device_get_name(dev
), device_get_unit(dev
), "autoconfig", &tval
);
487 isp_autoconfig
= tval
;
489 (void) resource_int_value(device_get_name(dev
), device_get_unit(dev
), "quickboot_time", &tval
);
490 isp_quickboot_time
= tval
;
493 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "forcemulti", &tval
) == 0 && tval
!= 0) {
494 isp
->isp_osinfo
.forcemulti
= 1;
499 isp_get_pci_options(device_t dev
, int *m1
, int *m2
)
503 * Which we should try first - memory mapping or i/o mapping?
505 * We used to try memory first followed by i/o on alpha, otherwise
506 * the reverse, but we should just try memory first all the time now.
508 *m1
= PCIM_CMD_MEMEN
;
509 *m2
= PCIM_CMD_PORTEN
;
512 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "prefer_iomap", &tval
) == 0 && tval
!= 0) {
513 *m1
= PCIM_CMD_PORTEN
;
514 *m2
= PCIM_CMD_MEMEN
;
517 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "prefer_memmap", &tval
) == 0 && tval
!= 0) {
518 *m1
= PCIM_CMD_MEMEN
;
519 *m2
= PCIM_CMD_PORTEN
;
524 isp_get_specific_options(device_t dev
, int chan
, ispsoftc_t
*isp
)
529 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "iid", &tval
)) {
531 ISP_FC_PC(isp
, chan
)->default_id
= 109 - chan
;
533 ISP_SPI_PC(isp
, chan
)->iid
= 7;
537 ISP_FC_PC(isp
, chan
)->default_id
= tval
- chan
;
539 ISP_SPI_PC(isp
, chan
)->iid
= tval
;
541 isp
->isp_confopts
|= ISP_CFG_OWNLOOPID
;
545 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "role", &tval
) == 0) {
548 case ISP_ROLE_INITIATOR
:
549 case ISP_ROLE_TARGET
:
550 case ISP_ROLE_INITIATOR
|ISP_ROLE_TARGET
:
551 device_printf(dev
, "setting role to 0x%x\n", tval
);
559 tval
= ISP_DEFAULT_ROLES
;
563 ISP_SPI_PC(isp
, chan
)->def_role
= tval
;
566 ISP_FC_PC(isp
, chan
)->def_role
= tval
;
569 if (resource_int_value(device_get_name(dev
), device_get_unit(dev
), "fullduplex", &tval
) == 0 && tval
!= 0) {
570 isp
->isp_confopts
|= ISP_CFG_FULL_DUPLEX
;
573 if (resource_string_value(device_get_name(dev
), device_get_unit(dev
), "topology", &sptr
) == 0 && sptr
!= NULL
) {
574 if (strcmp(sptr
, "lport") == 0) {
575 isp
->isp_confopts
|= ISP_CFG_LPORT
;
576 } else if (strcmp(sptr
, "nport") == 0) {
577 isp
->isp_confopts
|= ISP_CFG_NPORT
;
578 } else if (strcmp(sptr
, "lport-only") == 0) {
579 isp
->isp_confopts
|= ISP_CFG_LPORT_ONLY
;
580 } else if (strcmp(sptr
, "nport-only") == 0) {
581 isp
->isp_confopts
|= ISP_CFG_NPORT_ONLY
;
586 * Because the resource_*_value functions can neither return
587 * 64 bit integer values, nor can they be directly coerced
588 * to interpret the right hand side of the assignment as
589 * you want them to interpret it, we have to force WWN
590 * hint replacement to specify WWN strings with a leading
591 * 'w' (e..g w50000000aaaa0001). Sigh.
594 tval
= resource_string_value(device_get_name(dev
), device_get_unit(dev
), "portwwn", &sptr
);
595 if (tval
== 0 && sptr
!= NULL
&& *sptr
++ == 'w') {
597 ISP_FC_PC(isp
, chan
)->def_wwpn
= strtouq(sptr
, &eptr
, 16);
598 if (eptr
< sptr
+ 16 || ISP_FC_PC(isp
, chan
)->def_wwpn
== -1) {
599 device_printf(dev
, "mangled portwwn hint '%s'\n", sptr
);
600 ISP_FC_PC(isp
, chan
)->def_wwpn
= 0;
605 tval
= resource_string_value(device_get_name(dev
), device_get_unit(dev
), "nodewwn", &sptr
);
606 if (tval
== 0 && sptr
!= NULL
&& *sptr
++ == 'w') {
608 ISP_FC_PC(isp
, chan
)->def_wwnn
= strtouq(sptr
, &eptr
, 16);
609 if (eptr
< sptr
+ 16 || ISP_FC_PC(isp
, chan
)->def_wwnn
== 0) {
610 device_printf(dev
, "mangled nodewwn hint '%s'\n", sptr
);
611 ISP_FC_PC(isp
, chan
)->def_wwnn
= 0;
616 (void) resource_int_value(device_get_name(dev
), device_get_unit(dev
), "hysteresis", &tval
);
617 if (tval
>= 0 && tval
< 256) {
618 ISP_FC_PC(isp
, chan
)->hysteresis
= tval
;
620 ISP_FC_PC(isp
, chan
)->hysteresis
= isp_fabric_hysteresis
;
624 (void) resource_int_value(device_get_name(dev
), device_get_unit(dev
), "loop_down_limit", &tval
);
625 if (tval
>= 0 && tval
< 0xffff) {
626 ISP_FC_PC(isp
, chan
)->loop_down_limit
= tval
;
628 ISP_FC_PC(isp
, chan
)->loop_down_limit
= isp_loop_down_limit
;
632 (void) resource_int_value(device_get_name(dev
), device_get_unit(dev
), "gone_device_time", &tval
);
633 if (tval
>= 0 && tval
< 0xffff) {
634 ISP_FC_PC(isp
, chan
)->gone_device_time
= tval
;
636 ISP_FC_PC(isp
, chan
)->gone_device_time
= isp_gone_device_time
;
641 isp_pci_attach(device_t dev
)
643 int i
, m1
, m2
, locksetup
= 0;
645 uint32_t data
, cmd
, linesz
, did
;
646 struct isp_pcisoftc
*pcs
;
652 pcs
= device_get_softc(dev
);
654 device_printf(dev
, "cannot get softc\n");
657 memset(pcs
, 0, sizeof (*pcs
));
665 * Get Generic Options
667 isp_get_generic_options(dev
, isp
, &isp_nvports
);
670 * Check to see if options have us disabled
672 if (isp
->isp_osinfo
.disabled
) {
674 * But return zero to preserve unit numbering
680 * Get PCI options- which in this case are just mapping preferences.
682 isp_get_pci_options(dev
, &m1
, &m2
);
684 linesz
= PCI_DFLT_LNSZ
;
685 pcs
->irq
= pcs
->regs
= NULL
;
686 pcs
->rgd
= pcs
->rtp
= pcs
->iqd
= 0;
688 cmd
= pci_read_config(dev
, PCIR_COMMAND
, 2);
690 pcs
->rtp
= (m1
== PCIM_CMD_MEMEN
)? SYS_RES_MEMORY
: SYS_RES_IOPORT
;
691 pcs
->rgd
= (m1
== PCIM_CMD_MEMEN
)? MEM_MAP_REG
: IO_MAP_REG
;
692 pcs
->regs
= bus_alloc_resource_any(dev
, pcs
->rtp
, &pcs
->rgd
, RF_ACTIVE
);
694 if (pcs
->regs
== NULL
&& (cmd
& m2
)) {
695 pcs
->rtp
= (m2
== PCIM_CMD_MEMEN
)? SYS_RES_MEMORY
: SYS_RES_IOPORT
;
696 pcs
->rgd
= (m2
== PCIM_CMD_MEMEN
)? MEM_MAP_REG
: IO_MAP_REG
;
697 pcs
->regs
= bus_alloc_resource_any(dev
, pcs
->rtp
, &pcs
->rgd
, RF_ACTIVE
);
699 if (pcs
->regs
== NULL
) {
700 device_printf(dev
, "unable to map any ports\n");
704 device_printf(dev
, "using %s space register mapping\n", (pcs
->rgd
== IO_MAP_REG
)? "I/O" : "Memory");
706 isp
->isp_bus_tag
= rman_get_bustag(pcs
->regs
);
707 isp
->isp_bus_handle
= rman_get_bushandle(pcs
->regs
);
710 pcs
->pci_poff
[BIU_BLOCK
>> _BLK_REG_SHFT
] = BIU_REGS_OFF
;
711 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS_OFF
;
712 pcs
->pci_poff
[SXP_BLOCK
>> _BLK_REG_SHFT
] = PCI_SXP_REGS_OFF
;
713 pcs
->pci_poff
[RISC_BLOCK
>> _BLK_REG_SHFT
] = PCI_RISC_REGS_OFF
;
714 pcs
->pci_poff
[DMA_BLOCK
>> _BLK_REG_SHFT
] = DMA_REGS_OFF
;
716 switch (pci_get_devid(dev
)) {
717 case PCI_QLOGIC_ISP1020
:
719 isp
->isp_mdvec
= &mdvec
;
720 isp
->isp_type
= ISP_HA_SCSI_UNKNOWN
;
722 case PCI_QLOGIC_ISP1080
:
724 isp
->isp_mdvec
= &mdvec_1080
;
725 isp
->isp_type
= ISP_HA_SCSI_1080
;
726 pcs
->pci_poff
[DMA_BLOCK
>> _BLK_REG_SHFT
] = ISP1080_DMA_REGS_OFF
;
728 case PCI_QLOGIC_ISP1240
:
730 isp
->isp_mdvec
= &mdvec_1080
;
731 isp
->isp_type
= ISP_HA_SCSI_1240
;
733 pcs
->pci_poff
[DMA_BLOCK
>> _BLK_REG_SHFT
] = ISP1080_DMA_REGS_OFF
;
735 case PCI_QLOGIC_ISP1280
:
737 isp
->isp_mdvec
= &mdvec_1080
;
738 isp
->isp_type
= ISP_HA_SCSI_1280
;
739 pcs
->pci_poff
[DMA_BLOCK
>> _BLK_REG_SHFT
] = ISP1080_DMA_REGS_OFF
;
741 case PCI_QLOGIC_ISP10160
:
743 isp
->isp_mdvec
= &mdvec_12160
;
744 isp
->isp_type
= ISP_HA_SCSI_10160
;
745 pcs
->pci_poff
[DMA_BLOCK
>> _BLK_REG_SHFT
] = ISP1080_DMA_REGS_OFF
;
747 case PCI_QLOGIC_ISP12160
:
750 isp
->isp_mdvec
= &mdvec_12160
;
751 isp
->isp_type
= ISP_HA_SCSI_12160
;
752 pcs
->pci_poff
[DMA_BLOCK
>> _BLK_REG_SHFT
] = ISP1080_DMA_REGS_OFF
;
754 case PCI_QLOGIC_ISP2100
:
756 isp
->isp_mdvec
= &mdvec_2100
;
757 isp
->isp_type
= ISP_HA_FC_2100
;
758 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS2100_OFF
;
759 if (pci_get_revid(dev
) < 3) {
761 * XXX: Need to get the actual revision
762 * XXX: number of the 2100 FB. At any rate,
763 * XXX: lower cache line size for early revision
769 case PCI_QLOGIC_ISP2200
:
771 isp
->isp_mdvec
= &mdvec_2200
;
772 isp
->isp_type
= ISP_HA_FC_2200
;
773 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS2100_OFF
;
775 case PCI_QLOGIC_ISP2300
:
777 isp
->isp_mdvec
= &mdvec_2300
;
778 isp
->isp_type
= ISP_HA_FC_2300
;
779 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS2300_OFF
;
781 case PCI_QLOGIC_ISP2312
:
782 case PCI_QLOGIC_ISP6312
:
784 isp
->isp_mdvec
= &mdvec_2300
;
785 isp
->isp_type
= ISP_HA_FC_2312
;
786 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS2300_OFF
;
788 case PCI_QLOGIC_ISP2322
:
789 case PCI_QLOGIC_ISP6322
:
791 isp
->isp_mdvec
= &mdvec_2300
;
792 isp
->isp_type
= ISP_HA_FC_2322
;
793 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS2300_OFF
;
795 case PCI_QLOGIC_ISP2422
:
796 case PCI_QLOGIC_ISP2432
:
798 isp
->isp_nchan
+= isp_nvports
;
799 isp
->isp_mdvec
= &mdvec_2400
;
800 isp
->isp_type
= ISP_HA_FC_2400
;
801 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS2400_OFF
;
803 case PCI_QLOGIC_ISP2532
:
805 isp
->isp_nchan
+= isp_nvports
;
806 isp
->isp_mdvec
= &mdvec_2500
;
807 isp
->isp_type
= ISP_HA_FC_2500
;
808 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS2400_OFF
;
810 case PCI_QLOGIC_ISP5432
:
812 isp
->isp_mdvec
= &mdvec_2500
;
813 isp
->isp_type
= ISP_HA_FC_2500
;
814 pcs
->pci_poff
[MBOX_BLOCK
>> _BLK_REG_SHFT
] = PCI_MBOX_REGS2400_OFF
;
817 device_printf(dev
, "unknown device type\n");
821 isp
->isp_revision
= pci_get_revid(dev
);
824 psize
= sizeof (fcparam
);
825 xsize
= sizeof (struct isp_fc
);
827 psize
= sizeof (sdparam
);
828 xsize
= sizeof (struct isp_spi
);
830 psize
*= isp
->isp_nchan
;
831 xsize
*= isp
->isp_nchan
;
832 isp
->isp_param
= kmalloc(psize
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
833 if (isp
->isp_param
== NULL
) {
834 device_printf(dev
, "cannot allocate parameter data\n");
837 isp
->isp_osinfo
.pc
.ptr
= kmalloc(xsize
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
838 if (isp
->isp_osinfo
.pc
.ptr
== NULL
) {
839 device_printf(dev
, "cannot allocate parameter data\n");
844 * Now that we know who we are (roughly) get/set specific options
846 for (i
= 0; i
< isp
->isp_nchan
; i
++) {
847 isp_get_specific_options(dev
, i
, isp
);
851 * The 'it' suffix really only matters for SCSI cards in target mode.
853 isp
->isp_osinfo
.fw
= NULL
;
854 if (IS_SCSI(isp
) && (ISP_SPI_PC(isp
, 0)->def_role
& ISP_ROLE_TARGET
)) {
855 ksnprintf(fwname
, sizeof (fwname
), "isp_%04x_it", did
);
856 isp
->isp_osinfo
.fw
= firmware_get(fwname
);
857 } else if (IS_24XX(isp
) && (isp
->isp_nchan
> 1 || isp
->isp_osinfo
.forcemulti
)) {
858 ksnprintf(fwname
, sizeof (fwname
), "isp_%04x_multi", did
);
859 isp
->isp_osinfo
.fw
= firmware_get(fwname
);
861 if (isp
->isp_osinfo
.fw
== NULL
) {
862 ksnprintf(fwname
, sizeof (fwname
), "isp_%04x", did
);
863 isp
->isp_osinfo
.fw
= firmware_get(fwname
);
865 if (isp
->isp_osinfo
.fw
!= NULL
) {
866 isp
->isp_mdvec
->dv_ispfw
= isp
->isp_osinfo
.fw
->data
;
870 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
873 cmd
|= PCIM_CMD_SEREN
| PCIM_CMD_PERRESPEN
|
874 PCIM_CMD_BUSMASTEREN
| PCIM_CMD_INVEN
;
876 if (IS_2300(isp
)) { /* per QLogic errata */
877 cmd
&= ~PCIM_CMD_INVEN
;
880 if (IS_2322(isp
) || pci_get_devid(dev
) == PCI_QLOGIC_ISP6312
) {
881 cmd
&= ~PCIM_CMD_INTX_DISABLE
;
885 cmd
&= ~PCIM_CMD_INTX_DISABLE
;
888 pci_write_config(dev
, PCIR_COMMAND
, cmd
, 2);
891 * Make sure the Cache Line Size register is set sensibly.
893 data
= pci_read_config(dev
, PCIR_CACHELNSZ
, 1);
894 if (data
== 0 || (linesz
!= PCI_DFLT_LNSZ
&& data
!= linesz
)) {
895 isp_prt(isp
, ISP_LOGCONFIG
, "set PCI line size to %d from %d", linesz
, data
);
897 pci_write_config(dev
, PCIR_CACHELNSZ
, data
, 1);
901 * Make sure the Latency Timer is sane.
903 data
= pci_read_config(dev
, PCIR_LATTIMER
, 1);
904 if (data
< PCI_DFLT_LTNCY
) {
905 data
= PCI_DFLT_LTNCY
;
906 isp_prt(isp
, ISP_LOGCONFIG
, "set PCI latency to %d", data
);
907 pci_write_config(dev
, PCIR_LATTIMER
, data
, 1);
911 * Make sure we've disabled the ROM.
913 data
= pci_read_config(dev
, PCIR_ROMADDR
, 4);
915 pci_write_config(dev
, PCIR_ROMADDR
, data
, 4);
917 pcs
->irq_type
= pci_alloc_1intr(dev
, isp_msi_enable
, &pcs
->iqd
,
919 pcs
->irq
= bus_alloc_resource_any(dev
, SYS_RES_IRQ
, &pcs
->iqd
,
921 if (pcs
->irq
== NULL
) {
922 device_printf(dev
, "could not allocate interrupt\n");
926 /* Make sure the lock is set up. */
927 lockinit(&isp
->isp_osinfo
.lock
, "isp", 0, LK_CANRECURSE
);
930 if (isp_setup_intr(dev
, pcs
->irq
, ISP_IFLAGS
, isp_platform_intr
, isp
, &pcs
->ih
, NULL
)) {
931 device_printf(dev
, "could not setup interrupt\n");
936 * Last minute checks...
938 if (IS_23XX(isp
) || IS_24XX(isp
)) {
939 isp
->isp_port
= pci_get_function(dev
);
943 * Make sure we're in reset state.
947 if (isp
->isp_state
!= ISP_RESETSTATE
) {
952 if (isp
->isp_state
== ISP_INITSTATE
) {
953 isp
->isp_state
= ISP_RUNSTATE
;
956 if (isp_attach(isp
)) {
966 (void) bus_teardown_intr(dev
, pcs
->irq
, pcs
->ih
);
969 lockuninit(&isp
->isp_osinfo
.lock
);
972 (void) bus_release_resource(dev
, SYS_RES_IRQ
, pcs
->iqd
, pcs
->irq
);
974 if (pcs
->irq_type
== PCI_INTR_TYPE_MSI
) {
975 pci_release_msi(dev
);
978 (void) bus_release_resource(dev
, pcs
->rtp
, pcs
->rgd
, pcs
->regs
);
980 if (pcs
->pci_isp
.isp_param
) {
981 kfree(pcs
->pci_isp
.isp_param
, M_DEVBUF
);
982 pcs
->pci_isp
.isp_param
= NULL
;
984 if (pcs
->pci_isp
.isp_osinfo
.pc
.ptr
) {
985 kfree(pcs
->pci_isp
.isp_osinfo
.pc
.ptr
, M_DEVBUF
);
986 pcs
->pci_isp
.isp_osinfo
.pc
.ptr
= NULL
;
992 isp_pci_detach(device_t dev
)
994 struct isp_pcisoftc
*pcs
;
998 pcs
= device_get_softc(dev
);
1002 isp
= (ispsoftc_t
*) pcs
;
1003 status
= isp_detach(isp
);
1009 (void) bus_teardown_intr(dev
, pcs
->irq
, pcs
->ih
);
1012 lockuninit(&isp
->isp_osinfo
.lock
);
1013 (void) bus_release_resource(dev
, SYS_RES_IRQ
, pcs
->iqd
, pcs
->irq
);
1014 if (pcs
->irq_type
== PCI_INTR_TYPE_MSI
) {
1015 pci_release_msi(dev
);
1017 (void) bus_release_resource(dev
, pcs
->rtp
, pcs
->rgd
, pcs
->regs
);
1018 if (pcs
->pci_isp
.isp_param
) {
1019 kfree(pcs
->pci_isp
.isp_param
, M_DEVBUF
);
1020 pcs
->pci_isp
.isp_param
= NULL
;
1022 if (pcs
->pci_isp
.isp_osinfo
.pc
.ptr
) {
1023 kfree(pcs
->pci_isp
.isp_osinfo
.pc
.ptr
, M_DEVBUF
);
1024 pcs
->pci_isp
.isp_osinfo
.pc
.ptr
= NULL
;
/*
 * Translate a chip-virtual register offset into a PCI register-window
 * offset: the block bits of 'x' index the per-softc pci_poff[] table
 * (populated in isp_pci_attach()) and the low 12 bits are the offset
 * within that block.
 */
1029 #define IspVirt2Off(a, x) \
1030 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1031 _BLK_REG_SHFT] + ((x) & 0xfff))
/*
 * 16- and 32-bit bus-space accessors through the register mapping set
 * up in attach (isp_bus_tag/isp_bus_handle from rman_get_bustag()/
 * rman_get_bushandle()).
 */
1033 #define BXR2(isp, off) \
1034 bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
1035 #define BXW2(isp, off, v) \
1036 bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
1037 #define BXR4(isp, off) \
1038 bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
1039 #define BXW4(isp, off, v) \
1040 bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
1043 static ISP_INLINE
int
1044 isp_pci_rd_debounced(ispsoftc_t
*isp
, int off
, uint16_t *rp
)
1046 uint32_t val0
, val1
;
1050 val0
= BXR2(isp
, IspVirt2Off(isp
, off
));
1051 val1
= BXR2(isp
, IspVirt2Off(isp
, off
));
1052 } while (val0
!= val1
&& ++i
< 1000);
1061 isp_pci_rd_isr(ispsoftc_t
*isp
, uint32_t *isrp
, uint16_t *semap
, uint16_t *mbp
)
1066 if (isp_pci_rd_debounced(isp
, BIU_ISR
, &isr
)) {
1069 if (isp_pci_rd_debounced(isp
, BIU_SEMA
, &sema
)) {
1073 isr
= BXR2(isp
, IspVirt2Off(isp
, BIU_ISR
));
1074 sema
= BXR2(isp
, IspVirt2Off(isp
, BIU_SEMA
));
1076 isp_prt(isp
, ISP_LOGDEBUG3
, "ISR 0x%x SEMA 0x%x", isr
, sema
);
1077 isr
&= INT_PENDING_MASK(isp
);
1078 sema
&= BIU_SEMA_LOCK
;
1079 if (isr
== 0 && sema
== 0) {
1083 if ((*semap
= sema
) != 0) {
1085 if (isp_pci_rd_debounced(isp
, OUTMAILBOX0
, mbp
)) {
1089 *mbp
= BXR2(isp
, IspVirt2Off(isp
, OUTMAILBOX0
));
1096 isp_pci_rd_isr_2300(ispsoftc_t
*isp
, uint32_t *isrp
, uint16_t *semap
, uint16_t *mbox0p
)
1101 if (!(BXR2(isp
, IspVirt2Off(isp
, BIU_ISR
) & BIU2100_ISR_RISC_INT
))) {
1105 r2hisr
= BXR4(isp
, IspVirt2Off(isp
, BIU_R2HSTSLO
));
1106 isp_prt(isp
, ISP_LOGDEBUG3
, "RISC2HOST ISR 0x%x", r2hisr
);
1107 if ((r2hisr
& BIU_R2HST_INTR
) == 0) {
1111 switch (r2hisr
& BIU_R2HST_ISTAT_MASK
) {
1112 case ISPR2HST_ROM_MBX_OK
:
1113 case ISPR2HST_ROM_MBX_FAIL
:
1114 case ISPR2HST_MBX_OK
:
1115 case ISPR2HST_MBX_FAIL
:
1116 case ISPR2HST_ASYNC_EVENT
:
1117 *isrp
= r2hisr
& 0xffff;
1118 *mbox0p
= (r2hisr
>> 16);
1121 case ISPR2HST_RIO_16
:
1122 *isrp
= r2hisr
& 0xffff;
1123 *mbox0p
= ASYNC_RIO16_1
;
1126 case ISPR2HST_FPOST
:
1127 *isrp
= r2hisr
& 0xffff;
1128 *mbox0p
= ASYNC_CMD_CMPLT
;
1131 case ISPR2HST_FPOST_CTIO
:
1132 *isrp
= r2hisr
& 0xffff;
1133 *mbox0p
= ASYNC_CTIO_DONE
;
1136 case ISPR2HST_RSPQ_UPDATE
:
1137 *isrp
= r2hisr
& 0xffff;
1142 hccr
= ISP_READ(isp
, HCCR
);
1143 if (hccr
& HCCR_PAUSE
) {
1144 ISP_WRITE(isp
, HCCR
, HCCR_RESET
);
1145 isp_prt(isp
, ISP_LOGERR
, "RISC paused at interrupt (%x->%x)", hccr
, ISP_READ(isp
, HCCR
));
1146 ISP_WRITE(isp
, BIU_ICR
, 0);
1148 isp_prt(isp
, ISP_LOGERR
, "unknown interrupt 0x%x\n", r2hisr
);
1155 isp_pci_rd_isr_2400(ispsoftc_t
*isp
, uint32_t *isrp
, uint16_t *semap
, uint16_t *mbox0p
)
1159 r2hisr
= BXR4(isp
, IspVirt2Off(isp
, BIU2400_R2HSTSLO
));
1160 isp_prt(isp
, ISP_LOGDEBUG3
, "RISC2HOST ISR 0x%x", r2hisr
);
1161 if ((r2hisr
& BIU2400_R2HST_INTR
) == 0) {
1165 switch (r2hisr
& BIU2400_R2HST_ISTAT_MASK
) {
1166 case ISP2400R2HST_ROM_MBX_OK
:
1167 case ISP2400R2HST_ROM_MBX_FAIL
:
1168 case ISP2400R2HST_MBX_OK
:
1169 case ISP2400R2HST_MBX_FAIL
:
1170 case ISP2400R2HST_ASYNC_EVENT
:
1171 *isrp
= r2hisr
& 0xffff;
1172 *mbox0p
= (r2hisr
>> 16);
1175 case ISP2400R2HST_RSPQ_UPDATE
:
1176 case ISP2400R2HST_ATIO_RSPQ_UPDATE
:
1177 case ISP2400R2HST_ATIO_RQST_UPDATE
:
1178 *isrp
= r2hisr
& 0xffff;
1183 ISP_WRITE(isp
, BIU2400_HCCR
, HCCR_2400_CMD_CLEAR_RISC_INT
);
1184 isp_prt(isp
, ISP_LOGERR
, "unknown interrupt 0x%x\n", r2hisr
);
1190 isp_pci_rd_reg(ispsoftc_t
*isp
, int regoff
)
1195 if ((regoff
& _BLK_REG_MASK
) == SXP_BLOCK
) {
1197 * We will assume that someone has paused the RISC processor.
1199 oldconf
= BXR2(isp
, IspVirt2Off(isp
, BIU_CONF1
));
1200 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
), oldconf
| BIU_PCI_CONF1_SXP
);
1201 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1203 rv
= BXR2(isp
, IspVirt2Off(isp
, regoff
));
1204 if ((regoff
& _BLK_REG_MASK
) == SXP_BLOCK
) {
1205 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
), oldconf
);
1206 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1212 isp_pci_wr_reg(ispsoftc_t
*isp
, int regoff
, uint32_t val
)
1216 if ((regoff
& _BLK_REG_MASK
) == SXP_BLOCK
) {
1218 * We will assume that someone has paused the RISC processor.
1220 oldconf
= BXR2(isp
, IspVirt2Off(isp
, BIU_CONF1
));
1221 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
),
1222 oldconf
| BIU_PCI_CONF1_SXP
);
1223 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1225 BXW2(isp
, IspVirt2Off(isp
, regoff
), val
);
1226 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, regoff
), 2, -1);
1227 if ((regoff
& _BLK_REG_MASK
) == SXP_BLOCK
) {
1228 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
), oldconf
);
1229 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1235 isp_pci_rd_reg_1080(ispsoftc_t
*isp
, int regoff
)
1237 uint32_t rv
, oc
= 0;
1239 if ((regoff
& _BLK_REG_MASK
) == SXP_BLOCK
) {
1242 * We will assume that someone has paused the RISC processor.
1244 oc
= BXR2(isp
, IspVirt2Off(isp
, BIU_CONF1
));
1245 tc
= oc
& ~BIU_PCI1080_CONF1_DMA
;
1246 if (regoff
& SXP_BANK1_SELECT
)
1247 tc
|= BIU_PCI1080_CONF1_SXP1
;
1249 tc
|= BIU_PCI1080_CONF1_SXP0
;
1250 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
), tc
);
1251 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1252 } else if ((regoff
& _BLK_REG_MASK
) == DMA_BLOCK
) {
1253 oc
= BXR2(isp
, IspVirt2Off(isp
, BIU_CONF1
));
1254 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
),
1255 oc
| BIU_PCI1080_CONF1_DMA
);
1256 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1258 rv
= BXR2(isp
, IspVirt2Off(isp
, regoff
));
1260 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
), oc
);
1261 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1267 isp_pci_wr_reg_1080(ispsoftc_t
*isp
, int regoff
, uint32_t val
)
1271 if ((regoff
& _BLK_REG_MASK
) == SXP_BLOCK
) {
1274 * We will assume that someone has paused the RISC processor.
1276 oc
= BXR2(isp
, IspVirt2Off(isp
, BIU_CONF1
));
1277 tc
= oc
& ~BIU_PCI1080_CONF1_DMA
;
1278 if (regoff
& SXP_BANK1_SELECT
)
1279 tc
|= BIU_PCI1080_CONF1_SXP1
;
1281 tc
|= BIU_PCI1080_CONF1_SXP0
;
1282 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
), tc
);
1283 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1284 } else if ((regoff
& _BLK_REG_MASK
) == DMA_BLOCK
) {
1285 oc
= BXR2(isp
, IspVirt2Off(isp
, BIU_CONF1
));
1286 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
),
1287 oc
| BIU_PCI1080_CONF1_DMA
);
1288 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1290 BXW2(isp
, IspVirt2Off(isp
, regoff
), val
);
1291 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, regoff
), 2, -1);
1293 BXW2(isp
, IspVirt2Off(isp
, BIU_CONF1
), oc
);
1294 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, BIU_CONF1
), 2, -1);
1299 isp_pci_rd_reg_2400(ispsoftc_t
*isp
, int regoff
)
1302 int block
= regoff
& _BLK_REG_MASK
;
1308 return (BXR2(isp
, IspVirt2Off(isp
, regoff
)));
1310 isp_prt(isp
, ISP_LOGWARN
, "SXP_BLOCK read at 0x%x", regoff
);
1311 return (0xffffffff);
1313 isp_prt(isp
, ISP_LOGWARN
, "RISC_BLOCK read at 0x%x", regoff
);
1314 return (0xffffffff);
1316 isp_prt(isp
, ISP_LOGWARN
, "DMA_BLOCK read at 0x%x", regoff
);
1317 return (0xffffffff);
1319 isp_prt(isp
, ISP_LOGWARN
, "unknown block read at 0x%x", regoff
);
1320 return (0xffffffff);
1325 case BIU2400_FLASH_ADDR
:
1326 case BIU2400_FLASH_DATA
:
1330 case BIU2400_REQINP
:
1331 case BIU2400_REQOUTP
:
1332 case BIU2400_RSPINP
:
1333 case BIU2400_RSPOUTP
:
1334 case BIU2400_PRI_REQINP
:
1335 case BIU2400_PRI_REQOUTP
:
1336 case BIU2400_ATIO_RSPINP
:
1337 case BIU2400_ATIO_RSPOUTP
:
1342 rv
= BXR4(isp
, IspVirt2Off(isp
, regoff
));
1344 case BIU2400_R2HSTSLO
:
1345 rv
= BXR4(isp
, IspVirt2Off(isp
, regoff
));
1347 case BIU2400_R2HSTSHI
:
1348 rv
= BXR4(isp
, IspVirt2Off(isp
, regoff
)) >> 16;
1351 isp_prt(isp
, ISP_LOGERR
,
1352 "isp_pci_rd_reg_2400: unknown offset %x", regoff
);
1360 isp_pci_wr_reg_2400(ispsoftc_t
*isp
, int regoff
, uint32_t val
)
1362 int block
= regoff
& _BLK_REG_MASK
;
1368 BXW2(isp
, IspVirt2Off(isp
, regoff
), val
);
1369 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, regoff
), 2, -1);
1372 isp_prt(isp
, ISP_LOGWARN
, "SXP_BLOCK write at 0x%x", regoff
);
1375 isp_prt(isp
, ISP_LOGWARN
, "RISC_BLOCK write at 0x%x", regoff
);
1378 isp_prt(isp
, ISP_LOGWARN
, "DMA_BLOCK write at 0x%x", regoff
);
1381 isp_prt(isp
, ISP_LOGWARN
, "unknown block write at 0x%x",
1387 case BIU2400_FLASH_ADDR
:
1388 case BIU2400_FLASH_DATA
:
1392 case BIU2400_REQINP
:
1393 case BIU2400_REQOUTP
:
1394 case BIU2400_RSPINP
:
1395 case BIU2400_RSPOUTP
:
1396 case BIU2400_PRI_REQINP
:
1397 case BIU2400_PRI_REQOUTP
:
1398 case BIU2400_ATIO_RSPINP
:
1399 case BIU2400_ATIO_RSPOUTP
:
1404 BXW4(isp
, IspVirt2Off(isp
, regoff
), val
);
1405 MEMORYBARRIER(isp
, SYNC_REG
, IspVirt2Off(isp
, regoff
), 4, -1);
1408 isp_prt(isp
, ISP_LOGERR
,
1409 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff
);
1422 static void imc(void *, bus_dma_segment_t
*, int, int);
1423 static void imc1(void *, bus_dma_segment_t
*, int, int);
1426 imc(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
)
1428 struct imush
*imushp
= (struct imush
*) arg
;
1431 imushp
->error
= error
;
1435 imushp
->error
= EINVAL
;
1438 isp_prt(imushp
->isp
, ISP_LOGDEBUG0
, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs
->ds_addr
, (uintmax_t) segs
->ds_len
);
1439 imushp
->isp
->isp_rquest
= imushp
->vbase
;
1440 imushp
->isp
->isp_rquest_dma
= segs
->ds_addr
;
1441 segs
->ds_addr
+= ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp
->isp
));
1442 imushp
->vbase
+= ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp
->isp
));
1443 imushp
->isp
->isp_result_dma
= segs
->ds_addr
;
1444 imushp
->isp
->isp_result
= imushp
->vbase
;
1446 #ifdef ISP_TARGET_MODE
1447 if (IS_24XX(imushp
->isp
)) {
1448 segs
->ds_addr
+= ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp
->isp
));
1449 imushp
->vbase
+= ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp
->isp
));
1450 imushp
->isp
->isp_atioq_dma
= segs
->ds_addr
;
1451 imushp
->isp
->isp_atioq
= imushp
->vbase
;
1457 imc1(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
)
1459 struct imush
*imushp
= (struct imush
*) arg
;
1461 imushp
->error
= error
;
1465 imushp
->error
= EINVAL
;
1468 isp_prt(imushp
->isp
, ISP_LOGDEBUG0
, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs
->ds_addr
, (uintmax_t) segs
->ds_len
);
1469 FCPARAM(imushp
->isp
, imushp
->chan
)->isp_scdma
= segs
->ds_addr
;
1470 FCPARAM(imushp
->isp
, imushp
->chan
)->isp_scratch
= imushp
->vbase
;
1474 isp_pci_mbxdma(ispsoftc_t
*isp
)
1478 int i
, error
, ns
, cmap
= 0;
1479 bus_size_t slim
; /* segment size */
1480 bus_addr_t llim
; /* low limit of unavailable dma */
1481 bus_addr_t hlim
; /* high limit of unavailable dma */
1485 * Already been here? If so, leave...
1487 if (isp
->isp_rquest
) {
1492 if (isp
->isp_maxcmds
== 0) {
1493 isp_prt(isp
, ISP_LOGERR
, "maxcmds not set");
1498 hlim
= BUS_SPACE_MAXADDR
;
1499 if (IS_ULTRA2(isp
) || IS_FC(isp
) || IS_1240(isp
)) {
1500 if (sizeof (bus_size_t
) > 4) {
1501 slim
= (bus_size_t
) (1ULL << 32);
1503 slim
= (bus_size_t
) (1UL << 31);
1505 llim
= BUS_SPACE_MAXADDR
;
1507 llim
= BUS_SPACE_MAXADDR_32BIT
;
1511 len
= isp
->isp_maxcmds
* sizeof (struct isp_pcmd
);
1512 isp
->isp_osinfo
.pcmd_pool
= (struct isp_pcmd
*) kmalloc(len
, M_DEVBUF
, M_WAITOK
| M_ZERO
);
1513 if (isp
->isp_osinfo
.pcmd_pool
== NULL
) {
1514 isp_prt(isp
, ISP_LOGERR
, "cannot allocate pcmds");
1520 * XXX: We don't really support 64 bit target mode for parallel scsi yet
1522 #ifdef ISP_TARGET_MODE
1523 if (IS_SCSI(isp
) && sizeof (bus_addr_t
) > 4) {
1524 kfree(isp
->isp_osinfo
.pcmd_pool
, M_DEVBUF
);
1525 isp_prt(isp
, ISP_LOGERR
, "we cannot do DAC for SPI cards yet");
1531 if (isp_dma_tag_create(NULL
, 1, slim
, llim
, hlim
, NULL
, NULL
, BUS_SPACE_MAXSIZE
, ISP_NSEGS
, slim
, 0, &isp
->isp_osinfo
.dmat
)) {
1532 kfree(isp
->isp_osinfo
.pcmd_pool
, M_DEVBUF
);
1534 isp_prt(isp
, ISP_LOGERR
, "could not create master dma tag");
1538 len
= sizeof (isp_hdl_t
) * isp
->isp_maxcmds
;
1539 isp
->isp_xflist
= (isp_hdl_t
*) kmalloc(len
, M_DEVBUF
, M_WAITOK
| M_ZERO
);
1540 if (isp
->isp_xflist
== NULL
) {
1541 kfree(isp
->isp_osinfo
.pcmd_pool
, M_DEVBUF
);
1543 isp_prt(isp
, ISP_LOGERR
, "cannot alloc xflist array");
1546 for (len
= 0; len
< isp
->isp_maxcmds
- 1; len
++) {
1547 isp
->isp_xflist
[len
].cmd
= &isp
->isp_xflist
[len
+1];
1549 isp
->isp_xffree
= isp
->isp_xflist
;
1550 #ifdef ISP_TARGET_MODE
1551 len
= sizeof (isp_hdl_t
) * isp
->isp_maxcmds
;
1552 isp
->isp_tgtlist
= (isp_hdl_t
*) kmalloc(len
, M_DEVBUF
, M_WAITOK
| M_ZERO
);
1553 if (isp
->isp_tgtlist
== NULL
) {
1554 kfree(isp
->isp_osinfo
.pcmd_pool
, M_DEVBUF
);
1555 kfree(isp
->isp_xflist
, M_DEVBUF
);
1557 isp_prt(isp
, ISP_LOGERR
, "cannot alloc tgtlist array");
1560 for (len
= 0; len
< isp
->isp_maxcmds
- 1; len
++) {
1561 isp
->isp_tgtlist
[len
].cmd
= &isp
->isp_tgtlist
[len
+1];
1563 isp
->isp_tgtfree
= isp
->isp_tgtlist
;
1567 * Allocate and map the request and result queues (and ATIO queue
1568 * if we're a 2400 supporting target mode).
1570 len
= ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp
));
1571 len
+= ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp
));
1572 #ifdef ISP_TARGET_MODE
1574 len
+= ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp
));
1578 ns
= (len
/ PAGE_SIZE
) + 1;
1581 * Create a tag for the control spaces. We don't always need this
1582 * to be 32 bits, but we do this for simplicity and speed's sake.
1584 if (isp_dma_tag_create(isp
->isp_osinfo
.dmat
, QENTRY_LEN
, slim
, BUS_SPACE_MAXADDR_32BIT
, BUS_SPACE_MAXADDR
, NULL
, NULL
, len
, ns
, slim
, 0, &isp
->isp_osinfo
.cdmat
)) {
1585 isp_prt(isp
, ISP_LOGERR
, "cannot create a dma tag for control spaces");
1586 kfree(isp
->isp_osinfo
.pcmd_pool
, M_DEVBUF
);
1587 kfree(isp
->isp_xflist
, M_DEVBUF
);
1588 #ifdef ISP_TARGET_MODE
1589 kfree(isp
->isp_tgtlist
, M_DEVBUF
);
1595 if (bus_dmamem_alloc(isp
->isp_osinfo
.cdmat
, (void **)&base
, BUS_DMA_NOWAIT
| BUS_DMA_COHERENT
, &isp
->isp_osinfo
.cdmap
) != 0) {
1596 isp_prt(isp
, ISP_LOGERR
, "cannot allocate %d bytes of CCB memory", len
);
1597 bus_dma_tag_destroy(isp
->isp_osinfo
.cdmat
);
1598 kfree(isp
->isp_osinfo
.pcmd_pool
, M_DEVBUF
);
1599 kfree(isp
->isp_xflist
, M_DEVBUF
);
1600 #ifdef ISP_TARGET_MODE
1601 kfree(isp
->isp_tgtlist
, M_DEVBUF
);
1612 bus_dmamap_load(isp
->isp_osinfo
.cdmat
, isp
->isp_osinfo
.cdmap
, base
, len
, imc
, &im
, 0);
1614 isp_prt(isp
, ISP_LOGERR
, "error %d loading dma map for control areas", im
.error
);
1619 for (cmap
= 0; cmap
< isp
->isp_nchan
; cmap
++) {
1620 struct isp_fc
*fc
= ISP_FC_PC(isp
, cmap
);
1621 if (isp_dma_tag_create(isp
->isp_osinfo
.dmat
, 64, slim
, BUS_SPACE_MAXADDR_32BIT
, BUS_SPACE_MAXADDR
, NULL
, NULL
, ISP_FC_SCRLEN
, 1, slim
, 0, &fc
->tdmat
)) {
1624 if (bus_dmamem_alloc(fc
->tdmat
, (void **)&base
, BUS_DMA_NOWAIT
| BUS_DMA_COHERENT
, &fc
->tdmap
) != 0) {
1625 bus_dma_tag_destroy(fc
->tdmat
);
1632 bus_dmamap_load(fc
->tdmat
, fc
->tdmap
, base
, ISP_FC_SCRLEN
, imc1
, &im
, 0);
1634 bus_dmamem_free(fc
->tdmat
, base
, fc
->tdmap
);
1635 bus_dma_tag_destroy(fc
->tdmat
);
1641 for (i
= 0; i
< isp
->isp_maxcmds
; i
++) {
1642 struct isp_pcmd
*pcmd
= &isp
->isp_osinfo
.pcmd_pool
[i
];
1643 error
= bus_dmamap_create(isp
->isp_osinfo
.dmat
, 0, &pcmd
->dmap
);
1645 isp_prt(isp
, ISP_LOGERR
, "error %d creating per-cmd DMA maps", error
);
1647 bus_dmamap_destroy(isp
->isp_osinfo
.dmat
, isp
->isp_osinfo
.pcmd_pool
[i
].dmap
);
1651 callout_init(&pcmd
->wdog
);
1652 if (i
== isp
->isp_maxcmds
-1) {
1655 pcmd
->next
= &isp
->isp_osinfo
.pcmd_pool
[i
+1];
1658 isp
->isp_osinfo
.pcmd_free
= &isp
->isp_osinfo
.pcmd_pool
[0];
1663 while (--cmap
>= 0) {
1664 struct isp_fc
*fc
= ISP_FC_PC(isp
, cmap
);
1665 bus_dmamem_free(fc
->tdmat
, base
, fc
->tdmap
);
1666 bus_dma_tag_destroy(fc
->tdmat
);
1668 bus_dmamem_free(isp
->isp_osinfo
.cdmat
, base
, isp
->isp_osinfo
.cdmap
);
1669 bus_dma_tag_destroy(isp
->isp_osinfo
.cdmat
);
1670 kfree(isp
->isp_xflist
, M_DEVBUF
);
1671 #ifdef ISP_TARGET_MODE
1672 kfree(isp
->isp_tgtlist
, M_DEVBUF
);
1674 kfree(isp
->isp_osinfo
.pcmd_pool
, M_DEVBUF
);
1675 isp
->isp_rquest
= NULL
;
1683 void *rq
; /* original request */
1688 #define MUSHERR_NOQENTRIES -2
1690 #ifdef ISP_TARGET_MODE
1691 static void tdma2_2(void *, bus_dma_segment_t
*, int, bus_size_t
, int);
1692 static void tdma2(void *, bus_dma_segment_t
*, int, int);
1695 tdma2_2(void *arg
, bus_dma_segment_t
*dm_segs
, int nseg
, bus_size_t mapsize
, int error
)
1699 mp
->mapsize
= mapsize
;
1700 tdma2(arg
, dm_segs
, nseg
, error
);
1704 tdma2(void *arg
, bus_dma_segment_t
*dm_segs
, int nseg
, int error
)
1708 struct ccb_scsiio
*csio
;
1712 mp
= (mush_t
*) arg
;
1717 csio
= mp
->cmd_token
;
1721 if (sizeof (bus_addr_t
) > 4) {
1722 if (nseg
>= ISP_NSEG64_MAX
) {
1723 isp_prt(isp
, ISP_LOGERR
, "number of segments (%d) exceed maximum we can support (%d)", nseg
, ISP_NSEG64_MAX
);
1727 if (rq
->req_header
.rqs_entry_type
== RQSTYPE_CTIO2
) {
1728 rq
->req_header
.rqs_entry_type
= RQSTYPE_CTIO3
;
1731 if (nseg
>= ISP_NSEG_MAX
) {
1732 isp_prt(isp
, ISP_LOGERR
, "number of segments (%d) exceed maximum we can support (%d)", nseg
, ISP_NSEG_MAX
);
1737 if ((csio
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1738 bus_dmamap_sync(isp
->isp_osinfo
.dmat
, PISP_PCMD(csio
)->dmap
, BUS_DMASYNC_PREWRITE
);
1739 ddir
= ISP_TO_DEVICE
;
1740 } else if ((csio
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
) {
1741 bus_dmamap_sync(isp
->isp_osinfo
.dmat
, PISP_PCMD(csio
)->dmap
, BUS_DMASYNC_PREREAD
);
1742 ddir
= ISP_FROM_DEVICE
;
1754 if (isp_send_tgt_cmd(isp
, rq
, dm_segs
, nseg
, XS_XFRLEN(csio
), ddir
, &csio
->sense_data
, csio
->sense_len
) != CMD_QUEUED
) {
1755 mp
->error
= MUSHERR_NOQENTRIES
;
1760 static void dma2_2(void *, bus_dma_segment_t
*, int, bus_size_t
, int);
1761 static void dma2(void *, bus_dma_segment_t
*, int, int);
1764 dma2_2(void *arg
, bus_dma_segment_t
*dm_segs
, int nseg
, bus_size_t mapsize
, int error
)
1768 mp
->mapsize
= mapsize
;
1769 dma2(arg
, dm_segs
, nseg
, error
);
1773 dma2(void *arg
, bus_dma_segment_t
*dm_segs
, int nseg
, int error
)
1777 struct ccb_scsiio
*csio
;
1781 mp
= (mush_t
*) arg
;
1786 csio
= mp
->cmd_token
;
1790 if (sizeof (bus_addr_t
) > 4) {
1791 if (nseg
>= ISP_NSEG64_MAX
) {
1792 isp_prt(isp
, ISP_LOGERR
, "number of segments (%d) exceed maximum we can support (%d)", nseg
, ISP_NSEG64_MAX
);
1796 if (rq
->req_header
.rqs_entry_type
== RQSTYPE_T2RQS
) {
1797 rq
->req_header
.rqs_entry_type
= RQSTYPE_T3RQS
;
1798 } else if (rq
->req_header
.rqs_entry_type
== RQSTYPE_REQUEST
) {
1799 rq
->req_header
.rqs_entry_type
= RQSTYPE_A64
;
1802 if (nseg
>= ISP_NSEG_MAX
) {
1803 isp_prt(isp
, ISP_LOGERR
, "number of segments (%d) exceed maximum we can support (%d)", nseg
, ISP_NSEG_MAX
);
1808 if ((csio
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_IN
) {
1809 bus_dmamap_sync(isp
->isp_osinfo
.dmat
, PISP_PCMD(csio
)->dmap
, BUS_DMASYNC_PREREAD
);
1810 ddir
= ISP_FROM_DEVICE
;
1811 } else if ((csio
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_OUT
) {
1812 bus_dmamap_sync(isp
->isp_osinfo
.dmat
, PISP_PCMD(csio
)->dmap
, BUS_DMASYNC_PREWRITE
);
1813 ddir
= ISP_TO_DEVICE
;
1823 if (isp_send_cmd(isp
, rq
, dm_segs
, nseg
, XS_XFRLEN(csio
), ddir
) != CMD_QUEUED
) {
1824 mp
->error
= MUSHERR_NOQENTRIES
;
1829 isp_pci_dmasetup(ispsoftc_t
*isp
, struct ccb_scsiio
*csio
, void *ff
)
1832 void (*eptr
)(void *, bus_dma_segment_t
*, int, int);
1833 void (*eptr2
)(void *, bus_dma_segment_t
*, int, bus_size_t
, int);
1837 mp
->cmd_token
= csio
;
1842 #ifdef ISP_TARGET_MODE
1843 if (csio
->ccb_h
.func_code
== XPT_CONT_TARGET_IO
) {
1854 if ((csio
->ccb_h
.flags
& CAM_DIR_MASK
) == CAM_DIR_NONE
|| (csio
->dxfer_len
== 0)) {
1855 (*eptr
)(mp
, NULL
, 0, 0);
1856 } else if ((csio
->ccb_h
.flags
& CAM_SCATTER_VALID
) == 0) {
1857 if ((csio
->ccb_h
.flags
& CAM_DATA_PHYS
) == 0) {
1859 error
= bus_dmamap_load(isp
->isp_osinfo
.dmat
, PISP_PCMD(csio
)->dmap
, csio
->data_ptr
, csio
->dxfer_len
, eptr
, mp
, 0);
1861 xpt_print(csio
->ccb_h
.path
, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__
, csio
->data_ptr
, csio
->dxfer_len
, error
);
1864 if (error
== EINPROGRESS
) {
1865 bus_dmamap_unload(isp
->isp_osinfo
.dmat
, PISP_PCMD(csio
)->dmap
);
1867 isp_prt(isp
, ISP_LOGERR
, "deferred dma allocation not supported");
1868 } else if (error
&& mp
->error
== 0) {
1870 isp_prt(isp
, ISP_LOGERR
, "error %d in dma mapping code", error
);
1875 /* Pointer to physical buffer */
1876 struct bus_dma_segment seg
;
1877 seg
.ds_addr
= (bus_addr_t
)(vm_offset_t
)csio
->data_ptr
;
1878 seg
.ds_len
= csio
->dxfer_len
;
1879 (*eptr
)(mp
, &seg
, 1, 0);
1882 struct bus_dma_segment
*segs
;
1884 if ((csio
->ccb_h
.flags
& CAM_DATA_PHYS
) != 0) {
1885 isp_prt(isp
, ISP_LOGERR
, "Physical segment pointers unsupported");
1887 } else if ((csio
->ccb_h
.flags
& CAM_SG_LIST_PHYS
) == 0) {
1892 * We're taking advantage of the fact that
1893 * the pointer/length sizes and layout of the iovec
1894 * structure are the same as the bus_dma_segment
1895 * structure. This might be a little dangerous,
1896 * but only if they change the structures, which
1899 KASSERT((sizeof (sguio
.uio_iov
) == sizeof (csio
->data_ptr
) &&
1900 sizeof (sguio
.uio_iovcnt
) >= sizeof (csio
->sglist_cnt
) &&
1901 sizeof (sguio
.uio_resid
) >= sizeof (csio
->dxfer_len
)), ("Ken's assumption failed"));
1902 sguio
.uio_iov
= (struct iovec
*)csio
->data_ptr
;
1903 sguio
.uio_iovcnt
= csio
->sglist_cnt
;
1904 sguio
.uio_resid
= csio
->dxfer_len
;
1905 sguio
.uio_segflg
= UIO_SYSSPACE
;
1907 error
= bus_dmamap_load_uio(isp
->isp_osinfo
.dmat
, PISP_PCMD(csio
)->dmap
, &sguio
, eptr2
, mp
, 0);
1909 if (error
!= 0 && mp
->error
== 0) {
1910 isp_prt(isp
, ISP_LOGERR
, "error %d in dma mapping code", error
);
1914 /* Just use the segments provided */
1915 segs
= (struct bus_dma_segment
*) csio
->data_ptr
;
1916 (*eptr
)(mp
, segs
, csio
->sglist_cnt
, 0);
1920 int retval
= CMD_COMPLETE
;
1921 if (mp
->error
== MUSHERR_NOQENTRIES
) {
1922 retval
= CMD_EAGAIN
;
1923 } else if (mp
->error
== EFBIG
) {
1924 XS_SETERR(csio
, CAM_REQ_TOO_BIG
);
1925 } else if (mp
->error
== EINVAL
) {
1926 XS_SETERR(csio
, CAM_REQ_INVALID
);
1928 XS_SETERR(csio
, CAM_UNREC_HBA_ERROR
);
1932 return (CMD_QUEUED
);
1936 isp_pci_reset0(ispsoftc_t
*isp
)
1938 ISP_DISABLE_INTS(isp
);
1942 isp_pci_reset1(ispsoftc_t
*isp
)
1944 if (!IS_24XX(isp
)) {
1945 /* Make sure the BIOS is disabled */
1946 isp_pci_wr_reg(isp
, HCCR
, PCI_HCCR_CMD_BIOS
);
1948 /* and enable interrupts */
1949 ISP_ENABLE_INTS(isp
);
1953 isp_pci_dumpregs(ispsoftc_t
*isp
, const char *msg
)
1955 struct isp_pcisoftc
*pcs
= (struct isp_pcisoftc
*)isp
;
1957 kprintf("%s: %s\n", device_get_nameunit(isp
->isp_dev
), msg
);
1959 kprintf("%s:\n", device_get_nameunit(isp
->isp_dev
));
1961 kprintf(" biu_conf1=%x", ISP_READ(isp
, BIU_CONF1
));
1963 kprintf(" biu_csr=%x", ISP_READ(isp
, BIU2100_CSR
));
1964 kprintf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp
, BIU_ICR
),
1965 ISP_READ(isp
, BIU_ISR
), ISP_READ(isp
, BIU_SEMA
));
1966 kprintf("risc_hccr=%x\n", ISP_READ(isp
, HCCR
));
1970 ISP_WRITE(isp
, HCCR
, HCCR_CMD_PAUSE
);
1971 kprintf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1972 ISP_READ(isp
, CDMA_CONF
), ISP_READ(isp
, CDMA_STATUS
),
1973 ISP_READ(isp
, CDMA_FIFO_STS
));
1974 kprintf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1975 ISP_READ(isp
, DDMA_CONF
), ISP_READ(isp
, DDMA_STATUS
),
1976 ISP_READ(isp
, DDMA_FIFO_STS
));
1977 kprintf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1978 ISP_READ(isp
, SXP_INTERRUPT
),
1979 ISP_READ(isp
, SXP_GROSS_ERR
),
1980 ISP_READ(isp
, SXP_PINS_CTRL
));
1981 ISP_WRITE(isp
, HCCR
, HCCR_CMD_RELEASE
);
1983 kprintf(" mbox regs: %x %x %x %x %x\n",
1984 ISP_READ(isp
, OUTMAILBOX0
), ISP_READ(isp
, OUTMAILBOX1
),
1985 ISP_READ(isp
, OUTMAILBOX2
), ISP_READ(isp
, OUTMAILBOX3
),
1986 ISP_READ(isp
, OUTMAILBOX4
));
1987 kprintf(" PCI Status Command/Status=%x\n",
1988 pci_read_config(pcs
->pci_dev
, PCIR_COMMAND
, 1));