2 * Copyright (c) 2000, 2001 Michael Smith
3 * Copyright (c) 2000 BSDi
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/mly/mly_pci.c,v 1.1.2.2 2001/03/05 20:17:24 msmith Exp $
28 * $DragonFly: src/sys/dev/raid/mly/Attic/mly_pci.c,v 1.8 2006/10/25 20:56:01 dillon Exp $
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
34 #include <sys/kernel.h>
37 #include <sys/devicestat.h>
40 #include <sys/thread2.h>
42 #include <bus/pci/pcireg.h>
43 #include <bus/pci/pcivar.h>
/*
 * Forward declarations.
 *
 * First group: newbus device-interface methods for the PCI front end
 * (probe/attach/detach/shutdown/suspend/resume) plus the interrupt handler.
 * Second group: helpers that allocate and bus_dmamap_load() the
 * scatter/gather table and the memory-mailbox region; the *_helper
 * functions are bus_dmamap_load() callbacks that record physical addresses.
 */
49 static int mly_pci_probe(device_t dev
);
50 static int mly_pci_attach(device_t dev
);
51 static int mly_pci_detach(device_t dev
);
52 static int mly_pci_shutdown(device_t dev
);
53 static int mly_pci_suspend(device_t dev
);
54 static int mly_pci_resume(device_t dev
);
55 static void mly_pci_intr(void *arg
);
57 static int mly_sg_map(struct mly_softc
*sc
);
58 static void mly_sg_map_helper(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
);
59 static int mly_mmbox_map(struct mly_softc
*sc
);
60 static void mly_mmbox_map_helper(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
);
/*
 * Newbus method table: wires the generic device interface entry points
 * (probe/attach/detach/shutdown/suspend/resume) to this driver's
 * mly_pci_* implementations.
 *
 * NOTE(review): the terminating sentinel entry and the closing "};" of
 * this array are not visible in this extract — confirm against the
 * original file before building.
 */
62 static device_method_t mly_methods
[] = {
63 /* Device interface */
64 DEVMETHOD(device_probe
, mly_pci_probe
),
65 DEVMETHOD(device_attach
, mly_pci_attach
),
66 DEVMETHOD(device_detach
, mly_pci_detach
),
67 DEVMETHOD(device_shutdown
, mly_pci_shutdown
),
68 DEVMETHOD(device_suspend
, mly_pci_suspend
),
69 DEVMETHOD(device_resume
, mly_pci_resume
),
/*
 * Driver declaration and module registration.
 *
 * mly_pci_driver ties the method table above to a softc of
 * sizeof(struct mly_softc); DRIVER_MODULE() registers the "mly" driver
 * on the "pci" bus with devclass mly_devclass.
 *
 * NOTE(review): the driver name string and methods-table member of the
 * driver_t initializer are missing from this extract — confirm against
 * the original file.
 */
73 static driver_t mly_pci_driver
= {
76 sizeof(struct mly_softc
)
79 static devclass_t mly_devclass
;
80 DRIVER_MODULE(mly
, pci
, mly_pci_driver
, mly_devclass
, 0, 0);
/*
 * Table of supported controllers, matched by PCI vendor/device and
 * subvendor/subdevice IDs.  Each entry also selects the hardware
 * interface flavour (StrongARM vs. i960RX register layout) and the
 * human-readable description installed by the probe routine.
 *
 * NOTE(review): the struct declaration head (before "} mly_identifiers")
 * and the all-zero terminator entry are missing from this extract; the
 * probe/attach loops below rely on a .vendor == 0 sentinel.
 */
90 } mly_identifiers
[] = {
91 {0x1069, 0xba56, 0x1069, 0x0040, MLY_HWIF_STRONGARM
, "Mylex eXtremeRAID 2000"},
92 {0x1069, 0xba56, 0x1069, 0x0030, MLY_HWIF_STRONGARM
, "Mylex eXtremeRAID 3000"},
93 {0x1069, 0x0050, 0x1069, 0x0050, MLY_HWIF_I960RX
, "Mylex AcceleRAID 352"},
94 {0x1069, 0x0050, 0x1069, 0x0052, MLY_HWIF_I960RX
, "Mylex AcceleRAID 170"},
95 {0x1069, 0x0050, 0x1069, 0x0054, MLY_HWIF_I960RX
, "Mylex AcceleRAID 160"},
99 /********************************************************************************
100 ********************************************************************************
102 ********************************************************************************
103 ********************************************************************************/
/*
 * Device probe: walk mly_identifiers[] looking for a match on the PCI
 * vendor/device IDs, and — unless the table entry's subvendor is the
 * wildcard 0 — on the subvendor/subdevice IDs as well.  On a match the
 * entry's description is installed and a low-priority success value
 * (-10) is returned so a more specific driver may still claim the device.
 *
 * NOTE(review): the function's opening brace, the declaration of the
 * cursor 'm', the loop-exhausted failure return (ENXIO), and the closing
 * brace are missing from this extract.
 */
106 mly_pci_probe(device_t dev
)
112 for (m
= mly_identifiers
; m
->vendor
!= 0; m
++) {
113 if ((m
->vendor
== pci_get_vendor(dev
)) &&
114 (m
->device
== pci_get_device(dev
)) &&
/* wildcard: subvendor 0 in the table matches any subsystem IDs */
115 ((m
->subvendor
== 0) || ((m
->subvendor
== pci_get_subvendor(dev
)) &&
116 (m
->subdevice
== pci_get_subdevice(dev
))))) {
118 device_set_desc(dev
, m
->desc
);
119 return(-10); /* allow room to be overridden */
/*
 * Device attach: bring the controller to an operational state.
 *
 * Visible steps in this extract:
 *  1. Zero the softc.
 *  2. Enable PCI bus-mastering and verify it sticks; require the memory
 *     window (PCIM_CMD_MEMEN) to be enabled.
 *  3. Map the register window (first BAR) and cache bus tag/handle.
 *  4. Allocate a shareable IRQ and hook mly_pci_intr.
 *  5. Create the parent, data-buffer, and command-packet DMA tags.
 *  6. Select register offsets for the i960RX or StrongARM interface
 *     flavour from mly_identifiers[].
 *  7. Map the s/g tables and memory mailbox, then hand off to the
 *     bus-independent mly_attach().
 *
 * NOTE(review): many interior lines are missing from this extract —
 * opening/closing braces, 'error'/'command'/'i' declarations, the
 * error-path cleanup (presumably via mly_free) and return statements,
 * and the 'break' statements between the switch cases.  Do not treat
 * this extract as compilable.
 */
126 mly_pci_attach(device_t dev
)
128 struct mly_softc
*sc
;
137 sc
= device_get_softc(dev
);
138 bzero(sc
, sizeof(*sc
));
142 if (device_get_unit(sc
->mly_dev
) == 0)
146 /* assume failure is 'not configured' */
150 * Verify that the adapter is correctly set up in PCI space.
/* Enable bus-mastering, then read back to confirm the write took effect. */
152 command
= pci_read_config(sc
->mly_dev
, PCIR_COMMAND
, 2);
153 command
|= PCIM_CMD_BUSMASTEREN
;
154 pci_write_config(dev
, PCIR_COMMAND
, command
, 2);
155 command
= pci_read_config(sc
->mly_dev
, PCIR_COMMAND
, 2);
156 if (!(command
& PCIM_CMD_BUSMASTEREN
)) {
157 mly_printf(sc
, "can't enable busmaster feature\n");
160 if ((command
& PCIM_CMD_MEMEN
) == 0) {
161 mly_printf(sc
, "memory window not available\n");
166 * Allocate the PCI register window.
168 sc
->mly_regs_rid
= PCIR_MAPS
; /* first base address register */
169 if ((sc
->mly_regs_resource
= bus_alloc_resource(sc
->mly_dev
, SYS_RES_MEMORY
, &sc
->mly_regs_rid
,
170 0, ~0, 1, RF_ACTIVE
)) == NULL
) {
171 mly_printf(sc
, "can't allocate register window\n");
174 sc
->mly_btag
= rman_get_bustag(sc
->mly_regs_resource
);
175 sc
->mly_bhandle
= rman_get_bushandle(sc
->mly_regs_resource
);
178 * Allocate and connect our interrupt.
181 if ((sc
->mly_irq
= bus_alloc_resource(sc
->mly_dev
, SYS_RES_IRQ
, &sc
->mly_irq_rid
,
182 0, ~0, 1, RF_SHAREABLE
| RF_ACTIVE
)) == NULL
) {
183 mly_printf(sc
, "can't allocate interrupt\n");
186 error
= bus_setup_intr(sc
->mly_dev
, sc
->mly_irq
, 0,
187 mly_pci_intr
, sc
, &sc
->mly_intr
, NULL
);
189 mly_printf(sc
, "can't set up interrupt\n");
193 /* assume failure is 'out of memory' */
197 * Allocate the parent bus DMA tag appropriate for our PCI interface.
199 * Note that all of these controllers are 64-bit capable.
201 if (bus_dma_tag_create(NULL
, /* parent */
202 1, 0, /* alignment, boundary */
203 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
204 BUS_SPACE_MAXADDR
, /* highaddr */
205 NULL
, NULL
, /* filter, filterarg */
206 MAXBSIZE
, MLY_MAXSGENTRIES
, /* maxsize, nsegments */
207 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
208 BUS_DMA_ALLOCNOW
, /* flags */
209 &sc
->mly_parent_dmat
)) {
210 mly_printf(sc
, "can't allocate parent DMA tag\n");
215 * Create DMA tag for mapping buffers into controller-addressable space.
217 if (bus_dma_tag_create(sc
->mly_parent_dmat
, /* parent */
218 1, 0, /* alignment, boundary */
219 BUS_SPACE_MAXADDR
, /* lowaddr */
220 BUS_SPACE_MAXADDR
, /* highaddr */
221 NULL
, NULL
, /* filter, filterarg */
222 MAXBSIZE
, MLY_MAXSGENTRIES
, /* maxsize, nsegments */
223 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
225 &sc
->mly_buffer_dmat
)) {
226 mly_printf(sc
, "can't allocate buffer DMA tag\n");
231 * Initialise the DMA tag for command packets.
/* One contiguous segment holding the packet array for all commands. */
233 if (bus_dma_tag_create(sc
->mly_parent_dmat
, /* parent */
234 1, 0, /* alignment, boundary */
235 BUS_SPACE_MAXADDR
, /* lowaddr */
236 BUS_SPACE_MAXADDR
, /* highaddr */
237 NULL
, NULL
, /* filter, filterarg */
238 sizeof(union mly_command_packet
) * MLY_MAXCOMMANDS
, 1, /* maxsize, nsegments */
239 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
241 &sc
->mly_packet_dmat
)) {
242 mly_printf(sc
, "can't allocate command packet DMA tag\n");
247 * Detect the hardware interface version
/* Re-match the ID table to pick the register layout for this board. */
249 for (i
= 0; mly_identifiers
[i
].vendor
!= 0; i
++) {
250 if ((mly_identifiers
[i
].vendor
== pci_get_vendor(dev
)) &&
251 (mly_identifiers
[i
].device
== pci_get_device(dev
))) {
252 sc
->mly_hwif
= mly_identifiers
[i
].hwif
;
253 switch(sc
->mly_hwif
) {
254 case MLY_HWIF_I960RX
:
255 debug(2, "set hardware up for i960RX");
256 sc
->mly_doorbell_true
= 0x00;
257 sc
->mly_command_mailbox
= MLY_I960RX_COMMAND_MAILBOX
;
258 sc
->mly_status_mailbox
= MLY_I960RX_STATUS_MAILBOX
;
259 sc
->mly_idbr
= MLY_I960RX_IDBR
;
260 sc
->mly_odbr
= MLY_I960RX_ODBR
;
261 sc
->mly_error_status
= MLY_I960RX_ERROR_STATUS
;
262 sc
->mly_interrupt_status
= MLY_I960RX_INTERRUPT_STATUS
;
263 sc
->mly_interrupt_mask
= MLY_I960RX_INTERRUPT_MASK
;
/* NOTE(review): the 'break' between these cases is not visible in this extract. */
265 case MLY_HWIF_STRONGARM
:
266 debug(2, "set hardware up for StrongARM");
267 sc
->mly_doorbell_true
= 0xff; /* doorbell 'true' is 0 */
268 sc
->mly_command_mailbox
= MLY_STRONGARM_COMMAND_MAILBOX
;
269 sc
->mly_status_mailbox
= MLY_STRONGARM_STATUS_MAILBOX
;
270 sc
->mly_idbr
= MLY_STRONGARM_IDBR
;
271 sc
->mly_odbr
= MLY_STRONGARM_ODBR
;
272 sc
->mly_error_status
= MLY_STRONGARM_ERROR_STATUS
;
273 sc
->mly_interrupt_status
= MLY_STRONGARM_INTERRUPT_STATUS
;
274 sc
->mly_interrupt_mask
= MLY_STRONGARM_INTERRUPT_MASK
;
282 * Create the scatter/gather mappings.
284 if ((error
= mly_sg_map(sc
)))
288 * Allocate and map the memory mailbox
290 if ((error
= mly_mmbox_map(sc
)))
294 * Do bus-independent initialisation.
296 if ((error
= mly_attach(sc
)))
306 /********************************************************************************
307 * Disconnect from the controller completely, in preparation for unload.
/*
 * Refuses to detach while the control device is open (MLY_STATE_OPEN),
 * then reuses mly_pci_shutdown() to quiesce the hardware.
 *
 * NOTE(review): the opening brace, the EBUSY/error returns, the final
 * resource teardown (presumably mly_free) and closing brace are missing
 * from this extract.
 */
310 mly_pci_detach(device_t dev
)
312 struct mly_softc
*sc
= device_get_softc(dev
);
317 if (sc
->mly_state
& MLY_STATE_OPEN
)
320 if ((error
= mly_pci_shutdown(dev
)))
328 /********************************************************************************
329 * Bring the controller down to a dormant state and detach all child devices.
331 * This function is called before detach or system shutdown.
333 * Note that we can assume that the camq on the controller is empty, as we won't
334 * allow shutdown if any device is open.
/*
 * NOTE(review): only the signature and softc lookup are visible here;
 * the body that actually quiesces the controller is missing from this
 * extract.
 */
337 mly_pci_shutdown(device_t dev
)
339 struct mly_softc
*sc
= device_get_softc(dev
);
347 /********************************************************************************
348 * Bring the controller to a quiescent state, ready for system suspend.
350 * We can't assume that the controller is not active at this point, so we need
351 * to mask interrupts.
/*
 * NOTE(review): only the signature and softc lookup are visible here;
 * the interrupt-masking / state-flag body is missing from this extract.
 */
354 mly_pci_suspend(device_t dev
)
356 struct mly_softc
*sc
= device_get_softc(dev
);
365 /********************************************************************************
366 * Bring the controller back to a state ready for operation.
/*
 * Clears the suspend flag and re-enables controller interrupts.
 * NOTE(review): the opening brace, return, and closing brace are missing
 * from this extract.
 */
369 mly_pci_resume(device_t dev
)
371 struct mly_softc
*sc
= device_get_softc(dev
);
374 sc
->mly_state
&= ~MLY_STATE_SUSPEND
;
375 MLY_UNMASK_INTERRUPTS(sc
);
379 /*******************************************************************************
380 * Take an interrupt, or be poked by other code to look for interrupt-worthy
/*
 * Interrupt handler; 'arg' is the softc registered in bus_setup_intr().
 * NOTE(review): only the softc cast is visible — the call that collects
 * completed commands and starts waiting ones is missing from this
 * extract.
 */
384 mly_pci_intr(void *arg
)
386 struct mly_softc
*sc
= (struct mly_softc
*)arg
;
390 /* collect finished commands, queue anything waiting */
394 /********************************************************************************
395 ********************************************************************************
396 Bus-dependant Resource Management
397 ********************************************************************************
398 ********************************************************************************/
400 /********************************************************************************
401 * Allocate memory for the scatter/gather tables
/*
 * Creates a single-segment DMA tag big enough for every command's s/g
 * list (MLY_MAXCOMMANDS * MLY_MAXSGENTRIES entries), allocates the
 * table, and loads it; mly_sg_map_helper() records the bus address.
 *
 * NOTE(review): the opening brace, 'segsize' declaration, the tag/dmamap
 * output arguments of bus_dma_tag_create(), the error returns, and the
 * closing brace are missing from this extract.
 */
404 mly_sg_map(struct mly_softc
*sc
)
411 * Create a single tag describing a region large enough to hold all of
412 * the s/g lists we will need.
414 segsize
= sizeof(struct mly_sg_entry
) * MLY_MAXCOMMANDS
* MLY_MAXSGENTRIES
;
415 if (bus_dma_tag_create(sc
->mly_parent_dmat
, /* parent */
416 1, 0, /* alignment, boundary */
417 BUS_SPACE_MAXADDR
, /* lowaddr */
418 BUS_SPACE_MAXADDR
, /* highaddr */
419 NULL
, NULL
, /* filter, filterarg */
420 segsize
, 1, /* maxsize, nsegments */
421 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
424 mly_printf(sc
, "can't allocate scatter/gather DMA tag\n");
429 * Allocate enough s/g maps for all commands and permanently map them into
430 * controller-visible space.
432 * XXX this assumes we can get enough space for all the s/g maps in one
435 if (bus_dmamem_alloc(sc
->mly_sg_dmat
, (void **)&sc
->mly_sg_table
, BUS_DMA_NOWAIT
, &sc
->mly_sg_dmamap
)) {
436 mly_printf(sc
, "can't allocate s/g table\n");
439 bus_dmamap_load(sc
->mly_sg_dmat
, sc
->mly_sg_dmamap
, sc
->mly_sg_table
, segsize
, mly_sg_map_helper
, sc
, 0);
443 /********************************************************************************
444 * Save the physical address of the base of the s/g table.
/*
 * bus_dmamap_load() callback for the s/g table: stashes the single
 * segment's bus address in the softc.  'nseg' and 'error' are part of
 * the callback signature; this extract shows no use of them (the tag
 * was created with nsegments == 1).
 */
447 mly_sg_map_helper(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
)
449 struct mly_softc
*sc
= (struct mly_softc
*)arg
;
453 /* save base of s/g table's address in bus space */
454 sc
->mly_sg_busaddr
= segs
->ds_addr
;
457 /********************************************************************************
458 * Allocate memory for the memory-mailbox interface
/*
 * Creates a single-segment DMA tag sized for struct mly_mmbox, allocates
 * and loads the mailbox buffer (mly_mmbox_map_helper() records its bus
 * address), and zeroes it.
 *
 * NOTE(review): the opening brace, the BUS_DMA_ALLOCNOW-style flags line
 * of bus_dma_tag_create(), the error returns, and the closing brace /
 * final return are missing from this extract.
 */
461 mly_mmbox_map(struct mly_softc
*sc
)
465 * Create a DMA tag for a single contiguous region large enough for the
466 * memory mailbox structure.
468 if (bus_dma_tag_create(sc
->mly_parent_dmat
, /* parent */
469 1, 0, /* alignment, boundary */
470 BUS_SPACE_MAXADDR
, /* lowaddr */
471 BUS_SPACE_MAXADDR
, /* highaddr */
472 NULL
, NULL
, /* filter, filterarg */
473 sizeof(struct mly_mmbox
), 1, /* maxsize, nsegments */
474 BUS_SPACE_MAXSIZE_32BIT
, /* maxsegsize */
476 &sc
->mly_mmbox_dmat
)) {
477 mly_printf(sc
, "can't allocate memory mailbox DMA tag\n");
482 * Allocate the buffer
484 if (bus_dmamem_alloc(sc
->mly_mmbox_dmat
, (void **)&sc
->mly_mmbox
, BUS_DMA_NOWAIT
, &sc
->mly_mmbox_dmamap
)) {
485 mly_printf(sc
, "can't allocate memory mailbox\n");
488 bus_dmamap_load(sc
->mly_mmbox_dmat
, sc
->mly_mmbox_dmamap
, sc
->mly_mmbox
, sizeof(struct mly_mmbox
),
489 mly_mmbox_map_helper
, sc
, 0);
490 bzero(sc
->mly_mmbox
, sizeof(*sc
->mly_mmbox
));
495 /********************************************************************************
496 * Save the physical address of the memory mailbox
/*
 * bus_dmamap_load() callback for the memory mailbox: records the single
 * segment's bus address in the softc.  Mirrors mly_sg_map_helper().
 */
499 mly_mmbox_map_helper(void *arg
, bus_dma_segment_t
*segs
, int nseg
, int error
)
501 struct mly_softc
*sc
= (struct mly_softc
*)arg
;
505 sc
->mly_mmbox_busaddr
= segs
->ds_addr
;
508 /********************************************************************************
509 * Free all of the resources associated with (sc)
511 * Should not be called if the controller is active.
/*
 * Teardown in roughly reverse order of mly_pci_attach(): per-command
 * data maps, packet storage, controller info/param allocations, the
 * buffer/sg/mmbox DMA memory and tags, the interrupt, the parent tag,
 * and finally the register window.  Each step is guarded so the
 * function is safe to call on a partially-attached softc.
 *
 * NOTE(review): the opening/closing braces and several guard lines
 * (e.g. the NULL checks presumably wrapping the mmbox and sg teardown,
 * and the sg-table closing brace) are missing from this extract.
 */
514 mly_free(struct mly_softc
*sc
)
516 struct mly_command
*mc
;
520 /* detach from CAM */
523 /* throw away command buffer DMA maps */
/* Drain the free-command pool, destroying each command's data map. */
524 while (mly_alloc_command(sc
, &mc
) == 0)
525 bus_dmamap_destroy(sc
->mly_buffer_dmat
, mc
->mc_datamap
);
527 /* release the packet storage */
528 if (sc
->mly_packet
!= NULL
) {
529 bus_dmamap_unload(sc
->mly_packet_dmat
, sc
->mly_packetmap
);
530 bus_dmamem_free(sc
->mly_packet_dmat
, sc
->mly_packet
, sc
->mly_packetmap
);
533 /* throw away the controllerinfo structure */
534 if (sc
->mly_controllerinfo
!= NULL
)
535 kfree(sc
->mly_controllerinfo
, M_DEVBUF
);
537 /* throw away the controllerparam structure */
538 if (sc
->mly_controllerparam
!= NULL
)
539 kfree(sc
->mly_controllerparam
, M_DEVBUF
);
541 /* destroy data-transfer DMA tag */
542 if (sc
->mly_buffer_dmat
)
543 bus_dma_tag_destroy(sc
->mly_buffer_dmat
);
545 /* free and destroy DMA memory and tag for s/g lists */
546 if (sc
->mly_sg_table
) {
547 bus_dmamap_unload(sc
->mly_sg_dmat
, sc
->mly_sg_dmamap
);
548 bus_dmamem_free(sc
->mly_sg_dmat
, sc
->mly_sg_table
, sc
->mly_sg_dmamap
);
551 bus_dma_tag_destroy(sc
->mly_sg_dmat
);
553 /* free and destroy DMA memory and tag for memory mailbox */
555 bus_dmamap_unload(sc
->mly_mmbox_dmat
, sc
->mly_mmbox_dmamap
);
556 bus_dmamem_free(sc
->mly_mmbox_dmat
, sc
->mly_mmbox
, sc
->mly_mmbox_dmamap
);
558 if (sc
->mly_mmbox_dmat
)
559 bus_dma_tag_destroy(sc
->mly_mmbox_dmat
);
561 /* disconnect the interrupt handler */
563 bus_teardown_intr(sc
->mly_dev
, sc
->mly_irq
, sc
->mly_intr
);
564 if (sc
->mly_irq
!= NULL
)
565 bus_release_resource(sc
->mly_dev
, SYS_RES_IRQ
, sc
->mly_irq_rid
, sc
->mly_irq
);
567 /* destroy the parent DMA tag */
568 if (sc
->mly_parent_dmat
)
569 bus_dma_tag_destroy(sc
->mly_parent_dmat
);
571 /* release the register window mapping */
572 if (sc
->mly_regs_resource
!= NULL
)
573 bus_release_resource(sc
->mly_dev
, SYS_RES_MEMORY
, sc
->mly_regs_rid
, sc
->mly_regs_resource
);