/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#include "../vme_bridge.h"
#include "vme_ca91cx42.h"
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);
/* Module parameters */
static int geoid;
static int vme_slotnum = -1;

struct vme_bridge *ca91cx42_bridge;
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t lm_queue;
wait_queue_head_t mbox_queue;

void (*lm_callback[4])(int);	/* Called in interrupt handler, be careful! */
void *crcsr_kernel;
dma_addr_t crcsr_bus;

struct mutex vme_rmw;	/* Only one RMW cycle at a time */
struct mutex vme_int;	/*
			 * Only one VME interrupt can be
			 * generated at a time, provide locking
			 */

static char driver_name[] = "vme_ca91cx42";
static struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
static u32 ca91cx42_DMA_irqhandler(void)
{
	wake_up(&dma_queue);

	return CA91CX42_LINT_DMA;
}
static u32 ca91cx42_LM_irqhandler(u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}
/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(int mbox_mask)
{
	wake_up(&mbox_queue);

	return CA91CX42_LINT_MBOX;
}
static u32 ca91cx42_IACK_irqhandler(void)
{
	wake_up(&iack_queue);

	return CA91CX42_LINT_SW_IACK;
}
int ca91cx42_bus_error_chk(int clrflag)
{
	int tmp;

	tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
	if (tmp & 0x08000000) {	/* S_TA is Set */
		if (clrflag)
			iowrite32(tmp | 0x08000000,
				ca91cx42_bridge->base + PCI_COMMAND);
		return 1;
	}

	return 0;
}
static u32 ca91cx42_VERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}
static u32 ca91cx42_LERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_LERR;
}
static u32 ca91cx42_VIRQ_irqhandler(int stat)
{
	int vec, i, serviced = 0;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(ca91cx42_bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			vme_irq_handler(ca91cx42_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
{
	u32 stat, enable, serviced = 0;

	if (dev_id != ca91cx42_bridge->base)
		return IRQ_NONE;

	enable = ioread32(ca91cx42_bridge->base + LINT_EN);
	stat = ioread32(ca91cx42_bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler();
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler();
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler();
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler();
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(stat);

	/* Clear serviced interrupts */
	iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
	int result, tmp;
	struct pci_dev *pdev;

	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	mutex_init(&(bridge->irq_mtx));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, pdev);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, ca91cx42_bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

	free_irq(pdev->irq, pdev);
}
/*
 * Set up a VME interrupt
 */
void ca91cx42_irq_set(int level, int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;

	/* Enable IRQ level */
	tmp = ioread32(ca91cx42_bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}
}
int ca91cx42_irq_generate(int level, int statid)
{
	u32 tmp;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(vme_int));

	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

	iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	wait_event_interruptible(iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	mutex_unlock(&(vme_int));

	return 0;
}
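/*
 * Illustrative use (not part of the original file): a caller wanting to
 * raise a level 3 VME interrupt with an even Status/ID of 0xAA would call
 * ca91cx42_irq_generate(3, 0xaa); odd Status/ID values are rejected above
 * because the Universe can only generate even vectors.
 */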
int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
	}

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	vme_bound = vme_base + size - granularity;
	pci_offset = pci_base - vme_base;

	/* XXX Need to check that vme_base, vme_bound and pci_offset aren't
	 * too big for registers
	 */

	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		printk(KERN_ERR "Invalid PCI Offset alignment\n");
		return -EINVAL;
	}
	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeIn->wrPostEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PWEN;
	if (vmeIn->prefetchEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PREN;
	if (vmeIn->rmwLock)
		temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
	if (vmeIn->data64BitCapable)
		temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
#endif

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}
/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->pci_resource.end -
		image->pci_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		if (image->pci_resource.name != NULL)
			kfree(image->pci_resource.name);
		release_resource(&(image->pci_resource));
		memset(&(image->pci_resource), 0, sizeof(struct resource));
	}

	if (image->pci_resource.name == NULL) {
		image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
		if (image->pci_resource.name == NULL) {
			printk(KERN_ERR "Unable to allocate memory for resource"
				" name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->pci_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->pci_resource.start = 0;
	image->pci_resource.end = (unsigned long)size;
	image->pci_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		printk(KERN_ERR "Failed to allocate mem resource for "
			"window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->pci_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->pci_resource.start, size);
	if (image->kern_base == NULL) {
		printk(KERN_ERR "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	iounmap(image->kern_base);
	image->kern_base = NULL;
err_remap:
	release_resource(&(image->pci_resource));
err_resource:
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
err_name:
	return retval;
}
/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->pci_resource));
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
}
int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;

	/* Verify input data */
	if (vme_base & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/* XXX We should do this much later, so that we can exit without
	 * needing to redo the mapping...
	 */
	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependant stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Unable to allocate memory for resource "
			"name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->pci_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + (size - 0x1000);
	vme_offset = vme_base - pci_base;
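	/*
	 * Worked example (illustrative only): with a 64kB window whose PCI
	 * resource was allocated at pci_base 0x90000000, pci_bound =
	 * 0x90000000 + (0x10000 - 0x1000) = 0x9000F000.  The translation
	 * offset is simply vme_base - pci_base, which the 32-bit translation
	 * offset register applies modulo 2^32.
	 */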
	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeOut->wrPostEnable)
		temp_ctl |= 0x40000000;
#endif

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));

	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;

	i = image->number;

	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (pci_bound - pci_base) + 0x1000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (ctl & 0x40000000)
		vmeOut->wrPostEnable = 1;
#endif

	return 0;
}
int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int retval;

	spin_lock(&(image->lock));

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&(image->lock));

	return retval;
}
ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	ssize_t retval;

	spin_lock(&(image->lock));

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}
ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	ssize_t retval;

	spin_lock(&(image->lock));

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}
int ca91cx42_slot_get(void)
{
	u32 slot = 0;

	slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
	slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);

	return (int)slot;
}
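/*
 * Illustrative example (not from the original source): a VCSR_BS value of
 * 0x18000000 has bits 31:27 equal to 3, so ca91cx42_slot_get() returns 3.
 */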
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;

	/* XXX We may need to set this somehow as the Universe II does not
	 * support geographical addressing.
	 */
	if (vme_slotnum != -1)
		iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);

	slot = ca91cx42_slot_get();
	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&crcsr_bus);
	if (crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	crcsr_addr = slot * (512 * 1024);
	iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);

	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	return 0;
}
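/*
 * Worked example (illustrative only): CR/CSR space allots 512kB per slot,
 * so a board in slot 3 has its image at CR/CSR address 3 * 0x80000 =
 * 0x180000; VCSR_TO is programmed so that accesses to that range land in
 * the buffer at crcsr_bus.
 */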
static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
{
	u32 tmp;

	/* Turn off CR/CSR space */
	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	iowrite32(0, ca91cx42_bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
}
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_bridge->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	/* XXX These need to be moved to the vme_bridge structure */
	init_waitqueue_head(&dma_queue);
	init_waitqueue_head(&iack_queue);
	mutex_init(&(vme_int));
	mutex_init(&(vme_rmw));

	ca91cx42_bridge->parent = &(pdev->dev);
	strcpy(ca91cx42_bridge->name, driver_name);

	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}
	/* Add master windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&(master_image->lock));
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&(master_image->pci_resource), 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&(master_image->list),
			&(ca91cx42_bridge->master_resources));
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&(slave_image->mtx));
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&(slave_image->list),
			&(ca91cx42_bridge->slave_resources));
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&(dma_ctrlr->mtx));
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		INIT_LIST_HEAD(&(dma_ctrlr->pending));
		INIT_LIST_HEAD(&(dma_ctrlr->running));
		list_add_tail(&(dma_ctrlr->list),
			&(ca91cx42_bridge->dma_resources));
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&(lm->mtx));
	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));
	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;

	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;

	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;

	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;

	ca91cx42_bridge->slot_get = ca91cx42_slot_get;

	data = ioread32(ca91cx42_bridge->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());

	if (ca91cx42_crcsr_init(pdev)) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		retval = -EINVAL;
		goto err_crcsr;
	}

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	return 0;
	vme_unregister_bridge(ca91cx42_bridge);
err_reg:
	ca91cx42_crcsr_exit(pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);
err_irq:
err_test:
	iounmap(ca91cx42_bridge->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}
void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	iowrite32(0, ca91cx42_bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(pdev);

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);

	iounmap(ca91cx42_bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(ca91cx42_bridge);
}
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);
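/*
 * Example usage (illustrative only, assuming the module is built as
 * vme_ca91cx42.ko): "modprobe vme_ca91cx42 geoid=4" asks the driver to
 * override the geographical address with slot 4.
 */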
MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);
/*----------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

#define SWIZZLE(X) (((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | \
		    ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
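/*
 * SWIZZLE() reverses the byte order of a 32-bit value, e.g.
 * SWIZZLE(0x11223344) == 0x44332211 (illustrative note, not part of the
 * original source).
 */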
int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
{
	int temp_ctl = 0;
	int tempBS = 0;
	int tempBD = 0;
	int tempTO = 0;
	int vmeBS = 0;
	int vmeBD = 0;
	int *rmw_pci_data_ptr = NULL;
	int *vaDataPtr = NULL;
	int i;
	vmeOutWindowCfg_t vmeOut;

	if (vmeRmw->maxAttempts < 1) {
		return -EINVAL;
	}
	if (vmeRmw->targetAddrU) {
		return -EINVAL;
	}

	/* Find the PCI address that maps to the desired VME address */
	for (i = 0; i < 8; i++) {
		temp_ctl = ioread32(ca91cx42_bridge->base +
			CA91CX42_LSI_CTL[i]);
		if ((temp_ctl & 0x80000000) == 0) {
			continue;
		}
		memset(&vmeOut, 0, sizeof(vmeOut));
		vmeOut.windowNbr = i;
		ca91cx42_get_out_bound(&vmeOut);
		if (vmeOut.addrSpace != vmeRmw->addrSpace) {
			continue;
		}
		tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
		tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
		tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
		vmeBS = tempBS + tempTO;
		vmeBD = tempBD + tempTO;
		if ((vmeRmw->targetAddr >= vmeBS) &&
		    (vmeRmw->targetAddr < vmeBD)) {
			rmw_pci_data_ptr =
			    (int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
			vaDataPtr =
			    (int *)(out_image_va[i] +
				    (vmeRmw->targetAddr - vmeBS));
			break;
		}
	}

	/* If no window - fail. */
	if (rmw_pci_data_ptr == NULL) {
		return -EINVAL;
	}
	/* Setup the RMW registers. */
	iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
	iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
	iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
		SCYC_CMP);
	iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
	iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
	iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);
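	/*
	 * Illustrative note (not in the original source): the loop below
	 * triggers the special cycle by reading through vaDataPtr and
	 * treats the RMW as successful once the bits selected by enableMask
	 * read back equal to the corresponding bits of swapData, retrying up
	 * to maxAttempts times.
	 */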
	/* Run the RMW cycle until either success or max attempts. */
	vmeRmw->numAttempts = 1;
	while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {

		if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
		    (vmeRmw->swapData & vmeRmw->enableMask)) {

			iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
			break;

		}
		vmeRmw->numAttempts++;
	}

	/* If no success, set num Attempts to be greater than max attempts */
	if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
		vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
	}

	return 0;
}
int uniSetupDctlReg(vmeDmaPacket_t *vmeDma, int *dctlregreturn)
{
	unsigned int dctlreg = 0x80;
	struct vmeAttr *vmeAttr;

	if (vmeDma->srcBus == VME_DMA_VME) {
		dctlreg = 0;
		vmeAttr = &vmeDma->srcVmeAttr;
	} else {
		dctlreg = 0x80000000;
		vmeAttr = &vmeDma->dstVmeAttr;
	}

	switch (vmeAttr->maxDataWidth) {
	case VME_D8:
		break;
	case VME_D16:
		dctlreg |= 0x00400000;
		break;
	case VME_D32:
		dctlreg |= 0x00800000;
		break;
	case VME_D64:
		dctlreg |= 0x00C00000;
		break;
	}

	switch (vmeAttr->addrSpace) {
	case VME_A16:
		break;
	case VME_A24:
		dctlreg |= 0x00010000;
		break;
	case VME_A32:
		dctlreg |= 0x00020000;
		break;
	case VME_USER1:
		dctlreg |= 0x00060000;
		break;
	case VME_USER2:
		dctlreg |= 0x00070000;
		break;

	case VME_A64:		/* not supported in Universe DMA */
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
	}
	if (vmeAttr->userAccessType == VME_PROG) {
		dctlreg |= 0x00004000;
	}
	if (vmeAttr->dataAccessType == VME_SUPER) {
		dctlreg |= 0x00001000;
	}
	if (vmeAttr->xferProtocol != VME_SCT) {
		dctlreg |= 0x00000100;
	}
	*dctlregreturn = dctlreg;
	return 0;
}
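/*
 * Worked example (illustrative only): a PCI-to-VME transfer (direction bit
 * 31 set) of D32 data in A32 space using a block-transfer protocol yields
 * dctlreg = 0x80000000 | 0x00800000 | 0x00020000 | 0x00000100 = 0x80820100.
 */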
unsigned int
ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
{
	unsigned int val;

	/* Setup registers as needed for direct or chained. */
	if (dgcsreg & 0x8000000) {
		iowrite32(0, ca91cx42_bridge->base + DTBC);
		iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
	} else {
#if 0
		printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
		printk(KERN_ERR "Starting: DVA  = %08x\n",
			ioread32(&vmeLL->dva));
		printk(KERN_ERR "Starting: DLV  = %08x\n",
			ioread32(&vmeLL->dlv));
		printk(KERN_ERR "Starting: DTBC = %08x\n",
			ioread32(&vmeLL->dtbc));
		printk(KERN_ERR "Starting: DCTL = %08x\n",
			ioread32(&vmeLL->dctl));
#endif
		/* Write registers */
		iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
		iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
		iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
		iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
		iowrite32(0, ca91cx42_bridge->base + DCPP);
	}

	/* Start the operation */
	iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
	val = get_tbl();
	iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
	return val;
}
TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t *vmeDma)
{
	vmeDmaPacket_t *vmeCur;
	int maxPerPage;
	int currentLLcount;
	TDMA_Cmd_Packet *startLL;
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dctlreg = 0;

	maxPerPage = PAGESIZE / sizeof(TDMA_Cmd_Packet) - 1;
	startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
	if (startLL == 0) {
		return startLL;
	}
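	/*
	 * Illustrative arithmetic (assumes a 4kB PAGESIZE and a 32-byte
	 * TDMA_Cmd_Packet, neither of which is guaranteed by this file):
	 * 4096 / 32 - 1 = 127 descriptors per page, leaving one descriptor
	 * slot of headroom per page.
	 */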
	/* First allocate pages for descriptors and create linked list */
	vmeCur = vmeDma;
	currentLL = startLL;
	currentLLcount = 0;
	while (vmeCur != 0) {
		if (vmeCur->pNextPacket != 0) {
			currentLL->dcpp = (unsigned int)(currentLL + 1);
			currentLLcount++;
			if (currentLLcount >= maxPerPage) {
				currentLL->dcpp =
				    __get_free_pages(GFP_KERNEL, 0);
				currentLLcount = 0;
			}
			currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		} else {
			currentLL->dcpp = (unsigned int)0;
		}
		vmeCur = vmeCur->pNextPacket;
	}

	/* Next fill in information for each descriptor */
	vmeCur = vmeDma;
	currentLL = startLL;
	while (vmeCur != 0) {
		if (vmeCur->srcBus == VME_DMA_VME) {
			iowrite32(vmeCur->srcAddr, &currentLL->dva);
			iowrite32(vmeCur->dstAddr, &currentLL->dlv);
		} else {
			iowrite32(vmeCur->srcAddr, &currentLL->dlv);
			iowrite32(vmeCur->dstAddr, &currentLL->dva);
		}
		uniSetupDctlReg(vmeCur, &dctlreg);
		iowrite32(dctlreg, &currentLL->dctl);
		iowrite32(vmeCur->byteCount, &currentLL->dtbc);

		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		vmeCur = vmeCur->pNextPacket;
	}

	/* Convert Links to PCI addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (nextLL == 0) {
			iowrite32(1, &currentLL->dcpp);
		} else {
			iowrite32((unsigned int)virt_to_bus(nextLL),
				  &currentLL->dcpp);
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return startLL;
}
int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
{
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *prevLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dcppreg;

	/* Convert Links to virtual addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		dcppreg = ioread32(&currentLL->dcpp);
		if (dcppreg & 1) {
			currentLL->dcpp = 0;
		} else {
			currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
		}
		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
	}

	/* Free all pages associated with the descriptors. */
	currentLL = startLL;
	prevLL = currentLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (currentLL + 1 != nextLL) {
			free_pages((int)prevLL, 0);
			prevLL = nextLL;
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return 0;
}
int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
{
	unsigned int dgcsreg = 0;
	unsigned int dctlreg = 0;
	int val;
	int channel, x;
	vmeDmaPacket_t *curDma;
	TDMA_Cmd_Packet *dmaLL;

	/* Sanity check the VME chain. */
	channel = vmeDma->channel_number;
	curDma = vmeDma;
	while (curDma != 0) {
		if (curDma->byteCount == 0) {
			return -EINVAL;
		}
		if (curDma->byteCount >= 0x1000000) {
			return -EINVAL;
		}
		if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
			return -EINVAL;
		}
		switch (curDma->srcBus) {
		case VME_DMA_PCI:
			if (curDma->dstBus != VME_DMA_VME) {
				return -EINVAL;
			}
			break;
		case VME_DMA_VME:
			if (curDma->dstBus != VME_DMA_PCI) {
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
		if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
			return -EINVAL;
		}

		curDma = curDma->pNextPacket;
		if (curDma == vmeDma) {	/* Endless Loop! */
			return -EINVAL;
		}
	}

	/* calculate control register */
	if (vmeDma->pNextPacket != 0) {
		dgcsreg = 0x8000000;
	} else {
		dgcsreg = 0;
	}

	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((256 << x) >= vmeDma->maxVmeBlockSize) {
			break;
		}
	}
	dgcsreg |= (x << 20);

	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
				break;
			}
		}
		dgcsreg |= (x << 16);
	}
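	/*
	 * Worked example (illustrative only): a requested maxVmeBlockSize of
	 * 2048 bytes stops the first loop at x = 3 (256 << 3 = 2048), so
	 * bits 22:20 of DGCS are set to 3; a vmeBackOffTimer of 64 similarly
	 * encodes as x = 3 (16 << 2 = 64) in bits 18:16.
	 */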
	/* Setup the dma chain */
	dmaLL = ca91cx42_setup_dma(vmeDma);

	if (dgcsreg & 0x8000000) {
		vmeDma->vmeDmaStartTick =
		    ca91cx42_start_dma(channel, dgcsreg,
			(TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
	} else {
		vmeDma->vmeDmaStartTick =
		    ca91cx42_start_dma(channel, dgcsreg, dmaLL);
	}

	wait_event_interruptible(dma_queue,
		ioread32(ca91cx42_bridge->base + DGCS) & 0x800);

	val = ioread32(ca91cx42_bridge->base + DGCS);
	iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);

	vmeDma->vmeDmaStatus = 0;

	if (!(val & 0x00000800)) {
		vmeDma->vmeDmaStatus = val & 0x700;
		printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
			" DGCS=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCPP);
		printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCTL);
		printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DTBC);
		printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DLA);
		printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DVA);
		printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
	}

	/* Free the dma chain */
	ca91cx42_free_dma(dmaLL);

	return 0;
}
int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
{
	int temp_ctl = 0;

	switch (vmeLm->addrSpace) {
	case VME_A16:
		temp_ctl |= 0x00000;
		break;
	case VME_A24:
		temp_ctl |= 0x10000;
		break;
	case VME_A32:
		temp_ctl |= 0x20000;
		break;
	case VME_CRCSR:
		temp_ctl |= 0x50000;
		break;
	case VME_USER1:
		temp_ctl |= 0x60000;
		break;
	case VME_USER2:
		temp_ctl |= 0x70000;
		break;
	default:
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);

	/* Setup CTL register. */
	if (vmeLm->userAccessType & VME_SUPER)
		temp_ctl |= 0x00200000;
	if (vmeLm->userAccessType & VME_USER)
		temp_ctl |= 0x00100000;
	if (vmeLm->dataAccessType & VME_PROG)
		temp_ctl |= 0x00800000;
	if (vmeLm->dataAccessType & VME_DATA)
		temp_ctl |= 0x00400000;

	/* Write ctl reg and enable */
	iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
	temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);

	return 0;
}
int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
{
	unsigned long flags;

	spin_lock_irqsave(&lm_lock, flags);
	spin_unlock_irqrestore(&lm_lock, flags);

	if (vmeLm->lmWait < 10)
		vmeLm->lmWait = 10;
	interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);

	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	return 0;
}
int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
	temp_ctl &= 0x00FFFFFF;

	if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
		vbto = 7;
	} else if (vmeArb->globalTimeoutTimer > 1024) {
		return -EINVAL;
	} else if (vmeArb->globalTimeoutTimer == 0) {
		vbto = 0;
	} else {
		vbto = 1;
		while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
			vbto++;
	}
	temp_ctl |= (vbto << 28);

	if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
		temp_ctl |= 1 << 26;

	if (vmeArb->arbiterTimeoutFlag)
		temp_ctl |= 2 << 24;

	iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);

	return 0;
}
int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);

	vbto = (temp_ctl >> 28) & 0xF;
	if (vbto != 0)
		vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));

	if (temp_ctl & (1 << 26))
		vmeArb->arbiterMode = VME_PRIORITY_MODE;
	else
		vmeArb->arbiterMode = VME_R_ROBIN_MODE;

	if (temp_ctl & (3 << 24))
		vmeArb->arbiterTimeoutFlag = 1;

	return 0;
}
int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
	temp_ctl &= 0xFF0FFFFF;

	if (vmeReq->releaseMode == 1)
		temp_ctl |= (1 << 20);

	if (vmeReq->fairMode == 1)
		temp_ctl |= (1 << 21);

	temp_ctl |= (vmeReq->requestLevel << 22);

	iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);

	return 0;
}
int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);

	if (temp_ctl & (1 << 20))
		vmeReq->releaseMode = 1;

	if (temp_ctl & (1 << 21))
		vmeReq->fairMode = 1;

	vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;

	return 0;
}