/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);
struct vme_bridge *ca91cx42_bridge;
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t lm_queue;
wait_queue_head_t mbox_queue;

void (*lm_callback[4])(int);	/* Called in interrupt handler, be careful! */
void *crcsr_kernel;		/* CR/CSR image buffer */
dma_addr_t crcsr_bus;

struct mutex vme_rmw;	/* Only one RMW cycle at a time */
struct mutex vme_int;	/*
			 * Only one VME interrupt can be
			 * generated at a time, provide locking
			 */
struct mutex vme_irq;	/* Locking for VME irq callback configuration */

static char driver_name[] = "vme_ca91cx42";
static struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};
static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
static u32 ca91cx42_DMA_irqhandler(void)
{
	wake_up(&dma_queue);

	return CA91CX42_LINT_DMA;
}
static u32 ca91cx42_LM_irqhandler(u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}
/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(int mbox_mask)
{
	wake_up(&mbox_queue);

	return CA91CX42_LINT_MBOX;
}
static u32 ca91cx42_IACK_irqhandler(void)
{
	wake_up(&iack_queue);

	return CA91CX42_LINT_SW_IACK;
}
int ca91cx42_bus_error_chk(int clrflag)
{
	int tmp;

	tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
	if (tmp & 0x08000000) {	/* S_TA is Set */
		if (clrflag)
			iowrite32(tmp | 0x08000000,
				ca91cx42_bridge->base + PCI_COMMAND);
		return 1;
	}
	return 0;
}
static u32 ca91cx42_VERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}
static u32 ca91cx42_LERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_LERR;
}
static u32 ca91cx42_VIRQ_irqhandler(int stat)
{
	int vec, i, serviced = 0;
	void (*call)(int, int, void *);
	void *priv_data;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(ca91cx42_bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			call = ca91cx42_bridge->irq[i - 1].callback[vec].func;
			priv_data =
				ca91cx42_bridge->irq[i - 1].callback[vec].priv_data;

			if (call != NULL)
				call(i, vec, priv_data);
			else
				printk("Spurious VME interrupt, level:%x, "
					"vector:%x\n", i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
{
	u32 stat, enable, serviced = 0;

	if (dev_id != ca91cx42_bridge->base)
		return IRQ_NONE;

	enable = ioread32(ca91cx42_bridge->base + LINT_EN);
	stat = ioread32(ca91cx42_bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler();
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler();
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler();
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler();
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(stat);

	/* Clear serviced interrupts */
	iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
	int result, tmp;
	struct pci_dev *pdev;

	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, pdev);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, ca91cx42_bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

	free_irq(pdev->irq, pdev);
}
/*
 * Set up a VME interrupt
 */
int ca91cx42_request_irq(int level, int statid,
	void (*callback)(int level, int vector, void *priv_data),
	void *priv_data)
{
	u32 tmp;

	mutex_lock(&(vme_irq));

	if (ca91cx42_bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&(vme_irq));
		printk("VME Interrupt already taken\n");
		return -EBUSY;
	}

	ca91cx42_bridge->irq[level - 1].count++;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	ca91cx42_bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_VIRQ[level];
	iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

	mutex_unlock(&(vme_irq));

	return 0;
}
void ca91cx42_free_irq(int level, int statid)
{
	u32 tmp;
	struct pci_dev *pdev;

	mutex_lock(&(vme_irq));

	ca91cx42_bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level */
	if (ca91cx42_bridge->irq[level - 1].count == 0) {
		tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
		tmp &= ~CA91CX42_LINT_VIRQ[level];
		iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}

	ca91cx42_bridge->irq[level - 1].callback[statid].func = NULL;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&(vme_irq));
}
int ca91cx42_generate_irq(int level, int statid)
{
	u32 tmp;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(vme_int));

	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	mutex_unlock(&(vme_int));

	return 0;
}
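/*
 * Illustrative usage sketch (not part of the original driver): a caller
 * wanting to assert VMEbus IRQ level 3 with the (even) status/ID 0x42
 * would simply do:
 *
 *	ca91cx42_generate_irq(3, 0x42);
 *
 * The call blocks until the software IACK interrupt fires and the IACK
 * handler wakes iack_queue.
 */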
/*
 * Set the attributes of an inbound (slave) window.
 */
int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
	}

	/* Determine window granularity before it is used below */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	pci_offset = pci_base - vme_base;

	/* XXX Need to check that vme_base, vme_bound and pci_offset aren't
	 * too big for registers
	 */

	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		printk(KERN_ERR "Invalid PCI Offset alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeIn->wrPostEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PWEN;
	if (vmeIn->prefetchEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PREN;
	if (vmeIn->rmwLock)
		temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
	if (vmeIn->data64BitCapable)
		temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
#endif

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
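/*
 * Worked example for the window setup above (illustrative values only):
 * a 64KB A24 slave window at VME 0x210000, backed by PCI 0x80000000 on
 * image 1 (granularity 0x10000), programs:
 *
 *	VSI_BS[1] = 0x00210000				(vme_base)
 *	VSI_BD[1] = 0x00210000 + 0x10000 - 0x10000	(vme_bound)
 *	VSI_TO[1] = 0x80000000 - 0x00210000 = 0x7FDF0000 (pci_offset)
 */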
int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}
/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->pci_resource.end -
		image->pci_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		if (image->pci_resource.name != NULL)
			kfree(image->pci_resource.name);
		release_resource(&(image->pci_resource));
		memset(&(image->pci_resource), 0, sizeof(struct resource));
	}

	if (image->pci_resource.name == NULL) {
		image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
		if (image->pci_resource.name == NULL) {
			printk(KERN_ERR "Unable to allocate memory for resource"
				" name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->pci_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->pci_resource.start = 0;
	image->pci_resource.end = (unsigned long)size;
	image->pci_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		printk(KERN_ERR "Failed to allocate mem resource for "
			"window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->pci_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->pci_resource.start, size);
	if (image->kern_base == NULL) {
		printk(KERN_ERR "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	iounmap(image->kern_base);
	image->kern_base = NULL;
err_remap:
	release_resource(&(image->pci_resource));
err_resource:
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
err_name:
	return retval;
}
/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->pci_resource));
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
}
/*
 * Set the attributes of an outbound (master) window.
 */
int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;

	/* Verify input data */
	if (vme_base & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/* XXX We should do this much later, so that we can exit without
	 * needing to redo the mapping...
	 */
	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Unable to allocate memory for resource "
			"name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->pci_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + (size - 0x1000);
	vme_offset = vme_base - pci_base;

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeOut->wrPostEnable)
		temp_ctl |= 0x40000000;
#endif

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));

	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
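/*
 * For reference (illustrative restatement, not from the original source):
 * the outbound mapping above reduces to three register writes per image i:
 *
 *	LSI_BS[i] = pci_base
 *	LSI_BD[i] = pci_base + size - 0x1000	(pci_bound)
 *	LSI_TO[i] = vme_base - pci_base		(vme_offset, mod 2^32)
 *
 * A PCI access inside the window is forwarded to the VME address
 * (pci_address + vme_offset).
 */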
int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;

	i = image->number;

	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (pci_bound - pci_base) + 0x1000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (ctl & 0x40000000)
		vmeOut->wrPostEnable = 1;
#endif

	return 0;
}
int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int retval;

	spin_lock(&(image->lock));

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&(image->lock));

	return retval;
}
ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval;

	spin_lock(&(image->lock));

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}
ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval;

	spin_lock(&(image->lock));

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}
int ca91cx42_slot_get(void)
{
	u32 slot = 0;

	slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
	slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);

	return (int)slot;
}
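/*
 * Example (illustrative): with VCSR_BS reading 0x18000000, the slot field
 * (bits 31:27, CA91CX42_VCSR_BS_SLOT_M) gives 0x18000000 >> 27 = 3, i.e.
 * the board sits in slot 3.
 */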
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;

/* XXX We may need to set this somehow as the Universe II does not support
 *     geographical addressing.
 */
#if 0
	if (vme_slotnum != -1)
		iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
#endif
	slot = ca91cx42_slot_get();
	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&crcsr_bus);
	if (crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	crcsr_addr = slot * (512 * 1024);
	iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);

	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	return 0;
}
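/*
 * The CR/CSR arithmetic above, worked through (illustrative): each slot
 * owns a 512KB region, so slot 2 sits at CR/CSR offset 2 * 0x80000 =
 * 0x100000, and VCSR_TO is programmed with (crcsr_bus - 0x100000) so
 * that inbound CR/CSR accesses land in the local image buffer.
 */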
static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
{
	u32 tmp;

	/* Turn off CR/CSR space */
	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, ca91cx42_bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
}
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_bridge->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	/* XXX These need to be moved to the vme_bridge structure */
	init_waitqueue_head(&dma_queue);
	init_waitqueue_head(&iack_queue);
	mutex_init(&(vme_int));
	mutex_init(&(vme_irq));
	mutex_init(&(vme_rmw));

	ca91cx42_bridge->parent = &(pdev->dev);
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&(master_image->lock));
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&(master_image->pci_resource), 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&(master_image->list),
			&(ca91cx42_bridge->master_resources));
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&(slave_image->mtx));
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&(slave_image->list),
			&(ca91cx42_bridge->slave_resources));
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&(dma_ctrlr->mtx));
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		INIT_LIST_HEAD(&(dma_ctrlr->pending));
		INIT_LIST_HEAD(&(dma_ctrlr->running));
		list_add_tail(&(dma_ctrlr->list),
			&(ca91cx42_bridge->dma_resources));
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&(lm->mtx));
	lm->locked = 0;
	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
#if 0
	/* Legacy RMW and DMA list API, not wired up yet */
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
#endif
	ca91cx42_bridge->request_irq = ca91cx42_request_irq;
	ca91cx42_bridge->free_irq = ca91cx42_free_irq;
	ca91cx42_bridge->generate_irq = ca91cx42_generate_irq;
#if 0
	/* Legacy location monitor API, not wired up yet */
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
#endif
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;

	data = ioread32(ca91cx42_bridge->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());

	if (ca91cx42_crcsr_init(pdev)) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		retval = -EINVAL;
		goto err_crcsr;
	}

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	return 0;

	vme_unregister_bridge(ca91cx42_bridge);
err_reg:
	ca91cx42_crcsr_exit(pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);
err_irq:
err_test:
	iounmap(ca91cx42_bridge->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}
void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* Turn off Ints */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(pdev);

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);

	iounmap(ca91cx42_bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(ca91cx42_bridge);
}
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);
/*----------------------------------------------------------------------------
 * STAGING
 *--------------------------------------------------------------------------*/

#if 0
#define SWIZZLE(X) ( ((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
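/*
 * SWIZZLE reverses the byte order of a 32-bit word, e.g. (illustrative)
 * SWIZZLE(0x11223344) == 0x44332211, presumably to match the byte lanes
 * the Universe expects for the special cycle (RMW) registers.
 */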
int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
{
	int temp_ctl = 0;
	int tempBS = 0;
	int tempBD = 0;
	int tempTO = 0;
	int vmeBS = 0;
	int vmeBD = 0;
	int *rmw_pci_data_ptr = NULL;
	int *vaDataPtr = NULL;
	int i;
	vmeOutWindowCfg_t vmeOut;

	if (vmeRmw->maxAttempts < 1)
		return -EINVAL;

	if (vmeRmw->targetAddrU)
		return -EINVAL;

	/* Find the PCI address that maps to the desired VME address */
	for (i = 0; i < 8; i++) {
		temp_ctl = ioread32(ca91cx42_bridge->base +
			CA91CX42_LSI_CTL[i]);
		if ((temp_ctl & 0x80000000) == 0)
			continue;

		memset(&vmeOut, 0, sizeof(vmeOut));
		vmeOut.windowNbr = i;
		ca91cx42_get_out_bound(&vmeOut);
		if (vmeOut.addrSpace != vmeRmw->addrSpace)
			continue;

		tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
		tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
		tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
		vmeBS = tempBS + tempTO;
		vmeBD = tempBD + tempTO;
		if ((vmeRmw->targetAddr >= vmeBS) &&
		    (vmeRmw->targetAddr < vmeBD)) {
			rmw_pci_data_ptr =
				(int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
			vaDataPtr =
				(int *)(out_image_va[i] +
				(vmeRmw->targetAddr - vmeBS));
			break;
		}
	}

	/* If no window - fail. */
	if (rmw_pci_data_ptr == NULL)
		return -EINVAL;

	/* Setup the RMW registers. */
	iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
	iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
	iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
		SCYC_CMP);
	iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
	iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
	iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);

	/* Run the RMW cycle until either success or max attempts. */
	vmeRmw->numAttempts = 1;
	while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {
		if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
		    (vmeRmw->swapData & vmeRmw->enableMask)) {
			iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
			break;
		}
		vmeRmw->numAttempts++;
	}

	/* If no success, set num Attempts to be greater than max attempts */
	if (vmeRmw->numAttempts > vmeRmw->maxAttempts)
		vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;

	return 0;
}
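/*
 * RMW semantics in brief (illustrative sketch, assuming the usual Universe
 * special-cycle behaviour): with enableMask 0x000000FF, compareData
 * 0x00000001 and swapData 0x00000002, the cycle replaces the low byte of
 * the target word with 0x02 when it currently reads 0x01 - effectively a
 * masked compare-and-swap over the VMEbus, polled for success above.
 */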
int uniSetupDctlReg(vmeDmaPacket_t *vmeDma, int *dctlregreturn)
{
	unsigned int dctlreg = 0x80;
	struct vmeAttr *vmeAttr;

	if (vmeDma->srcBus == VME_DMA_VME) {
		dctlreg = 0;
		vmeAttr = &vmeDma->srcVmeAttr;
	} else {
		dctlreg = 0x80000000;
		vmeAttr = &vmeDma->dstVmeAttr;
	}

	switch (vmeAttr->maxDataWidth) {
	case VME_D8:
		break;
	case VME_D16:
		dctlreg |= 0x00400000;
		break;
	case VME_D32:
		dctlreg |= 0x00800000;
		break;
	case VME_D64:
		dctlreg |= 0x00C00000;
		break;
	}

	switch (vmeAttr->addrSpace) {
	case VME_A16:
		break;
	case VME_A24:
		dctlreg |= 0x00010000;
		break;
	case VME_A32:
		dctlreg |= 0x00020000;
		break;
	case VME_USER1:
		dctlreg |= 0x00060000;
		break;
	case VME_USER2:
		dctlreg |= 0x00070000;
		break;

	case VME_A64:		/* not supported in Universe DMA */
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
	}
	if (vmeAttr->userAccessType == VME_PROG)
		dctlreg |= 0x00004000;

	if (vmeAttr->dataAccessType == VME_SUPER)
		dctlreg |= 0x00001000;

	if (vmeAttr->xferProtocol != VME_SCT)
		dctlreg |= 0x00000100;

	*dctlregreturn = dctlreg;
	return 0;
}
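/*
 * Example encoding (derived from the cases above, illustrative): a
 * VME -> PCI transfer (dctlreg starts at 0) of D32 data in A24 space
 * using a block transfer protocol yields
 *
 *	0x00800000 | 0x00010000 | 0x00000100 = 0x00810100
 */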
unsigned int ca91cx42_start_dma(int channel, unsigned int dgcsreg,
	TDMA_Cmd_Packet *vmeLL)
{
	unsigned int val;

	/* Setup registers as needed for direct or chained. */
	if (dgcsreg & 0x8000000) {
		iowrite32(0, ca91cx42_bridge->base + DTBC);
		iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
	} else {
		printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
		printk(KERN_ERR "Starting: DVA  = %08x\n",
			ioread32(&vmeLL->dva));
		printk(KERN_ERR "Starting: DLV  = %08x\n",
			ioread32(&vmeLL->dlv));
		printk(KERN_ERR "Starting: DTBC = %08x\n",
			ioread32(&vmeLL->dtbc));
		printk(KERN_ERR "Starting: DCTL = %08x\n",
			ioread32(&vmeLL->dctl));

		/* Write registers */
		iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
		iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
		iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
		iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
		iowrite32(0, ca91cx42_bridge->base + DCPP);
	}

	/* Start the operation */
	iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
	val = get_tbl();
	iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);

	return val;
}
TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t *vmeDma)
{
	vmeDmaPacket_t *vmeCur;
	int maxPerPage;
	int currentLLcount;
	TDMA_Cmd_Packet *startLL;
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dctlreg = 0;

	maxPerPage = PAGESIZE / sizeof(TDMA_Cmd_Packet) - 1;
	startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
	if (startLL == 0)
		return 0;

	/* First allocate pages for descriptors and create linked list */
	vmeCur = vmeDma;
	currentLL = startLL;
	currentLLcount = 0;
	while (vmeCur != 0) {
		if (vmeCur->pNextPacket != 0) {
			currentLL->dcpp = (unsigned int)(currentLL + 1);
			currentLLcount++;
			if (currentLLcount >= maxPerPage) {
				currentLL->dcpp =
					__get_free_pages(GFP_KERNEL, 0);
				currentLLcount = 0;
			}
			currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		} else {
			currentLL->dcpp = (unsigned int)0;
		}
		vmeCur = vmeCur->pNextPacket;
	}

	/* Next fill in information for each descriptor */
	vmeCur = vmeDma;
	currentLL = startLL;
	while (vmeCur != 0) {
		if (vmeCur->srcBus == VME_DMA_VME) {
			iowrite32(vmeCur->srcAddr, &currentLL->dva);
			iowrite32(vmeCur->dstAddr, &currentLL->dlv);
		} else {
			iowrite32(vmeCur->srcAddr, &currentLL->dlv);
			iowrite32(vmeCur->dstAddr, &currentLL->dva);
		}
		uniSetupDctlReg(vmeCur, &dctlreg);
		iowrite32(dctlreg, &currentLL->dctl);
		iowrite32(vmeCur->byteCount, &currentLL->dtbc);

		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		vmeCur = vmeCur->pNextPacket;
	}

	/* Convert Links to PCI addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (nextLL == 0)
			iowrite32(1, &currentLL->dcpp);
		else
			iowrite32((unsigned int)virt_to_bus(nextLL),
				&currentLL->dcpp);
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return startLL;
}
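/*
 * Note on the link encoding above (illustrative summary): after the final
 * pass each descriptor's dcpp holds the PCI bus address of the next
 * TDMA_Cmd_Packet, and the last descriptor's dcpp is written as 1, which
 * ca91cx42_free_dma() below uses as its end-of-list marker.
 */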
int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
{
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *prevLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dcppreg;

	/* Convert Links to virtual addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		dcppreg = ioread32(&currentLL->dcpp);
		dcppreg &= ~6;
		if (dcppreg & 1)
			currentLL->dcpp = 0;
		else
			currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
	}

	/* Free all pages associated with the descriptors. */
	currentLL = startLL;
	prevLL = currentLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (currentLL + 1 != nextLL) {
			free_pages((int)prevLL, 0);
			prevLL = nextLL;
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return 0;
}
int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
{
	unsigned int dgcsreg = 0;
	unsigned int dctlreg = 0;
	int val;
	int channel, x;
	vmeDmaPacket_t *curDma;
	TDMA_Cmd_Packet *dmaLL;

	/* Sanity check the VME chain. */
	channel = vmeDma->channel_number;
	if (channel > 0)
		return -EINVAL;
	curDma = vmeDma;
	while (curDma != 0) {
		if (curDma->byteCount == 0)
			return -EINVAL;
		if (curDma->byteCount >= 0x1000000)
			return -EINVAL;
		if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7))
			return -EINVAL;
		switch (curDma->srcBus) {
		case VME_DMA_PCI:
			if (curDma->dstBus != VME_DMA_VME)
				return -EINVAL;
			break;
		case VME_DMA_VME:
			if (curDma->dstBus != VME_DMA_PCI)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		if (uniSetupDctlReg(curDma, &dctlreg) < 0)
			return -EINVAL;

		curDma = curDma->pNextPacket;
		if (curDma == vmeDma)	/* Endless Loop! */
			return -EINVAL;
	}

	/* calculate control register */
	if (vmeDma->pNextPacket != 0)
		dgcsreg = 0x8000000;
	else
		dgcsreg = 0;

	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((256 << x) >= vmeDma->maxVmeBlockSize)
			break;
	}
	if (x == 8)
		x = 7;
	dgcsreg |= (x << 20);

	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer)
				break;
		}
		if (x == 8)
			x = 7;
		dgcsreg |= (x << 16);
	}

	/* Setup the dma chain */
	dmaLL = ca91cx42_setup_dma(vmeDma);

	/* Start the DMA */
	if (dgcsreg & 0x8000000) {
		vmeDma->vmeDmaStartTick =
			ca91cx42_start_dma(channel, dgcsreg,
			(TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
	} else {
		vmeDma->vmeDmaStartTick =
			ca91cx42_start_dma(channel, dgcsreg, dmaLL);
	}

	wait_event_interruptible(dma_queue,
		ioread32(ca91cx42_bridge->base + DGCS) & 0x800);

	val = ioread32(ca91cx42_bridge->base + DGCS);
	iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);

	vmeDma->vmeDmaStatus = 0;

	if (!(val & 0x00000800)) {
		vmeDma->vmeDmaStatus = val & 0x700;
		printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
			" DGCS=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCPP);
		printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCTL);
		printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DTBC);
		printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DLA);
		printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DVA);
		printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
	}
	/* Free the dma chain */
	ca91cx42_free_dma(dmaLL);

	return 0;
}
int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
{
	int temp_ctl = 0;

	switch (vmeLm->addrSpace) {
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
	case VME_A16:
		temp_ctl |= 0x00000;
		break;
	case VME_A24:
		temp_ctl |= 0x10000;
		break;
	case VME_A32:
		temp_ctl |= 0x20000;
		break;
	case VME_CRCSR:
		temp_ctl |= 0x50000;
		break;
	case VME_USER1:
		temp_ctl |= 0x60000;
		break;
	case VME_USER2:
		temp_ctl |= 0x70000;
		break;
	}

	/* Disable while we are mucking around */
	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);

	/* Setup CTL register. */
	if (vmeLm->userAccessType & VME_SUPER)
		temp_ctl |= 0x00200000;
	if (vmeLm->userAccessType & VME_USER)
		temp_ctl |= 0x00100000;
	if (vmeLm->dataAccessType & VME_PROG)
		temp_ctl |= 0x00800000;
	if (vmeLm->dataAccessType & VME_DATA)
		temp_ctl |= 0x00400000;

	/* Write ctl reg and enable */
	iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
	temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);

	return 0;
}
int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
{
	unsigned long flags;

	spin_lock_irqsave(&lm_lock, flags);
	spin_unlock_irqrestore(&lm_lock, flags);

	if (vmeLm->lmWait < 10)
		vmeLm->lmWait = 10;
	interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);

	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	return 0;
}
int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
	temp_ctl &= 0x00FFFFFF;

	if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
		vbto = 7;
	} else if (vmeArb->globalTimeoutTimer > 1024) {
		return -EINVAL;
	} else if (vmeArb->globalTimeoutTimer == 0) {
		vbto = 0;
	} else {
		vbto = 1;
		while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
			vbto += 1;
	}
	temp_ctl |= (vbto << 28);

	if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
		temp_ctl |= 1 << 26;

	if (vmeArb->arbiterTimeoutFlag)
		temp_ctl |= 2 << 24;

	iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);

	return 0;
}
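/*
 * Timeout encoding example (illustrative): a globalTimeoutTimer of 64
 * searches for the smallest vbto with 16 * 2^(vbto - 1) >= 64, i.e.
 * vbto = 3, and programs it into MISC_CTL bits 31:28.
 */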
int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);

	vbto = (temp_ctl >> 28) & 0xF;
	if (vbto != 0)
		vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));

	if (temp_ctl & (1 << 26))
		vmeArb->arbiterMode = VME_PRIORITY_MODE;
	else
		vmeArb->arbiterMode = VME_R_ROBIN_MODE;

	if (temp_ctl & (3 << 24))
		vmeArb->arbiterTimeoutFlag = 1;

	return 0;
}
int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
	temp_ctl &= 0xFF0FFFFF;

	if (vmeReq->releaseMode == 1)
		temp_ctl |= (1 << 20);

	if (vmeReq->fairMode == 1)
		temp_ctl |= (1 << 21);

	temp_ctl |= (vmeReq->requestLevel << 22);

	iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);

	return 0;
}
int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);

	if (temp_ctl & (1 << 20))
		vmeReq->releaseMode = 1;

	if (temp_ctl & (1 << 21))
		vmeReq->fairMode = 1;

	vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;

	return 0;
}

#endif