Staging: vme: Allow override of geographical address on ca91c142
drivers/staging/vme/bridges/vme_ca91cx42.c
/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

/* Module parameters */
static int geoid;

struct vme_bridge *ca91cx42_bridge;
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t lm_queue;
wait_queue_head_t mbox_queue;

void (*lm_callback[4])(int);    /* Called in interrupt handler, be careful! */
void *crcsr_kernel;
dma_addr_t crcsr_bus;

struct mutex vme_rmw;   /* Only one RMW cycle at a time */
struct mutex vme_int;   /*
                         * Only one VME interrupt can be
                         * generated at a time, provide locking
                         */

static char driver_name[] = "vme_ca91cx42";

static struct pci_device_id ca91cx42_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
        { },
};

static struct pci_driver ca91cx42_driver = {
        .name = driver_name,
        .id_table = ca91cx42_ids,
        .probe = ca91cx42_probe,
        .remove = ca91cx42_remove,
};
static u32 ca91cx42_DMA_irqhandler(void)
{
        wake_up(&dma_queue);

        return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(u32 stat)
{
        int i;
        u32 serviced = 0;

        for (i = 0; i < 4; i++) {
                if (stat & CA91CX42_LINT_LM[i]) {
                        /* We only enable interrupts if the callback is set */
                        lm_callback[i](i);
                        serviced |= CA91CX42_LINT_LM[i];
                }
        }

        return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(int mbox_mask)
{
        wake_up(&mbox_queue);

        return CA91CX42_LINT_MBOX;
}

static u32 ca91cx42_IACK_irqhandler(void)
{
        wake_up(&iack_queue);

        return CA91CX42_LINT_SW_IACK;
}
#if 0
int ca91cx42_bus_error_chk(int clrflag)
{
        int tmp;
        tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
        if (tmp & 0x08000000) { /* S_TA is Set */
                if (clrflag)
                        iowrite32(tmp | 0x08000000,
                                ca91cx42_bridge->base + PCI_COMMAND);
                return 1;
        }
        return 0;
}
#endif
static u32 ca91cx42_VERR_irqhandler(void)
{
        int val;

        val = ioread32(ca91cx42_bridge->base + DGCS);

        if (!(val & 0x00000800)) {
                printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
                        "Error DGCS=%08X\n", val);
        }

        return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(void)
{
        int val;

        val = ioread32(ca91cx42_bridge->base + DGCS);

        if (!(val & 0x00000800)) {
                printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
                        "Error DGCS=%08X\n", val);
        }

        return CA91CX42_LINT_LERR;
}
static u32 ca91cx42_VIRQ_irqhandler(int stat)
{
        int vec, i, serviced = 0;

        for (i = 7; i > 0; i--) {
                if (stat & (1 << i)) {
                        vec = ioread32(ca91cx42_bridge->base +
                                CA91CX42_V_STATID[i]) & 0xff;

                        vme_irq_handler(ca91cx42_bridge, i, vec);

                        serviced |= (1 << i);
                }
        }

        return serviced;
}
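/*
 * Note on the loop above: interrupt levels are walked from 7 down to 1, so
 * when several VME interrupt levels are pending the highest level is
 * acknowledged first. The 8-bit Status/ID vector latched during the IACK
 * cycle (masked with 0xff) is handed to the common vme_irq_handler()
 * dispatch along with the level it arrived on.
 */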
static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
{
        u32 stat, enable, serviced = 0;

        if (dev_id != ca91cx42_bridge->base)
                return IRQ_NONE;

        enable = ioread32(ca91cx42_bridge->base + LINT_EN);
        stat = ioread32(ca91cx42_bridge->base + LINT_STAT);

        /* Only look at unmasked interrupts */
        stat &= enable;

        if (unlikely(!stat))
                return IRQ_NONE;

        if (stat & CA91CX42_LINT_DMA)
                serviced |= ca91cx42_DMA_irqhandler();
        if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
                        CA91CX42_LINT_LM3))
                serviced |= ca91cx42_LM_irqhandler(stat);
        if (stat & CA91CX42_LINT_MBOX)
                serviced |= ca91cx42_MB_irqhandler(stat);
        if (stat & CA91CX42_LINT_SW_IACK)
                serviced |= ca91cx42_IACK_irqhandler();
        if (stat & CA91CX42_LINT_VERR)
                serviced |= ca91cx42_VERR_irqhandler();
        if (stat & CA91CX42_LINT_LERR)
                serviced |= ca91cx42_LERR_irqhandler();
        if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
                        CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
                        CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
                        CA91CX42_LINT_VIRQ7))
                serviced |= ca91cx42_VIRQ_irqhandler(stat);

        /* Clear serviced interrupts */
        iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);

        return IRQ_HANDLED;
}
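/*
 * The write-back of "stat" above relies on LINT_STAT being a
 * write-one-to-clear register: only the interrupt bits that were actually
 * serviced are acknowledged, while any source that became pending after the
 * status read keeps its bit set and will re-raise the PCI interrupt.
 */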
static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
        int result, tmp;
        struct pci_dev *pdev;

        /* Need pdev */
        pdev = container_of(bridge->parent, struct pci_dev, dev);

        /* Initialise list for VME bus errors */
        INIT_LIST_HEAD(&(bridge->vme_errors));

        mutex_init(&(bridge->irq_mtx));

        /* Disable interrupts from PCI to VME */
        iowrite32(0, bridge->base + VINT_EN);

        /* Disable PCI interrupts */
        iowrite32(0, bridge->base + LINT_EN);
        /* Clear Any Pending PCI Interrupts */
        iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

        result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
                        driver_name, pdev);
        if (result) {
                dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
                        pdev->irq);
                return result;
        }

        /* Ensure all interrupts are mapped to PCI Interrupt 0 */
        iowrite32(0, bridge->base + LINT_MAP0);
        iowrite32(0, bridge->base + LINT_MAP1);
        iowrite32(0, bridge->base + LINT_MAP2);

        /* Enable DMA, mailbox & LM Interrupts */
        tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
                CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
                CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

        iowrite32(tmp, bridge->base + LINT_EN);

        return 0;
}
static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
        /* Disable interrupts from PCI to VME */
        iowrite32(0, ca91cx42_bridge->base + VINT_EN);

        /* Disable PCI interrupts */
        iowrite32(0, ca91cx42_bridge->base + LINT_EN);
        /* Clear Any Pending PCI Interrupts */
        iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

        free_irq(pdev->irq, pdev);
}
/*
 * Set up a VME interrupt
 */
void ca91cx42_irq_set(int level, int state, int sync)
{
        struct pci_dev *pdev;
        u32 tmp;

        /* Enable IRQ level */
        tmp = ioread32(ca91cx42_bridge->base + LINT_EN);

        if (state == 0)
                tmp &= ~CA91CX42_LINT_VIRQ[level];
        else
                tmp |= CA91CX42_LINT_VIRQ[level];

        iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

        if ((state == 0) && (sync != 0)) {
                pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
                        dev);

                synchronize_irq(pdev->irq);
        }
}
int ca91cx42_irq_generate(int level, int statid)
{
        u32 tmp;

        /* Universe can only generate even vectors */
        if (statid & 1)
                return -EINVAL;

        mutex_lock(&(vme_int));

        tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

        /* Set Status/ID */
        iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

        /* Assert VMEbus IRQ */
        tmp = tmp | (1 << (level + 24));
        iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

        /* Wait for IACK */
        wait_event_interruptible(iack_queue, 0);

        /* Return interrupt to low state */
        tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
        tmp = tmp & ~(1 << (level + 24));
        iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

        mutex_unlock(&(vme_int));

        return 0;
}
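/*
 * Note on the "Wait for IACK" step above: the condition given to
 * wait_event_interruptible() is constantly false, so the wake_up() issued
 * by ca91cx42_IACK_irqhandler() does not by itself terminate the wait; the
 * sleeper re-checks the condition and sleeps again, and the call only
 * returns once a signal is delivered. An explicit IACK-received condition
 * would be needed for the interrupt-driven wake-up to suffice.
 */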
int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
        unsigned long long vme_base, unsigned long long size,
        dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
        unsigned int i, addr = 0, granularity = 0;
        unsigned int temp_ctl = 0;
        unsigned int vme_bound, pci_offset;

        i = image->number;

        switch (aspace) {
        case VME_A16:
                addr |= CA91CX42_VSI_CTL_VAS_A16;
                break;
        case VME_A24:
                addr |= CA91CX42_VSI_CTL_VAS_A24;
                break;
        case VME_A32:
                addr |= CA91CX42_VSI_CTL_VAS_A32;
                break;
        case VME_USER1:
                addr |= CA91CX42_VSI_CTL_VAS_USER1;
                break;
        case VME_USER2:
                addr |= CA91CX42_VSI_CTL_VAS_USER2;
                break;
        case VME_A64:
        case VME_CRCSR:
        case VME_USER3:
        case VME_USER4:
        default:
                printk(KERN_ERR "Invalid address space\n");
                return -EINVAL;
                break;
        }

        /*
         * Windows 0 and 4 have a 4 KB granularity, all others 64 KB.
         * Determine this before computing the bound address, which
         * depends on it.
         */
        if ((i == 0) || (i == 4))
                granularity = 0x1000;
        else
                granularity = 0x10000;

        /*
         * Bound address is a valid address for the window, adjust
         * accordingly
         */
        vme_bound = vme_base + size - granularity;
        pci_offset = pci_base - vme_base;

        /* XXX Need to check that vme_base, vme_bound and pci_offset aren't
         * too big for registers
         */

        if (vme_base & (granularity - 1)) {
                printk(KERN_ERR "Invalid VME base alignment\n");
                return -EINVAL;
        }
        if (vme_bound & (granularity - 1)) {
                printk(KERN_ERR "Invalid VME bound alignment\n");
                return -EINVAL;
        }
        if (pci_offset & (granularity - 1)) {
                printk(KERN_ERR "Invalid PCI Offset alignment\n");
                return -EINVAL;
        }

        /* Disable while we are mucking around */
        temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
        temp_ctl &= ~CA91CX42_VSI_CTL_EN;
        iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

        /* Setup mapping */
        iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
        iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
        iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

        /* XXX Prefetch stuff currently unsupported */
#if 0
        if (vmeIn->wrPostEnable)
                temp_ctl |= CA91CX42_VSI_CTL_PWEN;
        if (vmeIn->prefetchEnable)
                temp_ctl |= CA91CX42_VSI_CTL_PREN;
        if (vmeIn->rmwLock)
                temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
        if (vmeIn->data64BitCapable)
                temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
#endif

        /* Setup address space */
        temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
        temp_ctl |= addr;

        /* Setup cycle types */
        temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
        if (cycle & VME_SUPER)
                temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
        if (cycle & VME_USER)
                temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
        if (cycle & VME_PROG)
                temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
        if (cycle & VME_DATA)
                temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

        /* Write ctl reg without enable */
        iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

        if (enabled)
                temp_ctl |= CA91CX42_VSI_CTL_EN;

        iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

        return 0;
}
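/*
 * Worked example for the arithmetic above (hypothetical values): mapping a
 * 1 MB A24 window through image 1 (64 KB granularity) at VME base 0x200000,
 * backed by a PCI buffer at bus address 0x80000000, programs
 * VSI_BS = 0x200000, VSI_BD = 0x200000 + 0x100000 - 0x10000 = 0x2F0000 and
 * VSI_TO = 0x80000000 - 0x200000 = 0x7FE00000; the bridge adds the
 * translation offset to each incoming VME address to form the PCI address.
 */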
int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
        unsigned long long *vme_base, unsigned long long *size,
        dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
        unsigned int i, granularity = 0, ctl = 0;
        unsigned long long vme_bound, pci_offset;

        i = image->number;

        if ((i == 0) || (i == 4))
                granularity = 0x1000;
        else
                granularity = 0x10000;

        /* Read Registers */
        ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

        *vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
        vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
        pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

        *pci_base = (dma_addr_t)*vme_base + pci_offset;
        *size = (unsigned long long)((vme_bound - *vme_base) + granularity);

        *enabled = 0;
        *aspace = 0;
        *cycle = 0;

        if (ctl & CA91CX42_VSI_CTL_EN)
                *enabled = 1;

        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
                *aspace = VME_A16;
        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
                *aspace = VME_A24;
        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
                *aspace = VME_A32;
        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
                *aspace = VME_USER1;
        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
                *aspace = VME_USER2;

        if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
                *cycle |= VME_SUPER;
        if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
                *cycle |= VME_USER;
        if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
                *cycle |= VME_PROG;
        if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
                *cycle |= VME_DATA;

        return 0;
}
/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
        unsigned long long size)
{
        unsigned long long existing_size;
        int retval = 0;
        struct pci_dev *pdev;

        /* Find pci_dev container of dev */
        if (ca91cx42_bridge->parent == NULL) {
                printk(KERN_ERR "Dev entry NULL\n");
                return -EINVAL;
        }
        pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

        existing_size = (unsigned long long)(image->pci_resource.end -
                image->pci_resource.start);

        /* If the existing size is OK, return */
        if (existing_size == (size - 1))
                return 0;

        if (existing_size != 0) {
                iounmap(image->kern_base);
                image->kern_base = NULL;
                if (image->pci_resource.name != NULL)
                        kfree(image->pci_resource.name);
                release_resource(&(image->pci_resource));
                memset(&(image->pci_resource), 0, sizeof(struct resource));
        }

        if (image->pci_resource.name == NULL) {
                image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
                if (image->pci_resource.name == NULL) {
                        printk(KERN_ERR "Unable to allocate memory for resource"
                                " name\n");
                        retval = -ENOMEM;
                        goto err_name;
                }
        }

        sprintf((char *)image->pci_resource.name, "%s.%d",
                ca91cx42_bridge->name, image->number);

        image->pci_resource.start = 0;
        image->pci_resource.end = (unsigned long)size;
        image->pci_resource.flags = IORESOURCE_MEM;

        retval = pci_bus_alloc_resource(pdev->bus,
                &(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
                0, NULL, NULL);
        if (retval) {
                printk(KERN_ERR "Failed to allocate mem resource for "
                        "window %d size 0x%lx start 0x%lx\n",
                        image->number, (unsigned long)size,
                        (unsigned long)image->pci_resource.start);
                goto err_resource;
        }

        image->kern_base = ioremap_nocache(
                image->pci_resource.start, size);
        if (image->kern_base == NULL) {
                printk(KERN_ERR "Failed to remap resource\n");
                retval = -ENOMEM;
                goto err_remap;
        }

        return 0;

        iounmap(image->kern_base);
        image->kern_base = NULL;
err_remap:
        release_resource(&(image->pci_resource));
err_resource:
        kfree(image->pci_resource.name);
        memset(&(image->pci_resource), 0, sizeof(struct resource));
err_name:
        return retval;
}
/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
        iounmap(image->kern_base);
        image->kern_base = NULL;
        release_resource(&(image->pci_resource));
        kfree(image->pci_resource.name);
        memset(&(image->pci_resource), 0, sizeof(struct resource));
}
int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
        unsigned long long vme_base, unsigned long long size,
        vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
        int retval = 0;
        unsigned int i;
        unsigned int temp_ctl = 0;
        unsigned long long pci_bound, vme_offset, pci_base;

        /* Verify input data */
        if (vme_base & 0xFFF) {
                printk(KERN_ERR "Invalid VME Window alignment\n");
                retval = -EINVAL;
                goto err_window;
        }
        if (size & 0xFFF) {
                printk(KERN_ERR "Invalid VME Window alignment\n");
                retval = -EINVAL;
                goto err_window;
        }

        spin_lock(&(image->lock));

        /* XXX We should do this much later, so that we can exit without
         * needing to redo the mapping...
         *
         * Let's allocate the resource here rather than further up the stack as
         * it avoids pushing loads of bus dependent stuff up the stack
         */
        retval = ca91cx42_alloc_resource(image, size);
        if (retval) {
                spin_unlock(&(image->lock));
                printk(KERN_ERR "Unable to allocate memory for resource "
                        "name\n");
                retval = -ENOMEM;
                goto err_res;
        }

        pci_base = (unsigned long long)image->pci_resource.start;

        /*
         * Bound address is a valid address for the window, adjust
         * according to window granularity.
         */
        pci_bound = pci_base + (size - 0x1000);
        vme_offset = vme_base - pci_base;

        i = image->number;

        /* Disable while we are mucking around */
        temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
        temp_ctl &= ~CA91CX42_LSI_CTL_EN;
        iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

        /* XXX Prefetch stuff currently unsupported */
#if 0
        if (vmeOut->wrPostEnable)
                temp_ctl |= 0x40000000;
#endif

        /* Setup cycle types */
        temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
        if (cycle & VME_BLT)
                temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
        if (cycle & VME_MBLT)
                temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

        /* Setup data width */
        temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
        switch (dwidth) {
        case VME_D8:
                temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
                break;
        case VME_D16:
                temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
                break;
        case VME_D32:
                temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
                break;
        case VME_D64:
                temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
                break;
        default:
                spin_unlock(&(image->lock));
                printk(KERN_ERR "Invalid data width\n");
                retval = -EINVAL;
                goto err_dwidth;
                break;
        }

        /* Setup address space */
        temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
        switch (aspace) {
        case VME_A16:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
                break;
        case VME_A24:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
                break;
        case VME_A32:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
                break;
        case VME_CRCSR:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
                break;
        case VME_USER1:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
                break;
        case VME_USER2:
                temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
                break;
        case VME_A64:
        case VME_USER3:
        case VME_USER4:
        default:
                spin_unlock(&(image->lock));
                printk(KERN_ERR "Invalid address space\n");
                retval = -EINVAL;
                goto err_aspace;
                break;
        }

        temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
        if (cycle & VME_SUPER)
                temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
        if (cycle & VME_PROG)
                temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

        /* Setup mapping */
        iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
        iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
        iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);

        /* Write ctl reg without enable */
        iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

        if (enabled)
                temp_ctl |= CA91CX42_LSI_CTL_EN;

        iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

        spin_unlock(&(image->lock));
        return 0;

err_aspace:
err_dwidth:
        ca91cx42_free_resource(image);
err_res:
err_window:
        return retval;
}
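/*
 * Worked example for the translation above (hypothetical values): with the
 * window's PCI resource at 0x90000000, size 0x10000 and a requested VME
 * base of 0x01000000, LSI_BS = 0x90000000, LSI_BD = 0x9000F000 and
 * LSI_TO = 0x01000000 - 0x90000000 = 0x71000000 (mod 2^32). The bridge
 * adds the translation offset to the PCI address, and the 32-bit wrap
 * yields the intended VME address: 0x90000000 + 0x71000000 = 0x01000000.
 */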
int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
        unsigned long long *vme_base, unsigned long long *size,
        vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
        unsigned int i, ctl;
        unsigned long long pci_base, pci_bound, vme_offset;

        i = image->number;

        ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

        pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
        vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
        pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);

        *vme_base = pci_base + vme_offset;
        *size = (pci_bound - pci_base) + 0x1000;

        *enabled = 0;
        *aspace = 0;
        *cycle = 0;
        *dwidth = 0;

        if (ctl & CA91CX42_LSI_CTL_EN)
                *enabled = 1;

        /* Setup address space */
        switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
        case CA91CX42_LSI_CTL_VAS_A16:
                *aspace = VME_A16;
                break;
        case CA91CX42_LSI_CTL_VAS_A24:
                *aspace = VME_A24;
                break;
        case CA91CX42_LSI_CTL_VAS_A32:
                *aspace = VME_A32;
                break;
        case CA91CX42_LSI_CTL_VAS_CRCSR:
                *aspace = VME_CRCSR;
                break;
        case CA91CX42_LSI_CTL_VAS_USER1:
                *aspace = VME_USER1;
                break;
        case CA91CX42_LSI_CTL_VAS_USER2:
                *aspace = VME_USER2;
                break;
        }

        /* XXX Not sure how to check for MBLT */
        /* Setup cycle types */
        if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
                *cycle |= VME_BLT;
        else
                *cycle |= VME_SCT;

        if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
                *cycle |= VME_SUPER;
        else
                *cycle |= VME_USER;

        if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
                *cycle |= VME_PROG;
        else
                *cycle |= VME_DATA;

        /* Setup data width */
        switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
        case CA91CX42_LSI_CTL_VDW_D8:
                *dwidth = VME_D8;
                break;
        case CA91CX42_LSI_CTL_VDW_D16:
                *dwidth = VME_D16;
                break;
        case CA91CX42_LSI_CTL_VDW_D32:
                *dwidth = VME_D32;
                break;
        case CA91CX42_LSI_CTL_VDW_D64:
                *dwidth = VME_D64;
                break;
        }

        /* XXX Prefetch stuff currently unsupported */
#if 0
        if (ctl & 0x40000000)
                vmeOut->wrPostEnable = 1;
#endif

        return 0;
}
int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
        unsigned long long *vme_base, unsigned long long *size,
        vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
        int retval;

        spin_lock(&(image->lock));

        retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
                cycle, dwidth);

        spin_unlock(&(image->lock));

        return retval;
}
ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
        size_t count, loff_t offset)
{
        int retval;

        spin_lock(&(image->lock));

        memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
        retval = count;

        spin_unlock(&(image->lock));

        return retval;
}

ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
        size_t count, loff_t offset)
{
        int retval = 0;

        spin_lock(&(image->lock));

        memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
        retval = count;

        spin_unlock(&(image->lock));

        return retval;
}
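/*
 * Note that these accessors simply copy through the ioremapped master
 * window; offset and count are taken on trust and are not checked here
 * against the size of the window configured in ca91cx42_master_set().
 */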
int ca91cx42_slot_get(void)
{
        u32 slot = 0;

        if (!geoid) {
                slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
                slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
        } else
                slot = geoid;

        return (int)slot;
}
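/*
 * With the "geoid" module parameter left at its default of 0, the slot
 * number is taken from the VCSR_BS register; a non-zero geoid overrides
 * whatever the hardware reports. This is the override referred to in the
 * patch title, intended for systems where the Universe II cannot obtain a
 * geographical address automatically.
 */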
static int __init ca91cx42_init(void)
{
        return pci_register_driver(&ca91cx42_driver);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct pci_dev *pdev)
{
        unsigned int crcsr_addr;
        int tmp, slot;

        /* XXX We may need to set this somehow as the Universe II does not
         * support geographical addressing.
         */
#if 0
        if (vme_slotnum != -1)
                iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
#endif
        slot = ca91cx42_slot_get();
        dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
        if (slot == 0) {
                dev_err(&pdev->dev, "Slot number is unset, not configuring "
                        "CR/CSR space\n");
                return -EINVAL;
        }

        /* Allocate mem for CR/CSR image */
        crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
                &crcsr_bus);
        if (crcsr_kernel == NULL) {
                dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
                        "image\n");
                return -ENOMEM;
        }

        memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

        crcsr_addr = slot * (512 * 1024);
        iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);

        tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
        tmp |= CA91CX42_VCSR_CTL_EN;
        iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

        return 0;
}
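/*
 * Each slot owns a 512 KB chunk of CR/CSR space, so the offset above is
 * simply slot * 0x80000; slot 3, for example, corresponds to CR/CSR offset
 * 0x180000. VCSR_TO is programmed with (crcsr_bus - crcsr_addr) so that an
 * incoming CR/CSR access at crcsr_addr + x lands at PCI bus address
 * crcsr_bus + x, i.e. inside the crcsr_kernel buffer.
 */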
static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
{
        u32 tmp;

        /* Turn off CR/CSR space */
        tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
        tmp &= ~CA91CX42_VCSR_CTL_EN;
        iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

        /* Free image */
        iowrite32(0, ca91cx42_bridge->base + VCSR_TO);

        pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
}
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int retval, i;
        u32 data;
        struct list_head *pos = NULL, *n;
        struct vme_master_resource *master_image;
        struct vme_slave_resource *slave_image;
#if 0
        struct vme_dma_resource *dma_ctrlr;
#endif
        struct vme_lm_resource *lm;

        /* We want to support more than one of each bridge so we need to
         * dynamically allocate the bridge structure
         */
        ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);

        if (ca91cx42_bridge == NULL) {
                dev_err(&pdev->dev, "Failed to allocate memory for device "
                        "structure\n");
                retval = -ENOMEM;
                goto err_struct;
        }

        memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));

        /* Enable the device */
        retval = pci_enable_device(pdev);
        if (retval) {
                dev_err(&pdev->dev, "Unable to enable device\n");
                goto err_enable;
        }

        /* Map Registers */
        retval = pci_request_regions(pdev, driver_name);
        if (retval) {
                dev_err(&pdev->dev, "Unable to reserve resources\n");
                goto err_resource;
        }

        /* map registers in BAR 0 */
        ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
                4096);
        if (!ca91cx42_bridge->base) {
                dev_err(&pdev->dev, "Unable to remap CRG region\n");
                retval = -EIO;
                goto err_remap;
        }

        /* Check to see if the mapping worked out */
        data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
        if (data != PCI_VENDOR_ID_TUNDRA) {
                dev_err(&pdev->dev, "PCI_ID check failed\n");
                retval = -EIO;
                goto err_test;
        }

        /* Initialize wait queues & mutual exclusion flags */
        /* XXX These need to be moved to the vme_bridge structure */
        init_waitqueue_head(&dma_queue);
        init_waitqueue_head(&iack_queue);
        mutex_init(&(vme_int));
        mutex_init(&(vme_rmw));

        ca91cx42_bridge->parent = &(pdev->dev);
        strcpy(ca91cx42_bridge->name, driver_name);

        /* Setup IRQ */
        retval = ca91cx42_irq_init(ca91cx42_bridge);
        if (retval != 0) {
                dev_err(&pdev->dev, "Chip Initialization failed.\n");
                goto err_irq;
        }

        /* Add master windows to list */
        INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
        for (i = 0; i < CA91C142_MAX_MASTER; i++) {
                master_image = kmalloc(sizeof(struct vme_master_resource),
                        GFP_KERNEL);
                if (master_image == NULL) {
                        dev_err(&pdev->dev, "Failed to allocate memory for "
                                "master resource structure\n");
                        retval = -ENOMEM;
                        goto err_master;
                }
                master_image->parent = ca91cx42_bridge;
                spin_lock_init(&(master_image->lock));
                master_image->locked = 0;
                master_image->number = i;
                master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
                        VME_CRCSR | VME_USER1 | VME_USER2;
                master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
                        VME_SUPER | VME_USER | VME_PROG | VME_DATA;
                master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
                memset(&(master_image->pci_resource), 0,
                        sizeof(struct resource));
                master_image->kern_base = NULL;
                list_add_tail(&(master_image->list),
                        &(ca91cx42_bridge->master_resources));
        }

        /* Add slave windows to list */
        INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
        for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
                slave_image = kmalloc(sizeof(struct vme_slave_resource),
                        GFP_KERNEL);
                if (slave_image == NULL) {
                        dev_err(&pdev->dev, "Failed to allocate memory for "
                                "slave resource structure\n");
                        retval = -ENOMEM;
                        goto err_slave;
                }
                slave_image->parent = ca91cx42_bridge;
                mutex_init(&(slave_image->mtx));
                slave_image->locked = 0;
                slave_image->number = i;
                slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
                        VME_USER2;

                /* Only windows 0 and 4 support A16 */
                if (i == 0 || i == 4)
                        slave_image->address_attr |= VME_A16;

                slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
                        VME_SUPER | VME_USER | VME_PROG | VME_DATA;
                list_add_tail(&(slave_image->list),
                        &(ca91cx42_bridge->slave_resources));
        }
#if 0
        /* Add dma engines to list */
        INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
        for (i = 0; i < CA91C142_MAX_DMA; i++) {
                dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
                        GFP_KERNEL);
                if (dma_ctrlr == NULL) {
                        dev_err(&pdev->dev, "Failed to allocate memory for "
                                "dma resource structure\n");
                        retval = -ENOMEM;
                        goto err_dma;
                }
                dma_ctrlr->parent = ca91cx42_bridge;
                mutex_init(&(dma_ctrlr->mtx));
                dma_ctrlr->locked = 0;
                dma_ctrlr->number = i;
                INIT_LIST_HEAD(&(dma_ctrlr->pending));
                INIT_LIST_HEAD(&(dma_ctrlr->running));
                list_add_tail(&(dma_ctrlr->list),
                        &(ca91cx42_bridge->dma_resources));
        }
#endif
        /* Add location monitor to list */
        INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
        lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
        if (lm == NULL) {
                dev_err(&pdev->dev, "Failed to allocate memory for "
                        "location monitor resource structure\n");
                retval = -ENOMEM;
                goto err_lm;
        }
        lm->parent = ca91cx42_bridge;
        mutex_init(&(lm->mtx));
        lm->locked = 0;
        lm->number = 1;
        lm->monitors = 4;
        list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));

        ca91cx42_bridge->slave_get = ca91cx42_slave_get;
        ca91cx42_bridge->slave_set = ca91cx42_slave_set;
        ca91cx42_bridge->master_get = ca91cx42_master_get;
        ca91cx42_bridge->master_set = ca91cx42_master_set;
        ca91cx42_bridge->master_read = ca91cx42_master_read;
        ca91cx42_bridge->master_write = ca91cx42_master_write;
#if 0
        ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
        ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
        ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
        ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
#endif
        ca91cx42_bridge->irq_set = ca91cx42_irq_set;
        ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
#if 0
        ca91cx42_bridge->lm_set = ca91cx42_lm_set;
        ca91cx42_bridge->lm_get = ca91cx42_lm_get;
        ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
        ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
#endif
        ca91cx42_bridge->slot_get = ca91cx42_slot_get;

        data = ioread32(ca91cx42_bridge->base + MISC_CTL);
        dev_info(&pdev->dev, "Board is%s the VME system controller\n",
                (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
        dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());

        if (ca91cx42_crcsr_init(pdev)) {
                dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
                retval = -EINVAL;
#if 0
                goto err_crcsr;
#endif
        }

        /* Need to save ca91cx42_bridge pointer locally in link list for use in
         * ca91cx42_remove()
         */
        retval = vme_register_bridge(ca91cx42_bridge);
        if (retval != 0) {
                dev_err(&pdev->dev, "Chip Registration failed.\n");
                goto err_reg;
        }

        return 0;

        vme_unregister_bridge(ca91cx42_bridge);
err_reg:
        ca91cx42_crcsr_exit(pdev);
#if 0
err_crcsr:
#endif
err_lm:
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &(ca91cx42_bridge->lm_resources)) {
                lm = list_entry(pos, struct vme_lm_resource, list);
                list_del(pos);
                kfree(lm);
        }
#if 0
err_dma:
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &(ca91cx42_bridge->dma_resources)) {
                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
                list_del(pos);
                kfree(dma_ctrlr);
        }
#endif
err_slave:
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &(ca91cx42_bridge->slave_resources)) {
                slave_image = list_entry(pos, struct vme_slave_resource, list);
                list_del(pos);
                kfree(slave_image);
        }
err_master:
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &(ca91cx42_bridge->master_resources)) {
                master_image = list_entry(pos, struct vme_master_resource,
                        list);
                list_del(pos);
                kfree(master_image);
        }

        ca91cx42_irq_exit(pdev);
err_irq:
err_test:
        iounmap(ca91cx42_bridge->base);
err_remap:
        pci_release_regions(pdev);
err_resource:
        pci_disable_device(pdev);
err_enable:
        kfree(ca91cx42_bridge);
err_struct:
        return retval;
}
void ca91cx42_remove(struct pci_dev *pdev)
{
        struct list_head *pos = NULL, *n;
        struct vme_master_resource *master_image;
        struct vme_slave_resource *slave_image;
        struct vme_dma_resource *dma_ctrlr;
        struct vme_lm_resource *lm;

        /* Turn off Ints */
        iowrite32(0, ca91cx42_bridge->base + LINT_EN);

        /* Turn off the windows */
        iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
        iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
        iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
        iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
        iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
        iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
        iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
        iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
        iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
        iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
        iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
        iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
        iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
        iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
        iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
        iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);

        vme_unregister_bridge(ca91cx42_bridge);
#if 0
        ca91cx42_crcsr_exit(pdev);
#endif
        /* resources are stored in link list */
        list_for_each_safe(pos, n, &(ca91cx42_bridge->lm_resources)) {
                lm = list_entry(pos, struct vme_lm_resource, list);
                list_del(pos);
                kfree(lm);
        }

        /* resources are stored in link list */
        /* XXX dma_resources is only initialised in the #if 0 block in
         * ca91cx42_probe(), so this list is currently never populated.
         */
        list_for_each_safe(pos, n, &(ca91cx42_bridge->dma_resources)) {
                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
                list_del(pos);
                kfree(dma_ctrlr);
        }

        /* resources are stored in link list */
        list_for_each_safe(pos, n, &(ca91cx42_bridge->slave_resources)) {
                slave_image = list_entry(pos, struct vme_slave_resource, list);
                list_del(pos);
                kfree(slave_image);
        }

        /* resources are stored in link list */
        list_for_each_safe(pos, n, &(ca91cx42_bridge->master_resources)) {
                master_image = list_entry(pos, struct vme_master_resource,
                        list);
                list_del(pos);
                kfree(master_image);
        }

        ca91cx42_irq_exit(pdev);

        iounmap(ca91cx42_bridge->base);

        pci_release_regions(pdev);

        pci_disable_device(pdev);

        kfree(ca91cx42_bridge);
}
static void __exit ca91cx42_exit(void)
{
        pci_unregister_driver(&ca91cx42_driver);
}

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);
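/*
 * The override can be supplied at load time, for example (slot number
 * purely illustrative):
 *
 *   modprobe vme_ca91cx42 geoid=4
 *
 * which makes ca91cx42_slot_get() report slot 4 regardless of the value
 * latched in VCSR_BS.
 */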
MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);
/*----------------------------------------------------------------------------
 * STAGING
 *--------------------------------------------------------------------------*/

#if 0
#define SWIZZLE(X) (((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | \
                    ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
{
        int temp_ctl = 0;
        int tempBS = 0;
        int tempBD = 0;
        int tempTO = 0;
        int vmeBS = 0;
        int vmeBD = 0;
        int *rmw_pci_data_ptr = NULL;
        int *vaDataPtr = NULL;
        int i;
        vmeOutWindowCfg_t vmeOut;
        if (vmeRmw->maxAttempts < 1) {
                return -EINVAL;
        }
        if (vmeRmw->targetAddrU) {
                return -EINVAL;
        }
        /* Find the PCI address that maps to the desired VME address */
        for (i = 0; i < 8; i++) {
                temp_ctl = ioread32(ca91cx42_bridge->base +
                        CA91CX42_LSI_CTL[i]);
                if ((temp_ctl & 0x80000000) == 0) {
                        continue;
                }
                memset(&vmeOut, 0, sizeof(vmeOut));
                vmeOut.windowNbr = i;
                ca91cx42_get_out_bound(&vmeOut);
                if (vmeOut.addrSpace != vmeRmw->addrSpace) {
                        continue;
                }
                tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
                tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
                tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
                vmeBS = tempBS + tempTO;
                vmeBD = tempBD + tempTO;
                if ((vmeRmw->targetAddr >= vmeBS) &&
                    (vmeRmw->targetAddr < vmeBD)) {
                        rmw_pci_data_ptr =
                                (int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
                        vaDataPtr =
                                (int *)(out_image_va[i] +
                                        (vmeRmw->targetAddr - vmeBS));
                        break;
                }
        }

        /* If no window - fail. */
        if (rmw_pci_data_ptr == NULL) {
                return -EINVAL;
        }
        /* Setup the RMW registers. */
        iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
        iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
        iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
                SCYC_CMP);
        iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
        iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
        iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);

        /* Run the RMW cycle until either success or max attempts. */
        vmeRmw->numAttempts = 1;
        while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {

                if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
                    (vmeRmw->swapData & vmeRmw->enableMask)) {

                        iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
                        break;
                }
                vmeRmw->numAttempts++;
        }

        /* If no success, set num Attempts to be greater than max attempts */
        if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
                vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
        }

        return 0;
}
int uniSetupDctlReg(vmeDmaPacket_t *vmeDma, int *dctlregreturn)
{
        unsigned int dctlreg = 0x80;
        struct vmeAttr *vmeAttr;

        if (vmeDma->srcBus == VME_DMA_VME) {
                dctlreg = 0;
                vmeAttr = &vmeDma->srcVmeAttr;
        } else {
                dctlreg = 0x80000000;
                vmeAttr = &vmeDma->dstVmeAttr;
        }

        switch (vmeAttr->maxDataWidth) {
        case VME_D8:
                break;
        case VME_D16:
                dctlreg |= 0x00400000;
                break;
        case VME_D32:
                dctlreg |= 0x00800000;
                break;
        case VME_D64:
                dctlreg |= 0x00C00000;
                break;
        }

        switch (vmeAttr->addrSpace) {
        case VME_A16:
                break;
        case VME_A24:
                dctlreg |= 0x00010000;
                break;
        case VME_A32:
                dctlreg |= 0x00020000;
                break;
        case VME_USER1:
                dctlreg |= 0x00060000;
                break;
        case VME_USER2:
                dctlreg |= 0x00070000;
                break;

        case VME_A64:           /* not supported in Universe DMA */
        case VME_CRCSR:
        case VME_USER3:
        case VME_USER4:
                return -EINVAL;
                break;
        }
        if (vmeAttr->userAccessType == VME_PROG) {
                dctlreg |= 0x00004000;
        }
        if (vmeAttr->dataAccessType == VME_SUPER) {
                dctlreg |= 0x00001000;
        }
        if (vmeAttr->xferProtocol != VME_SCT) {
                dctlreg |= 0x00000100;
        }
        *dctlregreturn = dctlreg;
        return 0;
}
unsigned int
ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
{
        unsigned int val;

        /* Setup registers as needed for direct or chained. */
        if (dgcsreg & 0x8000000) {
                iowrite32(0, ca91cx42_bridge->base + DTBC);
                iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
        } else {
#if 0
                printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
                printk(KERN_ERR "Starting: DVA = %08x\n",
                        ioread32(&vmeLL->dva));
                printk(KERN_ERR "Starting: DLV = %08x\n",
                        ioread32(&vmeLL->dlv));
                printk(KERN_ERR "Starting: DTBC = %08x\n",
                        ioread32(&vmeLL->dtbc));
                printk(KERN_ERR "Starting: DCTL = %08x\n",
                        ioread32(&vmeLL->dctl));
#endif
                /* Write registers */
                iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
                iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
                iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
                iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
                iowrite32(0, ca91cx42_bridge->base + DCPP);
        }

        /* Start the operation */
        iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
        val = get_tbl();
        iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
        return val;
}
TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t *vmeDma)
{
        vmeDmaPacket_t *vmeCur;
        int maxPerPage;
        int currentLLcount;
        TDMA_Cmd_Packet *startLL;
        TDMA_Cmd_Packet *currentLL;
        TDMA_Cmd_Packet *nextLL;
        unsigned int dctlreg = 0;

        maxPerPage = PAGESIZE / sizeof(TDMA_Cmd_Packet) - 1;
        startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
        if (startLL == 0) {
                return startLL;
        }
        /* First allocate pages for descriptors and create linked list */
        vmeCur = vmeDma;
        currentLL = startLL;
        currentLLcount = 0;
        while (vmeCur != 0) {
                if (vmeCur->pNextPacket != 0) {
                        currentLL->dcpp = (unsigned int)(currentLL + 1);
                        currentLLcount++;
                        if (currentLLcount >= maxPerPage) {
                                currentLL->dcpp =
                                        __get_free_pages(GFP_KERNEL, 0);
                                currentLLcount = 0;
                        }
                        currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
                } else {
                        currentLL->dcpp = (unsigned int)0;
                }
                vmeCur = vmeCur->pNextPacket;
        }

        /* Next fill in information for each descriptor */
        vmeCur = vmeDma;
        currentLL = startLL;
        while (vmeCur != 0) {
                if (vmeCur->srcBus == VME_DMA_VME) {
                        iowrite32(vmeCur->srcAddr, &currentLL->dva);
                        iowrite32(vmeCur->dstAddr, &currentLL->dlv);
                } else {
                        iowrite32(vmeCur->srcAddr, &currentLL->dlv);
                        iowrite32(vmeCur->dstAddr, &currentLL->dva);
                }
                uniSetupDctlReg(vmeCur, &dctlreg);
                iowrite32(dctlreg, &currentLL->dctl);
                iowrite32(vmeCur->byteCount, &currentLL->dtbc);

                currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
                vmeCur = vmeCur->pNextPacket;
        }

        /* Convert Links to PCI addresses. */
        currentLL = startLL;
        while (currentLL != 0) {
                nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
                if (nextLL == 0) {
                        iowrite32(1, &currentLL->dcpp);
                } else {
                        iowrite32((unsigned int)virt_to_bus(nextLL),
                                  &currentLL->dcpp);
                }
                currentLL = nextLL;
        }

        /* Return pointer to descriptors list */
        return startLL;
}
int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
{
        TDMA_Cmd_Packet *currentLL;
        TDMA_Cmd_Packet *prevLL;
        TDMA_Cmd_Packet *nextLL;
        unsigned int dcppreg;

        /* Convert Links to virtual addresses. */
        currentLL = startLL;
        while (currentLL != 0) {
                dcppreg = ioread32(&currentLL->dcpp);
                dcppreg &= ~6;
                if (dcppreg & 1) {
                        currentLL->dcpp = 0;
                } else {
                        currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
                }
                currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
        }

        /* Free all pages associated with the descriptors. */
        currentLL = startLL;
        prevLL = currentLL;
        while (currentLL != 0) {
                nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
                if (currentLL + 1 != nextLL) {
                        free_pages((int)prevLL, 0);
                        prevLL = nextLL;
                }
                currentLL = nextLL;
        }

        /* Return pointer to descriptors list */
        return 0;
}
int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
{
        unsigned int dgcsreg = 0;
        unsigned int dctlreg = 0;
        int val;
        int channel, x;
        vmeDmaPacket_t *curDma;
        TDMA_Cmd_Packet *dmaLL;

        /* Sanity check the VME chain. */
        channel = vmeDma->channel_number;
        if (channel > 0) {
                return -EINVAL;
        }
        curDma = vmeDma;
        while (curDma != 0) {
                if (curDma->byteCount == 0) {
                        return -EINVAL;
                }
                if (curDma->byteCount >= 0x1000000) {
                        return -EINVAL;
                }
                if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
                        return -EINVAL;
                }
                switch (curDma->srcBus) {
                case VME_DMA_PCI:
                        if (curDma->dstBus != VME_DMA_VME) {
                                return -EINVAL;
                        }
                        break;
                case VME_DMA_VME:
                        if (curDma->dstBus != VME_DMA_PCI) {
                                return -EINVAL;
                        }
                        break;
                default:
                        return -EINVAL;
                        break;
                }
                if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
                        return -EINVAL;
                }

                curDma = curDma->pNextPacket;
                if (curDma == vmeDma) { /* Endless Loop! */
                        return -EINVAL;
                }
        }

        /* calculate control register */
        if (vmeDma->pNextPacket != 0) {
                dgcsreg = 0x8000000;
        } else {
                dgcsreg = 0;
        }

        for (x = 0; x < 8; x++) {       /* vme block size */
                if ((256 << x) >= vmeDma->maxVmeBlockSize) {
                        break;
                }
        }
        if (x == 8)
                x = 7;
        dgcsreg |= (x << 20);

        if (vmeDma->vmeBackOffTimer) {
                for (x = 1; x < 8; x++) {       /* vme timer */
                        if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
                                break;
                        }
                }
                if (x == 8)
                        x = 7;
                dgcsreg |= (x << 16);
        }

        /* Setup the dma chain */
        dmaLL = ca91cx42_setup_dma(vmeDma);

        /* Start the DMA */
        if (dgcsreg & 0x8000000) {
                vmeDma->vmeDmaStartTick =
                        ca91cx42_start_dma(channel, dgcsreg,
                                (TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
        } else {
                vmeDma->vmeDmaStartTick =
                        ca91cx42_start_dma(channel, dgcsreg, dmaLL);
        }

        wait_event_interruptible(dma_queue,
                ioread32(ca91cx42_bridge->base + DGCS) & 0x800);

        val = ioread32(ca91cx42_bridge->base + DGCS);
        iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);

        vmeDma->vmeDmaStatus = 0;

        if (!(val & 0x00000800)) {
                vmeDma->vmeDmaStatus = val & 0x700;
                printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
                        " DGCS=%08X\n", val);
                val = ioread32(ca91cx42_bridge->base + DCPP);
                printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
                val = ioread32(ca91cx42_bridge->base + DCTL);
                printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
                val = ioread32(ca91cx42_bridge->base + DTBC);
                printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
                val = ioread32(ca91cx42_bridge->base + DLA);
                printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
                val = ioread32(ca91cx42_bridge->base + DVA);
                printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
        }

        /* Free the dma chain */
        ca91cx42_free_dma(dmaLL);

        return 0;
}
int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
{
        int temp_ctl = 0;

        if (vmeLm->addrU)
                return -EINVAL;

        switch (vmeLm->addrSpace) {
        case VME_A64:
        case VME_USER3:
        case VME_USER4:
                return -EINVAL;
        case VME_A16:
                temp_ctl |= 0x00000;
                break;
        case VME_A24:
                temp_ctl |= 0x10000;
                break;
        case VME_A32:
                temp_ctl |= 0x20000;
                break;
        case VME_CRCSR:
                temp_ctl |= 0x50000;
                break;
        case VME_USER1:
                temp_ctl |= 0x60000;
                break;
        case VME_USER2:
                temp_ctl |= 0x70000;
                break;
        }

        /* Disable while we are mucking around */
        iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

        iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);

        /* Setup CTL register. */
        if (vmeLm->userAccessType & VME_SUPER)
                temp_ctl |= 0x00200000;
        if (vmeLm->userAccessType & VME_USER)
                temp_ctl |= 0x00100000;
        if (vmeLm->dataAccessType & VME_PROG)
                temp_ctl |= 0x00800000;
        if (vmeLm->dataAccessType & VME_DATA)
                temp_ctl |= 0x00400000;

        /* Write ctl reg and enable */
        iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
        temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);

        return 0;
}
int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
{
        unsigned long flags;
        unsigned int tmp;

        spin_lock_irqsave(&lm_lock, flags);
        spin_unlock_irqrestore(&lm_lock, flags);
        /* XXX tmp is never assigned before this test */
        if (tmp == 0) {
                if (vmeLm->lmWait < 10)
                        vmeLm->lmWait = 10;
                interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);
        }
        iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

        return 0;
}
int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
{
        int temp_ctl = 0;
        int vbto = 0;

        temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
        temp_ctl &= 0x00FFFFFF;

        if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
                vbto = 7;
        } else if (vmeArb->globalTimeoutTimer > 1024) {
                return -EINVAL;
        } else if (vmeArb->globalTimeoutTimer == 0) {
                vbto = 0;
        } else {
                vbto = 1;
                while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
                        vbto += 1;
        }
        temp_ctl |= (vbto << 28);

        if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
                temp_ctl |= 1 << 26;

        if (vmeArb->arbiterTimeoutFlag)
                temp_ctl |= 2 << 24;

        iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);
        return 0;
}
int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
{
        int temp_ctl = 0;
        int vbto = 0;

        temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);

        vbto = (temp_ctl >> 28) & 0xF;
        if (vbto != 0)
                vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));

        if (temp_ctl & (1 << 26))
                vmeArb->arbiterMode = VME_PRIORITY_MODE;
        else
                vmeArb->arbiterMode = VME_R_ROBIN_MODE;

        if (temp_ctl & (3 << 24))
                vmeArb->arbiterTimeoutFlag = 1;

        return 0;
}
int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
{
        int temp_ctl = 0;

        temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
        temp_ctl &= 0xFF0FFFFF;

        if (vmeReq->releaseMode == 1)
                temp_ctl |= (1 << 20);

        if (vmeReq->fairMode == 1)
                temp_ctl |= (1 << 21);

        temp_ctl |= (vmeReq->requestLevel << 22);

        iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);
        return 0;
}

int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
{
        int temp_ctl = 0;

        temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);

        if (temp_ctl & (1 << 20))
                vmeReq->releaseMode = 1;

        if (temp_ctl & (1 << 21))
                vmeReq->fairMode = 1;

        vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;

        return 0;
}

#endif