/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@gefanuc.com>
 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

struct vme_bridge *ca91cx42_bridge;
wait_queue_head_t dma_queue;
wait_queue_head_t iack_queue;
wait_queue_head_t lm_queue;
wait_queue_head_t mbox_queue;

void (*lm_callback[4])(int);	/* Called in interrupt handler, be careful! */
void *crcsr_kernel;
dma_addr_t crcsr_bus;

struct mutex vme_rmw;	/* Only one RMW cycle at a time */
struct mutex vme_int;	/*
			 * Only one VME interrupt can be
			 * generated at a time, provide locking
			 */
struct mutex vme_irq;	/* Locking for VME irq callback configuration */
static char driver_name[] = "vme_ca91cx42";

static struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
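
/*
 * Interrupt sub-handlers. Each handler below services one source bit in
 * LINT_STAT: it wakes any waiter sleeping on the matching queue (or runs
 * the registered callback) and returns the mask of status bits it dealt
 * with, so the top-level handler can acknowledge exactly what was serviced.
 */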
static u32 ca91cx42_DMA_irqhandler(void)
{
	wake_up(&dma_queue);

	return CA91CX42_LINT_DMA;
}
static u32 ca91cx42_LM_irqhandler(u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}
/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(int mbox_mask)
{
	wake_up(&mbox_queue);

	return CA91CX42_LINT_MBOX;
}
static u32 ca91cx42_IACK_irqhandler(void)
{
	wake_up(&iack_queue);

	return CA91CX42_LINT_SW_IACK;
}
#if 0
int ca91cx42_bus_error_chk(int clrflag)
{
	int tmp;
	tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
	if (tmp & 0x08000000) {	/* S_TA is Set */
		if (clrflag)
			iowrite32(tmp | 0x08000000,
				ca91cx42_bridge->base + PCI_COMMAND);
		return 1;
	}
	return 0;
}
#endif
static u32 ca91cx42_VERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}
static u32 ca91cx42_LERR_irqhandler(void)
{
	int val;

	val = ioread32(ca91cx42_bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_LERR;
}
static u32 ca91cx42_VIRQ_irqhandler(int stat)
{
	int vec, i, serviced = 0;
	void (*call)(int, int, void *);
	void *priv_data;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(ca91cx42_bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			call = ca91cx42_bridge->irq[i - 1].callback[vec].func;
			priv_data =
				ca91cx42_bridge->irq[i - 1].callback[vec].priv_data;

			if (call != NULL)
				call(i, vec, priv_data);
			else
				printk(KERN_WARNING "Spurious VME interrupt, "
					"level:%x, vector:%x\n", i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
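
/*
 * Top-level PCI interrupt handler. It reads LINT_STAT, masks it against
 * LINT_EN so that only enabled sources are considered, dispatches to the
 * sub-handlers above, and finally writes the serviced bits back to
 * LINT_STAT to acknowledge them.
 */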
static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
{
	u32 stat, enable, serviced = 0;

	/* dev_id is the cookie registered in ca91cx42_irq_init() */
	if (dev_id != ca91cx42_bridge)
		return IRQ_NONE;

	enable = ioread32(ca91cx42_bridge->base + LINT_EN);
	stat = ioread32(ca91cx42_bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler();
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler();
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler();
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler();
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(stat);

	/* Clear serviced interrupts */
	iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
	int result, tmp;
	struct pci_dev *pdev;

	/* Need pdev */
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* The dev_id passed here must match the check in the interrupt
	 * handler and the free_irq() call in ca91cx42_irq_exit().
	 */
	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, ca91cx42_bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

	free_irq(pdev->irq, ca91cx42_bridge);
}
/*
 * Set up a VME interrupt
 */
int ca91cx42_request_irq(int level, int statid,
	void (*callback)(int level, int vector, void *priv_data),
	void *priv_data)
{
	u32 tmp;

	mutex_lock(&(vme_irq));

	if (ca91cx42_bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&(vme_irq));
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	ca91cx42_bridge->irq[level - 1].count++;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	ca91cx42_bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_VIRQ[level];
	iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

	mutex_unlock(&(vme_irq));

	return 0;
}
/*
 * Free VME interrupt
 */
void ca91cx42_free_irq(int level, int statid)
{
	u32 tmp;
	struct pci_dev *pdev;

	mutex_lock(&(vme_irq));

	ca91cx42_bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level */
	if (ca91cx42_bridge->irq[level - 1].count == 0) {
		tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
		tmp &= ~CA91CX42_LINT_VIRQ[level];
		iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);

		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}

	ca91cx42_bridge->irq[level - 1].callback[statid].func = NULL;
	ca91cx42_bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&(vme_irq));
}
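
/*
 * Generate a VME bus interrupt at the requested level. The Universe can
 * only generate even status/ID vectors, hence the LSB check below.
 * XXX The IACK wait uses wait_event_interruptible() with a constant-false
 * condition, so the wake_up() from ca91cx42_IACK_irqhandler() does not end
 * the sleep by itself; the wait only returns on signal delivery.
 */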
int ca91cx42_generate_irq(int level, int statid)
{
	u32 tmp;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(vme_int));

	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	mutex_unlock(&(vme_int));

	return 0;
}
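
/*
 * Set up a slave (VME-to-PCI) window. The image is disabled while the
 * base, bound and translation offset registers are programmed; the
 * control word is written without the enable bit first, and the enable
 * bit is applied last.
 */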
int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/* Images 0 and 4 have 4kB granularity, the others 64kB. This must
	 * be known before the bound address is calculated below.
	 */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	pci_offset = pci_base - vme_base;

	/* XXX Need to check that vme_base, vme_bound and pci_offset aren't
	 * too big for registers
	 */

	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		printk(KERN_ERR "Invalid PCI Offset alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeIn->wrPostEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PWEN;
	if (vmeIn->prefetchEnable)
		temp_ctl |= CA91CX42_VSI_CTL_PREN;
	if (vmeIn->rmwLock)
		temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
	if (vmeIn->data64BitCapable)
		temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
#endif

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}
/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->pci_resource.end -
		image->pci_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		if (image->pci_resource.name != NULL)
			kfree(image->pci_resource.name);
		release_resource(&(image->pci_resource));
		memset(&(image->pci_resource), 0, sizeof(struct resource));
	}

	if (image->pci_resource.name == NULL) {
		image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
		if (image->pci_resource.name == NULL) {
			printk(KERN_ERR "Unable to allocate memory for resource"
				" name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->pci_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->pci_resource.start = 0;
	image->pci_resource.end = (unsigned long)size;
	image->pci_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		printk(KERN_ERR "Failed to allocate mem resource for "
			"window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->pci_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->pci_resource.start, size);
	if (image->kern_base == NULL) {
		printk(KERN_ERR "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	iounmap(image->kern_base);
	image->kern_base = NULL;
err_remap:
	release_resource(&(image->pci_resource));
err_resource:
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
err_name:
	return retval;
}
/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->pci_resource));
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
}
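
/*
 * Set up a master (PCI-to-VME) window. The Universe's LSI images have 4kB
 * granularity, so both the VME base address and the window size must be
 * 4kB aligned; the backing PCI resource is allocated here before the
 * image registers are programmed.
 */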
int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;

	/* Verify input data */
	if (vme_base & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & 0xFFF) {
		printk(KERN_ERR "Invalid VME Window size alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/* XXX We should do this much later, so that we can exit without
	 * needing to redo the mapping...
	 *
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Unable to allocate memory for resource\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->pci_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + (size - 0x1000);
	vme_offset = vme_base - pci_base;

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (vmeOut->wrPostEnable)
		temp_ctl |= 0x40000000;
#endif

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
		break;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
		break;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));
	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
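
/*
 * Read back the configuration of a master window from the hardware
 * registers. Callers must hold the image lock; ca91cx42_master_get()
 * below is the locked wrapper.
 */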
int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;

	i = image->number;

	ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (pci_bound - pci_base) + 0x1000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	/* XXX Prefetch stuff currently unsupported */
#if 0
	if (ctl & 0x40000000)
		vmeOut->wrPostEnable = 1;
#endif

	return 0;
}
int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int retval;

	spin_lock(&(image->lock));

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&(image->lock));

	return retval;
}
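
/*
 * Master window read/write: simple PIO transfers through the mapped
 * kernel base address using memcpy_fromio()/memcpy_toio(), serialised by
 * the image spinlock.
 */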
ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval;

	spin_lock(&(image->lock));

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}

ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval = 0;

	spin_lock(&(image->lock));

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}
int ca91cx42_slot_get(void)
{
	u32 slot = 0;

	slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
	slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);

	return (int)slot;
}
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
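/*
 * Each slot owns a 512kB region of CR/CSR space, so the offset is simply
 * slot * 512kB; for example, a board in slot 3 maps to offset 0x180000.
 */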
static int ca91cx42_crcsr_init(struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;

	/* XXX We may need to set this somehow as the Universe II does not
	 * support geographical addressing.
	 */
#if 0
	if (vme_slotnum != -1)
		iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
#endif
	slot = ca91cx42_slot_get();
	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&crcsr_bus);
	if (crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	crcsr_addr = slot * (512 * 1024);
	iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);

	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	return 0;
}
static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
{
	u32 tmp;

	/* Turn off CR/CSR space */
	tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, ca91cx42_bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
}
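
/*
 * Probe: enable the PCI device, map the 4kB register block in BAR 0,
 * verify the vendor ID through the new mapping, set up interrupts, and
 * build the lists of master, slave and location monitor resources before
 * registering the bridge with the VME core.
 */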
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
#if 0
	struct vme_dma_resource *dma_ctrlr;
#endif
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_bridge->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	/* XXX These need to be moved to the vme_bridge structure */
	init_waitqueue_head(&dma_queue);
	init_waitqueue_head(&iack_queue);
	mutex_init(&(vme_int));
	mutex_init(&(vme_irq));
	mutex_init(&(vme_rmw));

	ca91cx42_bridge->parent = &(pdev->dev);
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&(master_image->lock));
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&(master_image->pci_resource), 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&(master_image->list),
			&(ca91cx42_bridge->master_resources));
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&(slave_image->mtx));
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&(slave_image->list),
			&(ca91cx42_bridge->slave_resources));
	}
#if 0
	/* Add dma engines to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&(dma_ctrlr->mtx));
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		INIT_LIST_HEAD(&(dma_ctrlr->pending));
		INIT_LIST_HEAD(&(dma_ctrlr->running));
		list_add_tail(&(dma_ctrlr->list),
			&(ca91cx42_bridge->dma_resources));
	}
#endif
	/* Add location monitor to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&(lm->mtx));
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
#if 0
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
#endif
	ca91cx42_bridge->request_irq = ca91cx42_request_irq;
	ca91cx42_bridge->free_irq = ca91cx42_free_irq;
	ca91cx42_bridge->generate_irq = ca91cx42_generate_irq;
#if 0
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
#endif
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;

	data = ioread32(ca91cx42_bridge->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());

	if (ca91cx42_crcsr_init(pdev)) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		retval = -EINVAL;
#if 0
		goto err_crcsr;
#endif
	}

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	return 0;

	vme_unregister_bridge(ca91cx42_bridge);
err_reg:
	ca91cx42_crcsr_exit(pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
#if 0
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
#endif
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);
err_irq:
err_test:
	iounmap(ca91cx42_bridge->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}
void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;
	int i;

	/* Turn off Ints */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
	iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);
#if 0
	ca91cx42_crcsr_exit(pdev);
#endif
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(pdev);

	iounmap(ca91cx42_bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(ca91cx42_bridge);
}
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);
/*----------------------------------------------------------------------------
 * STAGING
 *--------------------------------------------------------------------------*/
#if 0
#define SWIZZLE(X) ( ((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
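
/*
 * SWIZZLE() reverses the byte order of a 32-bit value (e.g. 0x12345678
 * becomes 0x78563412), presumably converting between host and VMEbus
 * (big-endian) byte order for the SCYC compare/enable/swap registers.
 */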
int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
{
	int temp_ctl = 0;
	int tempBS = 0;
	int tempBD = 0;
	int tempTO = 0;
	int vmeBS = 0;
	int vmeBD = 0;
	int *rmw_pci_data_ptr = NULL;
	int *vaDataPtr = NULL;
	int i;
	vmeOutWindowCfg_t vmeOut;

	if (vmeRmw->maxAttempts < 1) {
		return -EINVAL;
	}
	if (vmeRmw->targetAddrU) {
		return -EINVAL;
	}

	/* Find the PCI address that maps to the desired VME address */
	for (i = 0; i < 8; i++) {
		temp_ctl = ioread32(ca91cx42_bridge->base +
			CA91CX42_LSI_CTL[i]);
		if ((temp_ctl & 0x80000000) == 0) {
			continue;
		}
		memset(&vmeOut, 0, sizeof(vmeOut));
		vmeOut.windowNbr = i;
		ca91cx42_get_out_bound(&vmeOut);
		if (vmeOut.addrSpace != vmeRmw->addrSpace) {
			continue;
		}
		tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
		tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
		tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
		vmeBS = tempBS + tempTO;
		vmeBD = tempBD + tempTO;
		if ((vmeRmw->targetAddr >= vmeBS) &&
		    (vmeRmw->targetAddr < vmeBD)) {
			rmw_pci_data_ptr =
				(int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
			vaDataPtr =
				(int *)(out_image_va[i] +
					(vmeRmw->targetAddr - vmeBS));
			break;
		}
	}

	/* If no window - fail. */
	if (rmw_pci_data_ptr == NULL) {
		return -EINVAL;
	}

	/* Setup the RMW registers. */
	iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
	iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
	iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
		SCYC_CMP);
	iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
	iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
	iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);

	/* Run the RMW cycle until either success or max attempts. */
	vmeRmw->numAttempts = 1;
	while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {

		if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
		    (vmeRmw->swapData & vmeRmw->enableMask)) {

			iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
			break;
		}
		vmeRmw->numAttempts++;
	}

	/* If no success, set num Attempts to be greater than max attempts */
	if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
		vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
	}

	return 0;
}
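
/*
 * Build the DCTL (DMA transfer control) register value for a DMA packet:
 * bit 31 selects the transfer direction, bits 23:22 the VME data width,
 * bits 18:16 the address space, and the PGM/SUPER/non-SCT attributes are
 * OR'd in below (field positions as implied by the constants used here).
 */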
int uniSetupDctlReg(vmeDmaPacket_t *vmeDma, int *dctlregreturn)
{
	unsigned int dctlreg = 0x80;
	struct vmeAttr *vmeAttr;

	if (vmeDma->srcBus == VME_DMA_VME) {
		dctlreg = 0;
		vmeAttr = &vmeDma->srcVmeAttr;
	} else {
		dctlreg = 0x80000000;
		vmeAttr = &vmeDma->dstVmeAttr;
	}

	switch (vmeAttr->maxDataWidth) {
	case VME_D8:
		break;
	case VME_D16:
		dctlreg |= 0x00400000;
		break;
	case VME_D32:
		dctlreg |= 0x00800000;
		break;
	case VME_D64:
		dctlreg |= 0x00C00000;
		break;
	}

	switch (vmeAttr->addrSpace) {
	case VME_A16:
		break;
	case VME_A24:
		dctlreg |= 0x00010000;
		break;
	case VME_A32:
		dctlreg |= 0x00020000;
		break;
	case VME_USER1:
		dctlreg |= 0x00060000;
		break;
	case VME_USER2:
		dctlreg |= 0x00070000;
		break;

	case VME_A64:		/* not supported in Universe DMA */
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
		break;
	}
	if (vmeAttr->userAccessType == VME_PROG) {
		dctlreg |= 0x00004000;
	}
	if (vmeAttr->dataAccessType == VME_SUPER) {
		dctlreg |= 0x00001000;
	}
	if (vmeAttr->xferProtocol != VME_SCT) {
		dctlreg |= 0x00000100;
	}
	*dctlregreturn = dctlreg;
	return 0;
}
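
/*
 * Kick off a DMA transfer, either direct (register values written from the
 * first descriptor) or chained (DCPP pointed at the descriptor list). The
 * returned timestamp comes from get_tbl(), which on PowerPC reads the
 * timebase register; that is presumably why <asm/time.h> is included above.
 */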
unsigned int
ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
{
	unsigned int val;

	/* Setup registers as needed for direct or chained. */
	if (dgcsreg & 0x8000000) {
		iowrite32(0, ca91cx42_bridge->base + DTBC);
		iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
	} else {
#if 0
		printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
		printk(KERN_ERR "Starting: DVA = %08x\n",
			ioread32(&vmeLL->dva));
		printk(KERN_ERR "Starting: DLV = %08x\n",
			ioread32(&vmeLL->dlv));
		printk(KERN_ERR "Starting: DTBC = %08x\n",
			ioread32(&vmeLL->dtbc));
		printk(KERN_ERR "Starting: DCTL = %08x\n",
			ioread32(&vmeLL->dctl));
#endif
		/* Write registers */
		iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
		iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
		iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
		iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
		iowrite32(0, ca91cx42_bridge->base + DCPP);
	}

	/* Start the operation */
	iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
	val = get_tbl();
	iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
	return val;
}
TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t *vmeDma)
{
	vmeDmaPacket_t *vmeCur;
	int maxPerPage;
	int currentLLcount;
	TDMA_Cmd_Packet *startLL;
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dctlreg = 0;

	maxPerPage = PAGE_SIZE / sizeof(TDMA_Cmd_Packet) - 1;
	startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
	if (startLL == 0) {
		return startLL;
	}

	/* First allocate pages for descriptors and create linked list */
	vmeCur = vmeDma;
	currentLL = startLL;
	currentLLcount = 0;
	while (vmeCur != 0) {
		if (vmeCur->pNextPacket != 0) {
			currentLL->dcpp = (unsigned int)(currentLL + 1);
			currentLLcount++;
			if (currentLLcount >= maxPerPage) {
				currentLL->dcpp =
					__get_free_pages(GFP_KERNEL, 0);
				currentLLcount = 0;
			}
			currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		} else {
			currentLL->dcpp = (unsigned int)0;
		}
		vmeCur = vmeCur->pNextPacket;
	}

	/* Next fill in information for each descriptor */
	vmeCur = vmeDma;
	currentLL = startLL;
	while (vmeCur != 0) {
		if (vmeCur->srcBus == VME_DMA_VME) {
			iowrite32(vmeCur->srcAddr, &currentLL->dva);
			iowrite32(vmeCur->dstAddr, &currentLL->dlv);
		} else {
			iowrite32(vmeCur->srcAddr, &currentLL->dlv);
			iowrite32(vmeCur->dstAddr, &currentLL->dva);
		}
		uniSetupDctlReg(vmeCur, &dctlreg);
		iowrite32(dctlreg, &currentLL->dctl);
		iowrite32(vmeCur->byteCount, &currentLL->dtbc);

		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		vmeCur = vmeCur->pNextPacket;
	}

	/* Convert Links to PCI addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (nextLL == 0) {
			iowrite32(1, &currentLL->dcpp);
		} else {
			iowrite32((unsigned int)virt_to_bus(nextLL),
				&currentLL->dcpp);
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return startLL;
}
int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
{
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *prevLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dcppreg;

	/* Convert Links to virtual addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		dcppreg = ioread32(&currentLL->dcpp);
		dcppreg &= ~6;
		if (dcppreg & 1) {
			currentLL->dcpp = 0;
		} else {
			currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
		}
		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
	}

	/* Free all pages associated with the descriptors. */
	currentLL = startLL;
	prevLL = currentLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (currentLL + 1 != nextLL) {
			free_pages((int)prevLL, 0);
			prevLL = nextLL;
		}
		currentLL = nextLL;
	}

	return 0;
}
int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
{
	unsigned int dgcsreg = 0;
	unsigned int dctlreg = 0;
	int val;
	int channel, x;
	vmeDmaPacket_t *curDma;
	TDMA_Cmd_Packet *dmaLL;

	/* Sanity check the VME chain. */
	channel = vmeDma->channel_number;
	if (channel > 0) {
		return -EINVAL;
	}
	curDma = vmeDma;
	while (curDma != 0) {
		if (curDma->byteCount == 0) {
			return -EINVAL;
		}
		if (curDma->byteCount >= 0x1000000) {
			return -EINVAL;
		}
		if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
			return -EINVAL;
		}
		switch (curDma->srcBus) {
		case VME_DMA_PCI:
			if (curDma->dstBus != VME_DMA_VME) {
				return -EINVAL;
			}
			break;
		case VME_DMA_VME:
			if (curDma->dstBus != VME_DMA_PCI) {
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
			break;
		}
		if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
			return -EINVAL;
		}

		curDma = curDma->pNextPacket;
		if (curDma == vmeDma) {	/* Endless Loop! */
			return -EINVAL;
		}
	}

	/* calculate control register */
	if (vmeDma->pNextPacket != 0) {
		dgcsreg = 0x8000000;
	} else {
		dgcsreg = 0;
	}

	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((256 << x) >= vmeDma->maxVmeBlockSize) {
			break;
		}
	}
	if (x == 8)
		x = 7;
	dgcsreg |= (x << 20);

	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
				break;
			}
		}
		if (x == 8)
			x = 7;
		dgcsreg |= (x << 16);
	}

	/* Setup the dma chain */
	dmaLL = ca91cx42_setup_dma(vmeDma);

	/* Start the DMA */
	if (dgcsreg & 0x8000000) {
		vmeDma->vmeDmaStartTick =
			ca91cx42_start_dma(channel, dgcsreg,
				(TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
	} else {
		vmeDma->vmeDmaStartTick =
			ca91cx42_start_dma(channel, dgcsreg, dmaLL);
	}

	wait_event_interruptible(dma_queue,
		ioread32(ca91cx42_bridge->base + DGCS) & 0x800);

	val = ioread32(ca91cx42_bridge->base + DGCS);
	iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);

	vmeDma->vmeDmaStatus = 0;

	if (!(val & 0x00000800)) {
		vmeDma->vmeDmaStatus = val & 0x700;
		printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
			" DGCS=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCPP);
		printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCTL);
		printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DTBC);
		printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DLA);
		printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DVA);
		printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
	}

	/* Free the dma chain */
	ca91cx42_free_dma(dmaLL);

	return 0;
}
int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
{
	int temp_ctl = 0;

	if (vmeLm->addrU)
		return -EINVAL;

	switch (vmeLm->addrSpace) {
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
	case VME_A16:
		temp_ctl |= 0x00000;
		break;
	case VME_A24:
		temp_ctl |= 0x10000;
		break;
	case VME_A32:
		temp_ctl |= 0x20000;
		break;
	case VME_CRCSR:
		temp_ctl |= 0x50000;
		break;
	case VME_USER1:
		temp_ctl |= 0x60000;
		break;
	case VME_USER2:
		temp_ctl |= 0x70000;
		break;
	}

	/* Disable while we are mucking around */
	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);

	/* Setup CTL register. */
	if (vmeLm->userAccessType & VME_SUPER)
		temp_ctl |= 0x00200000;
	if (vmeLm->userAccessType & VME_USER)
		temp_ctl |= 0x00100000;
	if (vmeLm->dataAccessType & VME_PROG)
		temp_ctl |= 0x00800000;
	if (vmeLm->dataAccessType & VME_DATA)
		temp_ctl |= 0x00400000;

	/* Write ctl reg and enable */
	iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
	temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);

	return 0;
}
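
/*
 * XXX tmp is tested below but never assigned under the lock, so the sleep
 * is always entered with an indeterminate value; presumably a read of the
 * location monitor status was meant to go between the lock/unlock pair.
 */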
int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
{
	unsigned long flags;
	unsigned int tmp;

	spin_lock_irqsave(&lm_lock, flags);
	spin_unlock_irqrestore(&lm_lock, flags);
	if (tmp == 0) {
		if (vmeLm->lmWait < 10)
			vmeLm->lmWait = 10;
		interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);
	}
	iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);

	return 0;
}
int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
	temp_ctl &= 0x00FFFFFF;

	if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
		vbto = 7;
	} else if (vmeArb->globalTimeoutTimer > 1024) {
		return -EINVAL;
	} else if (vmeArb->globalTimeoutTimer == 0) {
		vbto = 0;
	} else {
		vbto = 1;
		while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
			vbto += 1;
	}

	temp_ctl |= (vbto << 28);

	if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
		temp_ctl |= 1 << 26;

	if (vmeArb->arbiterTimeoutFlag)
		temp_ctl |= 2 << 24;

	iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);

	return 0;
}

int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
{
	int temp_ctl = 0;
	int vbto = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);

	vbto = (temp_ctl >> 28) & 0xF;
	if (vbto != 0)
		vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));

	if (temp_ctl & (1 << 26))
		vmeArb->arbiterMode = VME_PRIORITY_MODE;
	else
		vmeArb->arbiterMode = VME_R_ROBIN_MODE;

	if (temp_ctl & (3 << 24))
		vmeArb->arbiterTimeoutFlag = 1;

	return 0;
}
int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
	temp_ctl &= 0xFF0FFFFF;

	if (vmeReq->releaseMode == 1)
		temp_ctl |= (1 << 20);

	if (vmeReq->fairMode == 1)
		temp_ctl |= (1 << 21);

	temp_ctl |= (vmeReq->requestLevel << 22);

	iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);

	return 0;
}

int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);

	if (temp_ctl & (1 << 20))
		vmeReq->releaseMode = 1;

	if (temp_ctl & (1 << 21))
		vmeReq->fairMode = 1;

	vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;

	return 0;
}

#endif