regulator: ltc3676: constify regulator_ops structure
[linux-2.6/btrfs-unstable.git] / drivers / vme / vme.c
blobbdbadaa47ef3ecb481b200785d4bc1aa9981f6e4
1 /*
2 * VME Bridge Framework
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/init.h>
17 #include <linux/export.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/vme.h>
35 #include "vme_bridge.h"
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;	/* bitmask of bus numbers already handed out */
static LIST_HEAD(vme_bus_list);		/* every registered struct vme_bridge */
static DEFINE_MUTEX(vme_buses_lock);	/* guards both of the above */

static int __init vme_init(void);
/* Convert an embedded struct device back to its containing vme_dev. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
50 * Find the bridge that the resource is associated with.
52 static struct vme_bridge *find_bridge(struct vme_resource *resource)
54 /* Get list to search */
55 switch (resource->type) {
56 case VME_MASTER:
57 return list_entry(resource->entry, struct vme_master_resource,
58 list)->parent;
59 break;
60 case VME_SLAVE:
61 return list_entry(resource->entry, struct vme_slave_resource,
62 list)->parent;
63 break;
64 case VME_DMA:
65 return list_entry(resource->entry, struct vme_dma_resource,
66 list)->parent;
67 break;
68 case VME_LM:
69 return list_entry(resource->entry, struct vme_lm_resource,
70 list)->parent;
71 break;
72 default:
73 printk(KERN_ERR "Unknown resource type\n");
74 return NULL;
75 break;
80 * Allocate a contiguous block of memory for use by the driver. This is used to
81 * create the buffers for the slave windows.
83 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
84 dma_addr_t *dma)
86 struct vme_bridge *bridge;
88 if (resource == NULL) {
89 printk(KERN_ERR "No resource\n");
90 return NULL;
93 bridge = find_bridge(resource);
94 if (bridge == NULL) {
95 printk(KERN_ERR "Can't find bridge\n");
96 return NULL;
99 if (bridge->parent == NULL) {
100 printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
101 return NULL;
104 if (bridge->alloc_consistent == NULL) {
105 printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
106 bridge->name);
107 return NULL;
110 return bridge->alloc_consistent(bridge->parent, size, dma);
112 EXPORT_SYMBOL(vme_alloc_consistent);
115 * Free previously allocated contiguous block of memory.
117 void vme_free_consistent(struct vme_resource *resource, size_t size,
118 void *vaddr, dma_addr_t dma)
120 struct vme_bridge *bridge;
122 if (resource == NULL) {
123 printk(KERN_ERR "No resource\n");
124 return;
127 bridge = find_bridge(resource);
128 if (bridge == NULL) {
129 printk(KERN_ERR "Can't find bridge\n");
130 return;
133 if (bridge->parent == NULL) {
134 printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
135 return;
138 if (bridge->free_consistent == NULL) {
139 printk(KERN_ERR "free_consistent not supported by bridge %s\n",
140 bridge->name);
141 return;
144 bridge->free_consistent(bridge->parent, size, vaddr, dma);
146 EXPORT_SYMBOL(vme_free_consistent);
148 size_t vme_get_size(struct vme_resource *resource)
150 int enabled, retval;
151 unsigned long long base, size;
152 dma_addr_t buf_base;
153 u32 aspace, cycle, dwidth;
155 switch (resource->type) {
156 case VME_MASTER:
157 retval = vme_master_get(resource, &enabled, &base, &size,
158 &aspace, &cycle, &dwidth);
159 if (retval)
160 return 0;
162 return size;
163 break;
164 case VME_SLAVE:
165 retval = vme_slave_get(resource, &enabled, &base, &size,
166 &buf_base, &aspace, &cycle);
167 if (retval)
168 return 0;
170 return size;
171 break;
172 case VME_DMA:
173 return 0;
174 break;
175 default:
176 printk(KERN_ERR "Unknown resource type\n");
177 return 0;
178 break;
181 EXPORT_SYMBOL(vme_get_size);
183 int vme_check_window(u32 aspace, unsigned long long vme_base,
184 unsigned long long size)
186 int retval = 0;
188 switch (aspace) {
189 case VME_A16:
190 if (((vme_base + size) > VME_A16_MAX) ||
191 (vme_base > VME_A16_MAX))
192 retval = -EFAULT;
193 break;
194 case VME_A24:
195 if (((vme_base + size) > VME_A24_MAX) ||
196 (vme_base > VME_A24_MAX))
197 retval = -EFAULT;
198 break;
199 case VME_A32:
200 if (((vme_base + size) > VME_A32_MAX) ||
201 (vme_base > VME_A32_MAX))
202 retval = -EFAULT;
203 break;
204 case VME_A64:
205 if ((size != 0) && (vme_base > U64_MAX + 1 - size))
206 retval = -EFAULT;
207 break;
208 case VME_CRCSR:
209 if (((vme_base + size) > VME_CRCSR_MAX) ||
210 (vme_base > VME_CRCSR_MAX))
211 retval = -EFAULT;
212 break;
213 case VME_USER1:
214 case VME_USER2:
215 case VME_USER3:
216 case VME_USER4:
217 /* User Defined */
218 break;
219 default:
220 printk(KERN_ERR "Invalid address space\n");
221 retval = -EINVAL;
222 break;
225 return retval;
227 EXPORT_SYMBOL(vme_check_window);
229 static u32 vme_get_aspace(int am)
231 switch (am) {
232 case 0x29:
233 case 0x2D:
234 return VME_A16;
235 case 0x38:
236 case 0x39:
237 case 0x3A:
238 case 0x3B:
239 case 0x3C:
240 case 0x3D:
241 case 0x3E:
242 case 0x3F:
243 return VME_A24;
244 case 0x8:
245 case 0x9:
246 case 0xA:
247 case 0xB:
248 case 0xC:
249 case 0xD:
250 case 0xE:
251 case 0xF:
252 return VME_A32;
253 case 0x0:
254 case 0x1:
255 case 0x3:
256 return VME_A64;
259 return 0;
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * Scans the bridge's slave windows for one that is unlocked and supports
 * the requested address space and cycle attributes; the first match is
 * claimed. The window stays locked until vme_slave_free(). Returns NULL
 * if no bridge, no compatible window, or allocation failure.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			/* Claim the window while still holding its mutex. */
			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
330 int vme_slave_set(struct vme_resource *resource, int enabled,
331 unsigned long long vme_base, unsigned long long size,
332 dma_addr_t buf_base, u32 aspace, u32 cycle)
334 struct vme_bridge *bridge = find_bridge(resource);
335 struct vme_slave_resource *image;
336 int retval;
338 if (resource->type != VME_SLAVE) {
339 printk(KERN_ERR "Not a slave resource\n");
340 return -EINVAL;
343 image = list_entry(resource->entry, struct vme_slave_resource, list);
345 if (bridge->slave_set == NULL) {
346 printk(KERN_ERR "Function not supported\n");
347 return -ENOSYS;
350 if (!(((image->address_attr & aspace) == aspace) &&
351 ((image->cycle_attr & cycle) == cycle))) {
352 printk(KERN_ERR "Invalid attributes\n");
353 return -EINVAL;
356 retval = vme_check_window(aspace, vme_base, size);
357 if (retval)
358 return retval;
360 return bridge->slave_set(image, enabled, vme_base, size, buf_base,
361 aspace, cycle);
363 EXPORT_SYMBOL(vme_slave_set);
365 int vme_slave_get(struct vme_resource *resource, int *enabled,
366 unsigned long long *vme_base, unsigned long long *size,
367 dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
369 struct vme_bridge *bridge = find_bridge(resource);
370 struct vme_slave_resource *image;
372 if (resource->type != VME_SLAVE) {
373 printk(KERN_ERR "Not a slave resource\n");
374 return -EINVAL;
377 image = list_entry(resource->entry, struct vme_slave_resource, list);
379 if (bridge->slave_get == NULL) {
380 printk(KERN_ERR "vme_slave_get not supported\n");
381 return -EINVAL;
384 return bridge->slave_get(image, enabled, vme_base, size, buf_base,
385 aspace, cycle);
387 EXPORT_SYMBOL(vme_slave_get);
/*
 * Release a slave window obtained with vme_slave_request().
 *
 * Unlocks the underlying window (logging if it was already free) and
 * frees the resource handle itself.
 */
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (slave_image == NULL) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Scans the bridge's master windows for one that is unlocked and supports
 * the requested address space, cycle and data-width attributes; the first
 * match is claimed (spinlock-protected, unlike the mutex used for slave
 * windows). The window stays locked until vme_master_free(). Returns NULL
 * on any failure.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			/* Claim the window while still holding its lock. */
			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
489 int vme_master_set(struct vme_resource *resource, int enabled,
490 unsigned long long vme_base, unsigned long long size, u32 aspace,
491 u32 cycle, u32 dwidth)
493 struct vme_bridge *bridge = find_bridge(resource);
494 struct vme_master_resource *image;
495 int retval;
497 if (resource->type != VME_MASTER) {
498 printk(KERN_ERR "Not a master resource\n");
499 return -EINVAL;
502 image = list_entry(resource->entry, struct vme_master_resource, list);
504 if (bridge->master_set == NULL) {
505 printk(KERN_WARNING "vme_master_set not supported\n");
506 return -EINVAL;
509 if (!(((image->address_attr & aspace) == aspace) &&
510 ((image->cycle_attr & cycle) == cycle) &&
511 ((image->width_attr & dwidth) == dwidth))) {
512 printk(KERN_WARNING "Invalid attributes\n");
513 return -EINVAL;
516 retval = vme_check_window(aspace, vme_base, size);
517 if (retval)
518 return retval;
520 return bridge->master_set(image, enabled, vme_base, size, aspace,
521 cycle, dwidth);
523 EXPORT_SYMBOL(vme_master_set);
525 int vme_master_get(struct vme_resource *resource, int *enabled,
526 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
527 u32 *cycle, u32 *dwidth)
529 struct vme_bridge *bridge = find_bridge(resource);
530 struct vme_master_resource *image;
532 if (resource->type != VME_MASTER) {
533 printk(KERN_ERR "Not a master resource\n");
534 return -EINVAL;
537 image = list_entry(resource->entry, struct vme_master_resource, list);
539 if (bridge->master_get == NULL) {
540 printk(KERN_WARNING "%s not supported\n", __func__);
541 return -EINVAL;
544 return bridge->master_get(image, enabled, vme_base, size, aspace,
545 cycle, dwidth);
547 EXPORT_SYMBOL(vme_master_get);
550 * Read data out of VME space into a buffer.
552 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
553 loff_t offset)
555 struct vme_bridge *bridge = find_bridge(resource);
556 struct vme_master_resource *image;
557 size_t length;
559 if (bridge->master_read == NULL) {
560 printk(KERN_WARNING "Reading from resource not supported\n");
561 return -EINVAL;
564 if (resource->type != VME_MASTER) {
565 printk(KERN_ERR "Not a master resource\n");
566 return -EINVAL;
569 image = list_entry(resource->entry, struct vme_master_resource, list);
571 length = vme_get_size(resource);
573 if (offset > length) {
574 printk(KERN_WARNING "Invalid Offset\n");
575 return -EFAULT;
578 if ((offset + count) > length)
579 count = length - offset;
581 return bridge->master_read(image, buf, count, offset);
584 EXPORT_SYMBOL(vme_master_read);
587 * Write data out to VME space from a buffer.
589 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
590 size_t count, loff_t offset)
592 struct vme_bridge *bridge = find_bridge(resource);
593 struct vme_master_resource *image;
594 size_t length;
596 if (bridge->master_write == NULL) {
597 printk(KERN_WARNING "Writing to resource not supported\n");
598 return -EINVAL;
601 if (resource->type != VME_MASTER) {
602 printk(KERN_ERR "Not a master resource\n");
603 return -EINVAL;
606 image = list_entry(resource->entry, struct vme_master_resource, list);
608 length = vme_get_size(resource);
610 if (offset > length) {
611 printk(KERN_WARNING "Invalid Offset\n");
612 return -EFAULT;
615 if ((offset + count) > length)
616 count = length - offset;
618 return bridge->master_write(image, buf, count, offset);
620 EXPORT_SYMBOL(vme_master_write);
/*
 * Perform RMW cycle to provided location.
 *
 * Delegates to the bridge's master_rmw hook.
 * NOTE(review): the return type is unsigned int yet -EINVAL is returned on
 * error, so callers must cast to detect failure — confirm intended contract.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (bridge->master_rmw == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
/*
 * Map the bus window behind a master resource into userspace.
 *
 * The mapping is made non-cached since it targets device memory, and must
 * fit entirely within the window's bus resource.
 */
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t phys_addr;
	unsigned long vma_size;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);
	/* vm_pgoff selects a page offset within the window. */
	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	vma_size = vma->vm_end - vma->vm_start;

	/* NOTE(review): a very large vm_pgoff could wrap phys_addr before
	 * this check — confirm callers sanitize the offset. */
	if (phys_addr + vma_size > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);
/*
 * Release a master window obtained with vme_master_request().
 *
 * Unlocks the underlying window (logging if it was already free) and
 * frees the resource handle itself.
 */
void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
		list);
	if (master_image == NULL) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&master_image->lock);
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&master_image->lock);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Claims the first unlocked controller whose route attributes cover the
 * requested routes; the controller stays locked until vme_dma_free().
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			/* Claim the controller while holding its mutex. */
			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
772 * Start new list
774 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
776 struct vme_dma_resource *ctrlr;
777 struct vme_dma_list *dma_list;
779 if (resource->type != VME_DMA) {
780 printk(KERN_ERR "Not a DMA resource\n");
781 return NULL;
784 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
786 dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
787 if (dma_list == NULL) {
788 printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
789 return NULL;
791 INIT_LIST_HEAD(&dma_list->entries);
792 dma_list->parent = ctrlr;
793 mutex_init(&dma_list->mtx);
795 return dma_list;
797 EXPORT_SYMBOL(vme_new_dma_list);
800 * Create "Pattern" type attributes
802 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
804 struct vme_dma_attr *attributes;
805 struct vme_dma_pattern *pattern_attr;
807 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
808 if (attributes == NULL) {
809 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
810 goto err_attr;
813 pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
814 if (pattern_attr == NULL) {
815 printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
816 goto err_pat;
819 attributes->type = VME_DMA_PATTERN;
820 attributes->private = (void *)pattern_attr;
822 pattern_attr->pattern = pattern;
823 pattern_attr->type = type;
825 return attributes;
827 err_pat:
828 kfree(attributes);
829 err_attr:
830 return NULL;
832 EXPORT_SYMBOL(vme_dma_pattern_attribute);
835 * Create "PCI" type attributes
837 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
839 struct vme_dma_attr *attributes;
840 struct vme_dma_pci *pci_attr;
842 /* XXX Run some sanity checks here */
844 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
845 if (attributes == NULL) {
846 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
847 goto err_attr;
850 pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
851 if (pci_attr == NULL) {
852 printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
853 goto err_pci;
858 attributes->type = VME_DMA_PCI;
859 attributes->private = (void *)pci_attr;
861 pci_attr->address = address;
863 return attributes;
865 err_pci:
866 kfree(attributes);
867 err_attr:
868 return NULL;
870 EXPORT_SYMBOL(vme_dma_pci_attribute);
873 * Create "VME" type attributes
875 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
876 u32 aspace, u32 cycle, u32 dwidth)
878 struct vme_dma_attr *attributes;
879 struct vme_dma_vme *vme_attr;
881 attributes = kmalloc(
882 sizeof(struct vme_dma_attr), GFP_KERNEL);
883 if (attributes == NULL) {
884 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
885 goto err_attr;
888 vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
889 if (vme_attr == NULL) {
890 printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
891 goto err_vme;
894 attributes->type = VME_DMA_VME;
895 attributes->private = (void *)vme_attr;
897 vme_attr->address = address;
898 vme_attr->aspace = aspace;
899 vme_attr->cycle = cycle;
900 vme_attr->dwidth = dwidth;
902 return attributes;
904 err_vme:
905 kfree(attributes);
906 err_attr:
907 return NULL;
909 EXPORT_SYMBOL(vme_dma_vme_attribute);
/*
 * Free attribute
 *
 * Releases both the type-specific payload and the attribute container
 * created by one of the vme_dma_*_attribute() helpers.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
/*
 * Append a src->dest transfer of @count bytes to a DMA link list.
 *
 * Uses mutex_trylock so that a list already submitted for execution cannot
 * be modified concurrently; returns -EINVAL in that case rather than
 * blocking. The actual entry construction is driver specific.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
945 int vme_dma_list_exec(struct vme_dma_list *list)
947 struct vme_bridge *bridge = list->parent->parent;
948 int retval;
950 if (bridge->dma_list_exec == NULL) {
951 printk(KERN_ERR "Link List DMA execution not supported\n");
952 return -EINVAL;
955 mutex_lock(&list->mtx);
957 retval = bridge->dma_list_exec(list);
959 mutex_unlock(&list->mtx);
961 return retval;
963 EXPORT_SYMBOL(vme_dma_list_exec);
/*
 * Empty and free a DMA link list.
 *
 * Fails with -EINVAL (without blocking) if the list is currently in use.
 * The list structure itself is only freed when the driver successfully
 * empties its entries.
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the DMA list. We need to go to the
	 * low level driver as DMA entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
/*
 * Release a DMA controller obtained with vme_dma_request().
 *
 * Refuses (with -EBUSY) if the controller mutex cannot be taken or if
 * transfers are still pending/running; otherwise unlocks the controller
 * and frees the resource handle.
 */
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	kfree(resource);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
1029 void vme_bus_error_handler(struct vme_bridge *bridge,
1030 unsigned long long address, int am)
1032 struct list_head *handler_pos = NULL;
1033 struct vme_error_handler *handler;
1034 int handler_triggered = 0;
1035 u32 aspace = vme_get_aspace(am);
1037 list_for_each(handler_pos, &bridge->vme_error_handlers) {
1038 handler = list_entry(handler_pos, struct vme_error_handler,
1039 list);
1040 if ((aspace == handler->aspace) &&
1041 (address >= handler->start) &&
1042 (address < handler->end)) {
1043 if (!handler->num_errors)
1044 handler->first_error = address;
1045 if (handler->num_errors != UINT_MAX)
1046 handler->num_errors++;
1047 handler_triggered = 1;
1051 if (!handler_triggered)
1052 dev_err(bridge->parent,
1053 "Unhandled VME access error at address 0x%llx\n",
1054 address);
1056 EXPORT_SYMBOL(vme_bus_error_handler);
1058 struct vme_error_handler *vme_register_error_handler(
1059 struct vme_bridge *bridge, u32 aspace,
1060 unsigned long long address, size_t len)
1062 struct vme_error_handler *handler;
1064 handler = kmalloc(sizeof(*handler), GFP_KERNEL);
1065 if (!handler)
1066 return NULL;
1068 handler->aspace = aspace;
1069 handler->start = address;
1070 handler->end = address + len;
1071 handler->num_errors = 0;
1072 handler->first_error = 0;
1073 list_add_tail(&handler->list, &bridge->vme_error_handlers);
1075 return handler;
1077 EXPORT_SYMBOL(vme_register_error_handler);
/* Remove a handler registered with vme_register_error_handler() and free it. */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
1086 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
1088 void (*call)(int, int, void *);
1089 void *priv_data;
1091 call = bridge->irq[level - 1].callback[statid].func;
1092 priv_data = bridge->irq[level - 1].callback[statid].priv_data;
1094 if (call != NULL)
1095 call(level, statid, priv_data);
1096 else
1097 printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
1098 level, statid);
1100 EXPORT_SYMBOL(vme_irq_handler);
/*
 * Register @callback for VME interrupt @level (1-7) / status-ID @statid
 * and enable the level at the bridge.
 *
 * Returns -EBUSY if a callback is already installed for that slot.
 * NOTE(review): @statid indexes the callback array without a range check
 * here — confirm callers pass a valid status/ID.
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
/*
 * Remove the callback for VME interrupt @level (1-7) / status-ID @statid.
 *
 * Disables the interrupt level at the bridge once no callbacks remain
 * registered at that level.
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
1180 int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1182 struct vme_bridge *bridge;
1184 bridge = vdev->bridge;
1185 if (bridge == NULL) {
1186 printk(KERN_ERR "Can't find VME bus\n");
1187 return -EINVAL;
1190 if ((level < 1) || (level > 7)) {
1191 printk(KERN_WARNING "Invalid interrupt level\n");
1192 return -EINVAL;
1195 if (bridge->irq_generate == NULL) {
1196 printk(KERN_WARNING "Interrupt generation not supported\n");
1197 return -EINVAL;
1200 return bridge->irq_generate(bridge, level, statid);
1202 EXPORT_SYMBOL(vme_irq_generate);
/*
 * Request the location monitor, return resource or NULL
 *
 * Claims the first unlocked location monitor on the device's bridge; it
 * stays locked until vme_lm_free().
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through LM resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			/* Claim the monitor while holding its mutex. */
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
1267 int vme_lm_count(struct vme_resource *resource)
1269 struct vme_lm_resource *lm;
1271 if (resource->type != VME_LM) {
1272 printk(KERN_ERR "Not a Location Monitor resource\n");
1273 return -EINVAL;
1276 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1278 return lm->monitors;
1280 EXPORT_SYMBOL(vme_lm_count);
1282 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1283 u32 aspace, u32 cycle)
1285 struct vme_bridge *bridge = find_bridge(resource);
1286 struct vme_lm_resource *lm;
1288 if (resource->type != VME_LM) {
1289 printk(KERN_ERR "Not a Location Monitor resource\n");
1290 return -EINVAL;
1293 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1295 if (bridge->lm_set == NULL) {
1296 printk(KERN_ERR "vme_lm_set not supported\n");
1297 return -EINVAL;
1300 return bridge->lm_set(lm, lm_base, aspace, cycle);
1302 EXPORT_SYMBOL(vme_lm_set);
1304 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1305 u32 *aspace, u32 *cycle)
1307 struct vme_bridge *bridge = find_bridge(resource);
1308 struct vme_lm_resource *lm;
1310 if (resource->type != VME_LM) {
1311 printk(KERN_ERR "Not a Location Monitor resource\n");
1312 return -EINVAL;
1315 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1317 if (bridge->lm_get == NULL) {
1318 printk(KERN_ERR "vme_lm_get not supported\n");
1319 return -EINVAL;
1322 return bridge->lm_get(lm, lm_base, aspace, cycle);
1324 EXPORT_SYMBOL(vme_lm_get);
1326 int vme_lm_attach(struct vme_resource *resource, int monitor,
1327 void (*callback)(void *), void *data)
1329 struct vme_bridge *bridge = find_bridge(resource);
1330 struct vme_lm_resource *lm;
1332 if (resource->type != VME_LM) {
1333 printk(KERN_ERR "Not a Location Monitor resource\n");
1334 return -EINVAL;
1337 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1339 if (bridge->lm_attach == NULL) {
1340 printk(KERN_ERR "vme_lm_attach not supported\n");
1341 return -EINVAL;
1344 return bridge->lm_attach(lm, monitor, callback, data);
1346 EXPORT_SYMBOL(vme_lm_attach);
1348 int vme_lm_detach(struct vme_resource *resource, int monitor)
1350 struct vme_bridge *bridge = find_bridge(resource);
1351 struct vme_lm_resource *lm;
1353 if (resource->type != VME_LM) {
1354 printk(KERN_ERR "Not a Location Monitor resource\n");
1355 return -EINVAL;
1358 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1360 if (bridge->lm_detach == NULL) {
1361 printk(KERN_ERR "vme_lm_detach not supported\n");
1362 return -EINVAL;
1365 return bridge->lm_detach(lm, monitor);
1367 EXPORT_SYMBOL(vme_lm_detach);
1369 void vme_lm_free(struct vme_resource *resource)
1371 struct vme_lm_resource *lm;
1373 if (resource->type != VME_LM) {
1374 printk(KERN_ERR "Not a Location Monitor resource\n");
1375 return;
1378 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1380 mutex_lock(&lm->mtx);
1382 /* XXX
1383 * Check to see that there aren't any callbacks still attached, if
1384 * there are we should probably be detaching them!
1387 lm->locked = 0;
1389 mutex_unlock(&lm->mtx);
1391 kfree(resource);
1393 EXPORT_SYMBOL(vme_lm_free);
1395 int vme_slot_num(struct vme_dev *vdev)
1397 struct vme_bridge *bridge;
1399 bridge = vdev->bridge;
1400 if (bridge == NULL) {
1401 printk(KERN_ERR "Can't find VME bus\n");
1402 return -EINVAL;
1405 if (bridge->slot_get == NULL) {
1406 printk(KERN_WARNING "vme_slot_num not supported\n");
1407 return -EINVAL;
1410 return bridge->slot_get(bridge);
1412 EXPORT_SYMBOL(vme_slot_num);
1414 int vme_bus_num(struct vme_dev *vdev)
1416 struct vme_bridge *bridge;
1418 bridge = vdev->bridge;
1419 if (bridge == NULL) {
1420 pr_err("Can't find VME bus\n");
1421 return -EINVAL;
1424 return bridge->num;
1426 EXPORT_SYMBOL(vme_bus_num);
1428 /* - Bridge Registration --------------------------------------------------- */
/* Device-core release callback: frees the vme_dev on the final put. */
static void vme_dev_release(struct device *dev)
{
	struct vme_dev *vdev = dev_to_vme_dev(dev);

	kfree(vdev);
}
1435 /* Common bridge initialization */
1436 struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
1438 INIT_LIST_HEAD(&bridge->vme_error_handlers);
1439 INIT_LIST_HEAD(&bridge->master_resources);
1440 INIT_LIST_HEAD(&bridge->slave_resources);
1441 INIT_LIST_HEAD(&bridge->dma_resources);
1442 INIT_LIST_HEAD(&bridge->lm_resources);
1443 mutex_init(&bridge->irq_mtx);
1445 return bridge;
1447 EXPORT_SYMBOL(vme_init_bridge);
1449 int vme_register_bridge(struct vme_bridge *bridge)
1451 int i;
1452 int ret = -1;
1454 mutex_lock(&vme_buses_lock);
1455 for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1456 if ((vme_bus_numbers & (1 << i)) == 0) {
1457 vme_bus_numbers |= (1 << i);
1458 bridge->num = i;
1459 INIT_LIST_HEAD(&bridge->devices);
1460 list_add_tail(&bridge->bus_list, &vme_bus_list);
1461 ret = 0;
1462 break;
1465 mutex_unlock(&vme_buses_lock);
1467 return ret;
1469 EXPORT_SYMBOL(vme_register_bridge);
1471 void vme_unregister_bridge(struct vme_bridge *bridge)
1473 struct vme_dev *vdev;
1474 struct vme_dev *tmp;
1476 mutex_lock(&vme_buses_lock);
1477 vme_bus_numbers &= ~(1 << bridge->num);
1478 list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
1479 list_del(&vdev->drv_list);
1480 list_del(&vdev->bridge_list);
1481 device_unregister(&vdev->dev);
1483 list_del(&bridge->bus_list);
1484 mutex_unlock(&vme_buses_lock);
1486 EXPORT_SYMBOL(vme_unregister_bridge);
1488 /* - Driver Registration --------------------------------------------------- */
1490 static int __vme_register_driver_bus(struct vme_driver *drv,
1491 struct vme_bridge *bridge, unsigned int ndevs)
1493 int err;
1494 unsigned int i;
1495 struct vme_dev *vdev;
1496 struct vme_dev *tmp;
1498 for (i = 0; i < ndevs; i++) {
1499 vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
1500 if (!vdev) {
1501 err = -ENOMEM;
1502 goto err_devalloc;
1504 vdev->num = i;
1505 vdev->bridge = bridge;
1506 vdev->dev.platform_data = drv;
1507 vdev->dev.release = vme_dev_release;
1508 vdev->dev.parent = bridge->parent;
1509 vdev->dev.bus = &vme_bus_type;
1510 dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
1511 vdev->num);
1513 err = device_register(&vdev->dev);
1514 if (err)
1515 goto err_reg;
1517 if (vdev->dev.platform_data) {
1518 list_add_tail(&vdev->drv_list, &drv->devices);
1519 list_add_tail(&vdev->bridge_list, &bridge->devices);
1520 } else
1521 device_unregister(&vdev->dev);
1523 return 0;
1525 err_reg:
1526 put_device(&vdev->dev);
1527 kfree(vdev);
1528 err_devalloc:
1529 list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
1530 list_del(&vdev->drv_list);
1531 list_del(&vdev->bridge_list);
1532 device_unregister(&vdev->dev);
1534 return err;
1537 static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1539 struct vme_bridge *bridge;
1540 int err = 0;
1542 mutex_lock(&vme_buses_lock);
1543 list_for_each_entry(bridge, &vme_bus_list, bus_list) {
1545 * This cannot cause trouble as we already have vme_buses_lock
1546 * and if the bridge is removed, it will have to go through
1547 * vme_unregister_bridge() to do it (which calls remove() on
1548 * the bridge which in turn tries to acquire vme_buses_lock and
1549 * will have to wait).
1551 err = __vme_register_driver_bus(drv, bridge, ndevs);
1552 if (err)
1553 break;
1555 mutex_unlock(&vme_buses_lock);
1556 return err;
1559 int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1561 int err;
1563 drv->driver.name = drv->name;
1564 drv->driver.bus = &vme_bus_type;
1565 INIT_LIST_HEAD(&drv->devices);
1567 err = driver_register(&drv->driver);
1568 if (err)
1569 return err;
1571 err = __vme_register_driver(drv, ndevs);
1572 if (err)
1573 driver_unregister(&drv->driver);
1575 return err;
1577 EXPORT_SYMBOL(vme_register_driver);
1579 void vme_unregister_driver(struct vme_driver *drv)
1581 struct vme_dev *dev, *dev_tmp;
1583 mutex_lock(&vme_buses_lock);
1584 list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
1585 list_del(&dev->drv_list);
1586 list_del(&dev->bridge_list);
1587 device_unregister(&dev->dev);
1589 mutex_unlock(&vme_buses_lock);
1591 driver_unregister(&drv->driver);
1593 EXPORT_SYMBOL(vme_unregister_driver);
1595 /* - Bus Registration ------------------------------------------------------ */
1597 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1599 struct vme_driver *vme_drv;
1601 vme_drv = container_of(drv, struct vme_driver, driver);
1603 if (dev->platform_data == vme_drv) {
1604 struct vme_dev *vdev = dev_to_vme_dev(dev);
1606 if (vme_drv->match && vme_drv->match(vdev))
1607 return 1;
1609 dev->platform_data = NULL;
1611 return 0;
1614 static int vme_bus_probe(struct device *dev)
1616 int retval = -ENODEV;
1617 struct vme_driver *driver;
1618 struct vme_dev *vdev = dev_to_vme_dev(dev);
1620 driver = dev->platform_data;
1622 if (driver->probe != NULL)
1623 retval = driver->probe(vdev);
1625 return retval;
1628 struct bus_type vme_bus_type = {
1629 .name = "vme",
1630 .match = vme_bus_match,
1631 .probe = vme_bus_probe,
1633 EXPORT_SYMBOL(vme_bus_type);
1635 static int __init vme_init(void)
1637 return bus_register(&vme_bus_type);
1639 subsys_initcall(vme_init);