GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / staging / vme / vme.c
blobb749e60a6ec72331fb860dd163e95bd7082e02e2
1 /*
2 * VME Bridge Framework
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
34 #include "vme.h"
35 #include "vme_bridge.h"
37 /* Bitmask and mutex to keep track of bridge numbers */
38 static unsigned int vme_bus_numbers;
39 static DEFINE_MUTEX(vme_bus_num_mtx);
41 static void __exit vme_exit(void);
42 static int __init vme_init(void);
46 * Find the bridge resource associated with a specific device resource
48 static struct vme_bridge *dev_to_bridge(struct device *dev)
50 return dev->platform_data;
54 * Find the bridge that the resource is associated with.
56 static struct vme_bridge *find_bridge(struct vme_resource *resource)
58 /* Get list to search */
59 switch (resource->type) {
60 case VME_MASTER:
61 return list_entry(resource->entry, struct vme_master_resource,
62 list)->parent;
63 break;
64 case VME_SLAVE:
65 return list_entry(resource->entry, struct vme_slave_resource,
66 list)->parent;
67 break;
68 case VME_DMA:
69 return list_entry(resource->entry, struct vme_dma_resource,
70 list)->parent;
71 break;
72 case VME_LM:
73 return list_entry(resource->entry, struct vme_lm_resource,
74 list)->parent;
75 break;
76 default:
77 printk(KERN_ERR "Unknown resource type\n");
78 return NULL;
79 break;
83 void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
84 dma_addr_t *dma)
86 struct vme_bridge *bridge;
87 struct pci_dev *pdev;
89 if (resource == NULL) {
90 printk(KERN_ERR "No resource\n");
91 return NULL;
94 bridge = find_bridge(resource);
95 if (bridge == NULL) {
96 printk(KERN_ERR "Can't find bridge\n");
97 return NULL;
100 /* Find pci_dev container of dev */
101 if (bridge->parent == NULL) {
102 printk(KERN_ERR "Dev entry NULL\n");
103 return NULL;
105 pdev = container_of(bridge->parent, struct pci_dev, dev);
107 return pci_alloc_consistent(pdev, size, dma);
109 EXPORT_SYMBOL(vme_alloc_consistent);
111 void vme_free_consistent(struct vme_resource *resource, size_t size,
112 void *vaddr, dma_addr_t dma)
114 struct vme_bridge *bridge;
115 struct pci_dev *pdev;
117 if (resource == NULL) {
118 printk(KERN_ERR "No resource\n");
119 return;
122 bridge = find_bridge(resource);
123 if (bridge == NULL) {
124 printk(KERN_ERR "Can't find bridge\n");
125 return;
128 /* Find pci_dev container of dev */
129 pdev = container_of(bridge->parent, struct pci_dev, dev);
131 pci_free_consistent(pdev, size, vaddr, dma);
133 EXPORT_SYMBOL(vme_free_consistent);
135 size_t vme_get_size(struct vme_resource *resource)
137 int enabled, retval;
138 unsigned long long base, size;
139 dma_addr_t buf_base;
140 vme_address_t aspace;
141 vme_cycle_t cycle;
142 vme_width_t dwidth;
144 switch (resource->type) {
145 case VME_MASTER:
146 retval = vme_master_get(resource, &enabled, &base, &size,
147 &aspace, &cycle, &dwidth);
149 return size;
150 break;
151 case VME_SLAVE:
152 retval = vme_slave_get(resource, &enabled, &base, &size,
153 &buf_base, &aspace, &cycle);
155 return size;
156 break;
157 case VME_DMA:
158 return 0;
159 break;
160 default:
161 printk(KERN_ERR "Unknown resource type\n");
162 return 0;
163 break;
166 EXPORT_SYMBOL(vme_get_size);
168 static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
169 unsigned long long size)
171 int retval = 0;
173 switch (aspace) {
174 case VME_A16:
175 if (((vme_base + size) > VME_A16_MAX) ||
176 (vme_base > VME_A16_MAX))
177 retval = -EFAULT;
178 break;
179 case VME_A24:
180 if (((vme_base + size) > VME_A24_MAX) ||
181 (vme_base > VME_A24_MAX))
182 retval = -EFAULT;
183 break;
184 case VME_A32:
185 if (((vme_base + size) > VME_A32_MAX) ||
186 (vme_base > VME_A32_MAX))
187 retval = -EFAULT;
188 break;
189 case VME_A64:
191 * Any value held in an unsigned long long can be used as the
192 * base
194 break;
195 case VME_CRCSR:
196 if (((vme_base + size) > VME_CRCSR_MAX) ||
197 (vme_base > VME_CRCSR_MAX))
198 retval = -EFAULT;
199 break;
200 case VME_USER1:
201 case VME_USER2:
202 case VME_USER3:
203 case VME_USER4:
204 /* User Defined */
205 break;
206 default:
207 printk(KERN_ERR "Invalid address space\n");
208 retval = -EINVAL;
209 break;
212 return retval;
216 * Request a slave image with specific attributes, return some unique
217 * identifier.
219 struct vme_resource *vme_slave_request(struct device *dev,
220 vme_address_t address, vme_cycle_t cycle)
222 struct vme_bridge *bridge;
223 struct list_head *slave_pos = NULL;
224 struct vme_slave_resource *allocated_image = NULL;
225 struct vme_slave_resource *slave_image = NULL;
226 struct vme_resource *resource = NULL;
228 bridge = dev_to_bridge(dev);
229 if (bridge == NULL) {
230 printk(KERN_ERR "Can't find VME bus\n");
231 goto err_bus;
234 /* Loop through slave resources */
235 list_for_each(slave_pos, &(bridge->slave_resources)) {
236 slave_image = list_entry(slave_pos,
237 struct vme_slave_resource, list);
239 if (slave_image == NULL) {
240 printk(KERN_ERR "Registered NULL Slave resource\n");
241 continue;
244 /* Find an unlocked and compatible image */
245 mutex_lock(&(slave_image->mtx));
246 if (((slave_image->address_attr & address) == address) &&
247 ((slave_image->cycle_attr & cycle) == cycle) &&
248 (slave_image->locked == 0)) {
250 slave_image->locked = 1;
251 mutex_unlock(&(slave_image->mtx));
252 allocated_image = slave_image;
253 break;
255 mutex_unlock(&(slave_image->mtx));
258 /* No free image */
259 if (allocated_image == NULL)
260 goto err_image;
262 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
263 if (resource == NULL) {
264 printk(KERN_WARNING "Unable to allocate resource structure\n");
265 goto err_alloc;
267 resource->type = VME_SLAVE;
268 resource->entry = &(allocated_image->list);
270 return resource;
272 err_alloc:
273 /* Unlock image */
274 mutex_lock(&(slave_image->mtx));
275 slave_image->locked = 0;
276 mutex_unlock(&(slave_image->mtx));
277 err_image:
278 err_bus:
279 return NULL;
281 EXPORT_SYMBOL(vme_slave_request);
283 int vme_slave_set(struct vme_resource *resource, int enabled,
284 unsigned long long vme_base, unsigned long long size,
285 dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
287 struct vme_bridge *bridge = find_bridge(resource);
288 struct vme_slave_resource *image;
289 int retval;
291 if (resource->type != VME_SLAVE) {
292 printk(KERN_ERR "Not a slave resource\n");
293 return -EINVAL;
296 image = list_entry(resource->entry, struct vme_slave_resource, list);
298 if (bridge->slave_set == NULL) {
299 printk(KERN_ERR "Function not supported\n");
300 return -ENOSYS;
303 if (!(((image->address_attr & aspace) == aspace) &&
304 ((image->cycle_attr & cycle) == cycle))) {
305 printk(KERN_ERR "Invalid attributes\n");
306 return -EINVAL;
309 retval = vme_check_window(aspace, vme_base, size);
310 if (retval)
311 return retval;
313 return bridge->slave_set(image, enabled, vme_base, size, buf_base,
314 aspace, cycle);
316 EXPORT_SYMBOL(vme_slave_set);
318 int vme_slave_get(struct vme_resource *resource, int *enabled,
319 unsigned long long *vme_base, unsigned long long *size,
320 dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
322 struct vme_bridge *bridge = find_bridge(resource);
323 struct vme_slave_resource *image;
325 if (resource->type != VME_SLAVE) {
326 printk(KERN_ERR "Not a slave resource\n");
327 return -EINVAL;
330 image = list_entry(resource->entry, struct vme_slave_resource, list);
332 if (bridge->slave_get == NULL) {
333 printk(KERN_ERR "vme_slave_get not supported\n");
334 return -EINVAL;
337 return bridge->slave_get(image, enabled, vme_base, size, buf_base,
338 aspace, cycle);
340 EXPORT_SYMBOL(vme_slave_get);
342 void vme_slave_free(struct vme_resource *resource)
344 struct vme_slave_resource *slave_image;
346 if (resource->type != VME_SLAVE) {
347 printk(KERN_ERR "Not a slave resource\n");
348 return;
351 slave_image = list_entry(resource->entry, struct vme_slave_resource,
352 list);
353 if (slave_image == NULL) {
354 printk(KERN_ERR "Can't find slave resource\n");
355 return;
358 /* Unlock image */
359 mutex_lock(&(slave_image->mtx));
360 if (slave_image->locked == 0)
361 printk(KERN_ERR "Image is already free\n");
363 slave_image->locked = 0;
364 mutex_unlock(&(slave_image->mtx));
366 /* Free up resource memory */
367 kfree(resource);
369 EXPORT_SYMBOL(vme_slave_free);
372 * Request a master image with specific attributes, return some unique
373 * identifier.
375 struct vme_resource *vme_master_request(struct device *dev,
376 vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
378 struct vme_bridge *bridge;
379 struct list_head *master_pos = NULL;
380 struct vme_master_resource *allocated_image = NULL;
381 struct vme_master_resource *master_image = NULL;
382 struct vme_resource *resource = NULL;
384 bridge = dev_to_bridge(dev);
385 if (bridge == NULL) {
386 printk(KERN_ERR "Can't find VME bus\n");
387 goto err_bus;
390 /* Loop through master resources */
391 list_for_each(master_pos, &(bridge->master_resources)) {
392 master_image = list_entry(master_pos,
393 struct vme_master_resource, list);
395 if (master_image == NULL) {
396 printk(KERN_WARNING "Registered NULL master resource\n");
397 continue;
400 /* Find an unlocked and compatible image */
401 spin_lock(&(master_image->lock));
402 if (((master_image->address_attr & address) == address) &&
403 ((master_image->cycle_attr & cycle) == cycle) &&
404 ((master_image->width_attr & dwidth) == dwidth) &&
405 (master_image->locked == 0)) {
407 master_image->locked = 1;
408 spin_unlock(&(master_image->lock));
409 allocated_image = master_image;
410 break;
412 spin_unlock(&(master_image->lock));
415 /* Check to see if we found a resource */
416 if (allocated_image == NULL) {
417 printk(KERN_ERR "Can't find a suitable resource\n");
418 goto err_image;
421 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
422 if (resource == NULL) {
423 printk(KERN_ERR "Unable to allocate resource structure\n");
424 goto err_alloc;
426 resource->type = VME_MASTER;
427 resource->entry = &(allocated_image->list);
429 return resource;
431 kfree(resource);
432 err_alloc:
433 /* Unlock image */
434 spin_lock(&(master_image->lock));
435 master_image->locked = 0;
436 spin_unlock(&(master_image->lock));
437 err_image:
438 err_bus:
439 return NULL;
441 EXPORT_SYMBOL(vme_master_request);
443 int vme_master_set(struct vme_resource *resource, int enabled,
444 unsigned long long vme_base, unsigned long long size,
445 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
447 struct vme_bridge *bridge = find_bridge(resource);
448 struct vme_master_resource *image;
449 int retval;
451 if (resource->type != VME_MASTER) {
452 printk(KERN_ERR "Not a master resource\n");
453 return -EINVAL;
456 image = list_entry(resource->entry, struct vme_master_resource, list);
458 if (bridge->master_set == NULL) {
459 printk(KERN_WARNING "vme_master_set not supported\n");
460 return -EINVAL;
463 if (!(((image->address_attr & aspace) == aspace) &&
464 ((image->cycle_attr & cycle) == cycle) &&
465 ((image->width_attr & dwidth) == dwidth))) {
466 printk(KERN_WARNING "Invalid attributes\n");
467 return -EINVAL;
470 retval = vme_check_window(aspace, vme_base, size);
471 if (retval)
472 return retval;
474 return bridge->master_set(image, enabled, vme_base, size, aspace,
475 cycle, dwidth);
477 EXPORT_SYMBOL(vme_master_set);
479 int vme_master_get(struct vme_resource *resource, int *enabled,
480 unsigned long long *vme_base, unsigned long long *size,
481 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
483 struct vme_bridge *bridge = find_bridge(resource);
484 struct vme_master_resource *image;
486 if (resource->type != VME_MASTER) {
487 printk(KERN_ERR "Not a master resource\n");
488 return -EINVAL;
491 image = list_entry(resource->entry, struct vme_master_resource, list);
493 if (bridge->master_get == NULL) {
494 printk(KERN_WARNING "vme_master_set not supported\n");
495 return -EINVAL;
498 return bridge->master_get(image, enabled, vme_base, size, aspace,
499 cycle, dwidth);
501 EXPORT_SYMBOL(vme_master_get);
504 * Read data out of VME space into a buffer.
506 ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
507 loff_t offset)
509 struct vme_bridge *bridge = find_bridge(resource);
510 struct vme_master_resource *image;
511 size_t length;
513 if (bridge->master_read == NULL) {
514 printk(KERN_WARNING "Reading from resource not supported\n");
515 return -EINVAL;
518 if (resource->type != VME_MASTER) {
519 printk(KERN_ERR "Not a master resource\n");
520 return -EINVAL;
523 image = list_entry(resource->entry, struct vme_master_resource, list);
525 length = vme_get_size(resource);
527 if (offset > length) {
528 printk(KERN_WARNING "Invalid Offset\n");
529 return -EFAULT;
532 if ((offset + count) > length)
533 count = length - offset;
535 return bridge->master_read(image, buf, count, offset);
538 EXPORT_SYMBOL(vme_master_read);
541 * Write data out to VME space from a buffer.
543 ssize_t vme_master_write(struct vme_resource *resource, void *buf,
544 size_t count, loff_t offset)
546 struct vme_bridge *bridge = find_bridge(resource);
547 struct vme_master_resource *image;
548 size_t length;
550 if (bridge->master_write == NULL) {
551 printk(KERN_WARNING "Writing to resource not supported\n");
552 return -EINVAL;
555 if (resource->type != VME_MASTER) {
556 printk(KERN_ERR "Not a master resource\n");
557 return -EINVAL;
560 image = list_entry(resource->entry, struct vme_master_resource, list);
562 length = vme_get_size(resource);
564 if (offset > length) {
565 printk(KERN_WARNING "Invalid Offset\n");
566 return -EFAULT;
569 if ((offset + count) > length)
570 count = length - offset;
572 return bridge->master_write(image, buf, count, offset);
574 EXPORT_SYMBOL(vme_master_write);
577 * Perform RMW cycle to provided location.
579 unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
580 unsigned int compare, unsigned int swap, loff_t offset)
582 struct vme_bridge *bridge = find_bridge(resource);
583 struct vme_master_resource *image;
585 if (bridge->master_rmw == NULL) {
586 printk(KERN_WARNING "Writing to resource not supported\n");
587 return -EINVAL;
590 if (resource->type != VME_MASTER) {
591 printk(KERN_ERR "Not a master resource\n");
592 return -EINVAL;
595 image = list_entry(resource->entry, struct vme_master_resource, list);
597 return bridge->master_rmw(image, mask, compare, swap, offset);
599 EXPORT_SYMBOL(vme_master_rmw);
601 void vme_master_free(struct vme_resource *resource)
603 struct vme_master_resource *master_image;
605 if (resource->type != VME_MASTER) {
606 printk(KERN_ERR "Not a master resource\n");
607 return;
610 master_image = list_entry(resource->entry, struct vme_master_resource,
611 list);
612 if (master_image == NULL) {
613 printk(KERN_ERR "Can't find master resource\n");
614 return;
617 /* Unlock image */
618 spin_lock(&(master_image->lock));
619 if (master_image->locked == 0)
620 printk(KERN_ERR "Image is already free\n");
622 master_image->locked = 0;
623 spin_unlock(&(master_image->lock));
625 /* Free up resource memory */
626 kfree(resource);
628 EXPORT_SYMBOL(vme_master_free);
631 * Request a DMA controller with specific attributes, return some unique
632 * identifier.
634 struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
636 struct vme_bridge *bridge;
637 struct list_head *dma_pos = NULL;
638 struct vme_dma_resource *allocated_ctrlr = NULL;
639 struct vme_dma_resource *dma_ctrlr = NULL;
640 struct vme_resource *resource = NULL;
642 printk(KERN_ERR "No VME resource Attribute tests done\n");
644 bridge = dev_to_bridge(dev);
645 if (bridge == NULL) {
646 printk(KERN_ERR "Can't find VME bus\n");
647 goto err_bus;
650 /* Loop through DMA resources */
651 list_for_each(dma_pos, &(bridge->dma_resources)) {
652 dma_ctrlr = list_entry(dma_pos,
653 struct vme_dma_resource, list);
655 if (dma_ctrlr == NULL) {
656 printk(KERN_ERR "Registered NULL DMA resource\n");
657 continue;
660 /* Find an unlocked and compatible controller */
661 mutex_lock(&(dma_ctrlr->mtx));
662 if (((dma_ctrlr->route_attr & route) == route) &&
663 (dma_ctrlr->locked == 0)) {
665 dma_ctrlr->locked = 1;
666 mutex_unlock(&(dma_ctrlr->mtx));
667 allocated_ctrlr = dma_ctrlr;
668 break;
670 mutex_unlock(&(dma_ctrlr->mtx));
673 /* Check to see if we found a resource */
674 if (allocated_ctrlr == NULL)
675 goto err_ctrlr;
677 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
678 if (resource == NULL) {
679 printk(KERN_WARNING "Unable to allocate resource structure\n");
680 goto err_alloc;
682 resource->type = VME_DMA;
683 resource->entry = &(allocated_ctrlr->list);
685 return resource;
687 err_alloc:
688 /* Unlock image */
689 mutex_lock(&(dma_ctrlr->mtx));
690 dma_ctrlr->locked = 0;
691 mutex_unlock(&(dma_ctrlr->mtx));
692 err_ctrlr:
693 err_bus:
694 return NULL;
696 EXPORT_SYMBOL(vme_dma_request);
699 * Start new list
701 struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
703 struct vme_dma_resource *ctrlr;
704 struct vme_dma_list *dma_list;
706 if (resource->type != VME_DMA) {
707 printk(KERN_ERR "Not a DMA resource\n");
708 return NULL;
711 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
713 dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
714 if (dma_list == NULL) {
715 printk(KERN_ERR "Unable to allocate memory for new dma list\n");
716 return NULL;
718 INIT_LIST_HEAD(&(dma_list->entries));
719 dma_list->parent = ctrlr;
720 mutex_init(&(dma_list->mtx));
722 return dma_list;
724 EXPORT_SYMBOL(vme_new_dma_list);
727 * Create "Pattern" type attributes
729 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
730 vme_pattern_t type)
732 struct vme_dma_attr *attributes;
733 struct vme_dma_pattern *pattern_attr;
735 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
736 if (attributes == NULL) {
737 printk(KERN_ERR "Unable to allocate memory for attributes "
738 "structure\n");
739 goto err_attr;
742 pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
743 if (pattern_attr == NULL) {
744 printk(KERN_ERR "Unable to allocate memory for pattern "
745 "attributes\n");
746 goto err_pat;
749 attributes->type = VME_DMA_PATTERN;
750 attributes->private = (void *)pattern_attr;
752 pattern_attr->pattern = pattern;
753 pattern_attr->type = type;
755 return attributes;
757 kfree(pattern_attr);
758 err_pat:
759 kfree(attributes);
760 err_attr:
761 return NULL;
763 EXPORT_SYMBOL(vme_dma_pattern_attribute);
766 * Create "PCI" type attributes
768 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
770 struct vme_dma_attr *attributes;
771 struct vme_dma_pci *pci_attr;
774 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
775 if (attributes == NULL) {
776 printk(KERN_ERR "Unable to allocate memory for attributes "
777 "structure\n");
778 goto err_attr;
781 pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
782 if (pci_attr == NULL) {
783 printk(KERN_ERR "Unable to allocate memory for pci "
784 "attributes\n");
785 goto err_pci;
790 attributes->type = VME_DMA_PCI;
791 attributes->private = (void *)pci_attr;
793 pci_attr->address = address;
795 return attributes;
797 kfree(pci_attr);
798 err_pci:
799 kfree(attributes);
800 err_attr:
801 return NULL;
803 EXPORT_SYMBOL(vme_dma_pci_attribute);
806 * Create "VME" type attributes
808 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
809 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
811 struct vme_dma_attr *attributes;
812 struct vme_dma_vme *vme_attr;
814 attributes = kmalloc(
815 sizeof(struct vme_dma_attr), GFP_KERNEL);
816 if (attributes == NULL) {
817 printk(KERN_ERR "Unable to allocate memory for attributes "
818 "structure\n");
819 goto err_attr;
822 vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
823 if (vme_attr == NULL) {
824 printk(KERN_ERR "Unable to allocate memory for vme "
825 "attributes\n");
826 goto err_vme;
829 attributes->type = VME_DMA_VME;
830 attributes->private = (void *)vme_attr;
832 vme_attr->address = address;
833 vme_attr->aspace = aspace;
834 vme_attr->cycle = cycle;
835 vme_attr->dwidth = dwidth;
837 return attributes;
839 kfree(vme_attr);
840 err_vme:
841 kfree(attributes);
842 err_attr:
843 return NULL;
845 EXPORT_SYMBOL(vme_dma_vme_attribute);
848 * Free attribute
850 void vme_dma_free_attribute(struct vme_dma_attr *attributes)
852 kfree(attributes->private);
853 kfree(attributes);
855 EXPORT_SYMBOL(vme_dma_free_attribute);
857 int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
858 struct vme_dma_attr *dest, size_t count)
860 struct vme_bridge *bridge = list->parent->parent;
861 int retval;
863 if (bridge->dma_list_add == NULL) {
864 printk(KERN_WARNING "Link List DMA generation not supported\n");
865 return -EINVAL;
868 if (!mutex_trylock(&(list->mtx))) {
869 printk(KERN_ERR "Link List already submitted\n");
870 return -EINVAL;
873 retval = bridge->dma_list_add(list, src, dest, count);
875 mutex_unlock(&(list->mtx));
877 return retval;
879 EXPORT_SYMBOL(vme_dma_list_add);
881 int vme_dma_list_exec(struct vme_dma_list *list)
883 struct vme_bridge *bridge = list->parent->parent;
884 int retval;
886 if (bridge->dma_list_exec == NULL) {
887 printk(KERN_ERR "Link List DMA execution not supported\n");
888 return -EINVAL;
891 mutex_lock(&(list->mtx));
893 retval = bridge->dma_list_exec(list);
895 mutex_unlock(&(list->mtx));
897 return retval;
899 EXPORT_SYMBOL(vme_dma_list_exec);
901 int vme_dma_list_free(struct vme_dma_list *list)
903 struct vme_bridge *bridge = list->parent->parent;
904 int retval;
906 if (bridge->dma_list_empty == NULL) {
907 printk(KERN_WARNING "Emptying of Link Lists not supported\n");
908 return -EINVAL;
911 if (!mutex_trylock(&(list->mtx))) {
912 printk(KERN_ERR "Link List in use\n");
913 return -EINVAL;
917 * Empty out all of the entries from the dma list. We need to go to the
918 * low level driver as dma entries are driver specific.
920 retval = bridge->dma_list_empty(list);
921 if (retval) {
922 printk(KERN_ERR "Unable to empty link-list entries\n");
923 mutex_unlock(&(list->mtx));
924 return retval;
926 mutex_unlock(&(list->mtx));
927 kfree(list);
929 return retval;
931 EXPORT_SYMBOL(vme_dma_list_free);
933 int vme_dma_free(struct vme_resource *resource)
935 struct vme_dma_resource *ctrlr;
937 if (resource->type != VME_DMA) {
938 printk(KERN_ERR "Not a DMA resource\n");
939 return -EINVAL;
942 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
944 if (!mutex_trylock(&(ctrlr->mtx))) {
945 printk(KERN_ERR "Resource busy, can't free\n");
946 return -EBUSY;
949 if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
950 printk(KERN_WARNING "Resource still processing transfers\n");
951 mutex_unlock(&(ctrlr->mtx));
952 return -EBUSY;
955 ctrlr->locked = 0;
957 mutex_unlock(&(ctrlr->mtx));
959 return 0;
961 EXPORT_SYMBOL(vme_dma_free);
963 void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
965 void (*call)(int, int, void *);
966 void *priv_data;
968 call = bridge->irq[level - 1].callback[statid].func;
969 priv_data = bridge->irq[level - 1].callback[statid].priv_data;
971 if (call != NULL)
972 call(level, statid, priv_data);
973 else
974 printk(KERN_WARNING "Spurilous VME interrupt, level:%x, "
975 "vector:%x\n", level, statid);
977 EXPORT_SYMBOL(vme_irq_handler);
979 int vme_irq_request(struct device *dev, int level, int statid,
980 void (*callback)(int, int, void *),
981 void *priv_data)
983 struct vme_bridge *bridge;
985 bridge = dev_to_bridge(dev);
986 if (bridge == NULL) {
987 printk(KERN_ERR "Can't find VME bus\n");
988 return -EINVAL;
991 if ((level < 1) || (level > 7)) {
992 printk(KERN_ERR "Invalid interrupt level\n");
993 return -EINVAL;
996 if (bridge->irq_set == NULL) {
997 printk(KERN_ERR "Configuring interrupts not supported\n");
998 return -EINVAL;
1001 mutex_lock(&(bridge->irq_mtx));
1003 if (bridge->irq[level - 1].callback[statid].func) {
1004 mutex_unlock(&(bridge->irq_mtx));
1005 printk(KERN_WARNING "VME Interrupt already taken\n");
1006 return -EBUSY;
1009 bridge->irq[level - 1].count++;
1010 bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1011 bridge->irq[level - 1].callback[statid].func = callback;
1013 /* Enable IRQ level */
1014 bridge->irq_set(bridge, level, 1, 1);
1016 mutex_unlock(&(bridge->irq_mtx));
1018 return 0;
1020 EXPORT_SYMBOL(vme_irq_request);
1022 void vme_irq_free(struct device *dev, int level, int statid)
1024 struct vme_bridge *bridge;
1026 bridge = dev_to_bridge(dev);
1027 if (bridge == NULL) {
1028 printk(KERN_ERR "Can't find VME bus\n");
1029 return;
1032 if ((level < 1) || (level > 7)) {
1033 printk(KERN_ERR "Invalid interrupt level\n");
1034 return;
1037 if (bridge->irq_set == NULL) {
1038 printk(KERN_ERR "Configuring interrupts not supported\n");
1039 return;
1042 mutex_lock(&(bridge->irq_mtx));
1044 bridge->irq[level - 1].count--;
1046 /* Disable IRQ level if no more interrupts attached at this level*/
1047 if (bridge->irq[level - 1].count == 0)
1048 bridge->irq_set(bridge, level, 0, 1);
1050 bridge->irq[level - 1].callback[statid].func = NULL;
1051 bridge->irq[level - 1].callback[statid].priv_data = NULL;
1053 mutex_unlock(&(bridge->irq_mtx));
1055 EXPORT_SYMBOL(vme_irq_free);
1057 int vme_irq_generate(struct device *dev, int level, int statid)
1059 struct vme_bridge *bridge;
1061 bridge = dev_to_bridge(dev);
1062 if (bridge == NULL) {
1063 printk(KERN_ERR "Can't find VME bus\n");
1064 return -EINVAL;
1067 if ((level < 1) || (level > 7)) {
1068 printk(KERN_WARNING "Invalid interrupt level\n");
1069 return -EINVAL;
1072 if (bridge->irq_generate == NULL) {
1073 printk(KERN_WARNING "Interrupt generation not supported\n");
1074 return -EINVAL;
1077 return bridge->irq_generate(bridge, level, statid);
1079 EXPORT_SYMBOL(vme_irq_generate);
1082 * Request the location monitor, return resource or NULL
1084 struct vme_resource *vme_lm_request(struct device *dev)
1086 struct vme_bridge *bridge;
1087 struct list_head *lm_pos = NULL;
1088 struct vme_lm_resource *allocated_lm = NULL;
1089 struct vme_lm_resource *lm = NULL;
1090 struct vme_resource *resource = NULL;
1092 bridge = dev_to_bridge(dev);
1093 if (bridge == NULL) {
1094 printk(KERN_ERR "Can't find VME bus\n");
1095 goto err_bus;
1098 /* Loop through DMA resources */
1099 list_for_each(lm_pos, &(bridge->lm_resources)) {
1100 lm = list_entry(lm_pos,
1101 struct vme_lm_resource, list);
1103 if (lm == NULL) {
1104 printk(KERN_ERR "Registered NULL Location Monitor "
1105 "resource\n");
1106 continue;
1109 /* Find an unlocked controller */
1110 mutex_lock(&(lm->mtx));
1111 if (lm->locked == 0) {
1112 lm->locked = 1;
1113 mutex_unlock(&(lm->mtx));
1114 allocated_lm = lm;
1115 break;
1117 mutex_unlock(&(lm->mtx));
1120 /* Check to see if we found a resource */
1121 if (allocated_lm == NULL)
1122 goto err_lm;
1124 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1125 if (resource == NULL) {
1126 printk(KERN_ERR "Unable to allocate resource structure\n");
1127 goto err_alloc;
1129 resource->type = VME_LM;
1130 resource->entry = &(allocated_lm->list);
1132 return resource;
1134 err_alloc:
1135 /* Unlock image */
1136 mutex_lock(&(lm->mtx));
1137 lm->locked = 0;
1138 mutex_unlock(&(lm->mtx));
1139 err_lm:
1140 err_bus:
1141 return NULL;
1143 EXPORT_SYMBOL(vme_lm_request);
1145 int vme_lm_count(struct vme_resource *resource)
1147 struct vme_lm_resource *lm;
1149 if (resource->type != VME_LM) {
1150 printk(KERN_ERR "Not a Location Monitor resource\n");
1151 return -EINVAL;
1154 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1156 return lm->monitors;
1158 EXPORT_SYMBOL(vme_lm_count);
1160 int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1161 vme_address_t aspace, vme_cycle_t cycle)
1163 struct vme_bridge *bridge = find_bridge(resource);
1164 struct vme_lm_resource *lm;
1166 if (resource->type != VME_LM) {
1167 printk(KERN_ERR "Not a Location Monitor resource\n");
1168 return -EINVAL;
1171 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1173 if (bridge->lm_set == NULL) {
1174 printk(KERN_ERR "vme_lm_set not supported\n");
1175 return -EINVAL;
1178 return bridge->lm_set(lm, lm_base, aspace, cycle);
1180 EXPORT_SYMBOL(vme_lm_set);
1182 int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1183 vme_address_t *aspace, vme_cycle_t *cycle)
1185 struct vme_bridge *bridge = find_bridge(resource);
1186 struct vme_lm_resource *lm;
1188 if (resource->type != VME_LM) {
1189 printk(KERN_ERR "Not a Location Monitor resource\n");
1190 return -EINVAL;
1193 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1195 if (bridge->lm_get == NULL) {
1196 printk(KERN_ERR "vme_lm_get not supported\n");
1197 return -EINVAL;
1200 return bridge->lm_get(lm, lm_base, aspace, cycle);
1202 EXPORT_SYMBOL(vme_lm_get);
1204 int vme_lm_attach(struct vme_resource *resource, int monitor,
1205 void (*callback)(int))
1207 struct vme_bridge *bridge = find_bridge(resource);
1208 struct vme_lm_resource *lm;
1210 if (resource->type != VME_LM) {
1211 printk(KERN_ERR "Not a Location Monitor resource\n");
1212 return -EINVAL;
1215 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1217 if (bridge->lm_attach == NULL) {
1218 printk(KERN_ERR "vme_lm_attach not supported\n");
1219 return -EINVAL;
1222 return bridge->lm_attach(lm, monitor, callback);
1224 EXPORT_SYMBOL(vme_lm_attach);
1226 int vme_lm_detach(struct vme_resource *resource, int monitor)
1228 struct vme_bridge *bridge = find_bridge(resource);
1229 struct vme_lm_resource *lm;
1231 if (resource->type != VME_LM) {
1232 printk(KERN_ERR "Not a Location Monitor resource\n");
1233 return -EINVAL;
1236 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1238 if (bridge->lm_detach == NULL) {
1239 printk(KERN_ERR "vme_lm_detach not supported\n");
1240 return -EINVAL;
1243 return bridge->lm_detach(lm, monitor);
1245 EXPORT_SYMBOL(vme_lm_detach);
1247 void vme_lm_free(struct vme_resource *resource)
1249 struct vme_lm_resource *lm;
1251 if (resource->type != VME_LM) {
1252 printk(KERN_ERR "Not a Location Monitor resource\n");
1253 return;
1256 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1258 mutex_lock(&(lm->mtx));
1261 lm->locked = 0;
1263 mutex_unlock(&(lm->mtx));
1265 kfree(resource);
1267 EXPORT_SYMBOL(vme_lm_free);
1269 int vme_slot_get(struct device *bus)
1271 struct vme_bridge *bridge;
1273 bridge = dev_to_bridge(bus);
1274 if (bridge == NULL) {
1275 printk(KERN_ERR "Can't find VME bus\n");
1276 return -EINVAL;
1279 if (bridge->slot_get == NULL) {
1280 printk(KERN_WARNING "vme_slot_get not supported\n");
1281 return -EINVAL;
1284 return bridge->slot_get(bridge);
1286 EXPORT_SYMBOL(vme_slot_get);
1289 /* - Bridge Registration --------------------------------------------------- */
1291 static int vme_alloc_bus_num(void)
1293 int i;
1295 mutex_lock(&vme_bus_num_mtx);
1296 for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1297 if (((vme_bus_numbers >> i) & 0x1) == 0) {
1298 vme_bus_numbers |= (0x1 << i);
1299 break;
1302 mutex_unlock(&vme_bus_num_mtx);
1304 return i;
1307 static void vme_free_bus_num(int bus)
1309 mutex_lock(&vme_bus_num_mtx);
1310 vme_bus_numbers |= ~(0x1 << bus);
1311 mutex_unlock(&vme_bus_num_mtx);
1314 int vme_register_bridge(struct vme_bridge *bridge)
1316 struct device *dev;
1317 int retval;
1318 int i;
1320 bridge->num = vme_alloc_bus_num();
1322 /* This creates 32 vme "slot" devices. This equates to a slot for each
1323 * ID available in a system conforming to the ANSI/VITA 1-1994
1324 * specification.
1326 for (i = 0; i < VME_SLOTS_MAX; i++) {
1327 dev = &(bridge->dev[i]);
1328 memset(dev, 0, sizeof(struct device));
1330 dev->parent = bridge->parent;
1331 dev->bus = &(vme_bus_type);
1333 * We save a pointer to the bridge in platform_data so that we
1334 * can get to it later. We keep driver_data for use by the
1335 * driver that binds against the slot
1337 dev->platform_data = bridge;
1338 dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
1340 retval = device_register(dev);
1341 if (retval)
1342 goto err_reg;
1345 return retval;
1347 i = VME_SLOTS_MAX;
1348 err_reg:
1349 while (i > -1) {
1350 dev = &(bridge->dev[i]);
1351 device_unregister(dev);
1353 vme_free_bus_num(bridge->num);
1354 return retval;
1356 EXPORT_SYMBOL(vme_register_bridge);
1358 void vme_unregister_bridge(struct vme_bridge *bridge)
1360 int i;
1361 struct device *dev;
1364 for (i = 0; i < VME_SLOTS_MAX; i++) {
1365 dev = &(bridge->dev[i]);
1366 device_unregister(dev);
1368 vme_free_bus_num(bridge->num);
1370 EXPORT_SYMBOL(vme_unregister_bridge);
1373 /* - Driver Registration --------------------------------------------------- */
1375 int vme_register_driver(struct vme_driver *drv)
1377 drv->driver.name = drv->name;
1378 drv->driver.bus = &vme_bus_type;
1380 return driver_register(&drv->driver);
1382 EXPORT_SYMBOL(vme_register_driver);
1384 void vme_unregister_driver(struct vme_driver *drv)
1386 driver_unregister(&drv->driver);
1388 EXPORT_SYMBOL(vme_unregister_driver);
1390 /* - Bus Registration ------------------------------------------------------ */
1392 static int vme_calc_slot(struct device *dev)
1394 struct vme_bridge *bridge;
1395 int num;
1397 bridge = dev_to_bridge(dev);
1399 /* Determine slot number */
1400 num = 0;
1401 while (num < VME_SLOTS_MAX) {
1402 if (&(bridge->dev[num]) == dev)
1403 break;
1405 num++;
1407 if (num == VME_SLOTS_MAX) {
1408 dev_err(dev, "Failed to identify slot\n");
1409 num = 0;
1410 goto err_dev;
1412 num++;
1414 err_dev:
1415 return num;
1418 static struct vme_driver *dev_to_vme_driver(struct device *dev)
1420 if (dev->driver == NULL)
1421 printk(KERN_ERR "Bugger dev->driver is NULL\n");
1423 return container_of(dev->driver, struct vme_driver, driver);
1426 static int vme_bus_match(struct device *dev, struct device_driver *drv)
1428 struct vme_bridge *bridge;
1429 struct vme_driver *driver;
1430 int i, num;
1432 bridge = dev_to_bridge(dev);
1433 driver = container_of(drv, struct vme_driver, driver);
1435 num = vme_calc_slot(dev);
1436 if (!num)
1437 goto err_dev;
1439 if (driver->bind_table == NULL) {
1440 dev_err(dev, "Bind table NULL\n");
1441 goto err_table;
1444 i = 0;
1445 while ((driver->bind_table[i].bus != 0) ||
1446 (driver->bind_table[i].slot != 0)) {
1448 if (bridge->num == driver->bind_table[i].bus) {
1449 if (num == driver->bind_table[i].slot)
1450 return 1;
1452 if (driver->bind_table[i].slot == VME_SLOT_ALL)
1453 return 1;
1455 if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
1456 (num == vme_slot_get(dev)))
1457 return 1;
1459 i++;
1462 err_dev:
1463 err_table:
1464 return 0;
1467 static int vme_bus_probe(struct device *dev)
1469 struct vme_bridge *bridge;
1470 struct vme_driver *driver;
1471 int retval = -ENODEV;
1473 driver = dev_to_vme_driver(dev);
1474 bridge = dev_to_bridge(dev);
1476 if (driver->probe != NULL)
1477 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1479 return retval;
1482 static int vme_bus_remove(struct device *dev)
1484 struct vme_bridge *bridge;
1485 struct vme_driver *driver;
1486 int retval = -ENODEV;
1488 driver = dev_to_vme_driver(dev);
1489 bridge = dev_to_bridge(dev);
1491 if (driver->remove != NULL)
1492 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1494 return retval;
/* The VME bus type: slot devices and VME drivers both attach here, and
 * binding is decided by vme_bus_match above.
 */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
1505 static int __init vme_init(void)
1507 return bus_register(&vme_bus_type);
1510 static void __exit vme_exit(void)
1512 bus_unregister(&vme_bus_type);
1515 MODULE_DESCRIPTION("VME bridge driver framework");
1516 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1517 MODULE_LICENSE("GPL");
1519 module_init(vme_init);
1520 module_exit(vme_exit);