4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/highmem.h>
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include <linux/mutex.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
35 #include "vme_bridge.h"
37 /* Bitmask and mutex to keep track of bridge numbers */
/* Allocation bitmap: bit i set means VME bus number i is in use
 * (see vme_alloc_bus_num()/vme_free_bus_num()). */
static unsigned int vme_bus_numbers;
/* Serialises all reads and writes of vme_bus_numbers. */
static DEFINE_MUTEX(vme_bus_num_mtx);
/* Forward declarations for the module entry/exit hooks registered at the
 * bottom of this file via module_init()/module_exit(). */
static void __exit vme_exit(void);
static int __init vme_init(void);
46 * Find the bridge resource associated with a specific device resource
48 static struct vme_bridge
*dev_to_bridge(struct device
*dev
)
50 return dev
->platform_data
;
54 * Find the bridge that the resource is associated with.
56 static struct vme_bridge
*find_bridge(struct vme_resource
*resource
)
58 /* Get list to search */
59 switch (resource
->type
) {
61 return list_entry(resource
->entry
, struct vme_master_resource
,
65 return list_entry(resource
->entry
, struct vme_slave_resource
,
69 return list_entry(resource
->entry
, struct vme_dma_resource
,
73 return list_entry(resource
->entry
, struct vme_lm_resource
,
77 printk(KERN_ERR
"Unknown resource type\n");
83 void *vme_alloc_consistent(struct vme_resource
*resource
, size_t size
,
86 struct vme_bridge
*bridge
;
89 if (resource
== NULL
) {
90 printk(KERN_ERR
"No resource\n");
94 bridge
= find_bridge(resource
);
96 printk(KERN_ERR
"Can't find bridge\n");
100 /* Find pci_dev container of dev */
101 if (bridge
->parent
== NULL
) {
102 printk(KERN_ERR
"Dev entry NULL\n");
105 pdev
= container_of(bridge
->parent
, struct pci_dev
, dev
);
107 return pci_alloc_consistent(pdev
, size
, dma
);
109 EXPORT_SYMBOL(vme_alloc_consistent
);
111 void vme_free_consistent(struct vme_resource
*resource
, size_t size
,
112 void *vaddr
, dma_addr_t dma
)
114 struct vme_bridge
*bridge
;
115 struct pci_dev
*pdev
;
117 if (resource
== NULL
) {
118 printk(KERN_ERR
"No resource\n");
122 bridge
= find_bridge(resource
);
123 if (bridge
== NULL
) {
124 printk(KERN_ERR
"Can't find bridge\n");
128 /* Find pci_dev container of dev */
129 pdev
= container_of(bridge
->parent
, struct pci_dev
, dev
);
131 pci_free_consistent(pdev
, size
, vaddr
, dma
);
133 EXPORT_SYMBOL(vme_free_consistent
);
135 size_t vme_get_size(struct vme_resource
*resource
)
138 unsigned long long base
, size
;
140 vme_address_t aspace
;
144 switch (resource
->type
) {
146 retval
= vme_master_get(resource
, &enabled
, &base
, &size
,
147 &aspace
, &cycle
, &dwidth
);
152 retval
= vme_slave_get(resource
, &enabled
, &base
, &size
,
153 &buf_base
, &aspace
, &cycle
);
161 printk(KERN_ERR
"Unknown resource type\n");
166 EXPORT_SYMBOL(vme_get_size
);
168 static int vme_check_window(vme_address_t aspace
, unsigned long long vme_base
,
169 unsigned long long size
)
175 if (((vme_base
+ size
) > VME_A16_MAX
) ||
176 (vme_base
> VME_A16_MAX
))
180 if (((vme_base
+ size
) > VME_A24_MAX
) ||
181 (vme_base
> VME_A24_MAX
))
185 if (((vme_base
+ size
) > VME_A32_MAX
) ||
186 (vme_base
> VME_A32_MAX
))
191 * Any value held in an unsigned long long can be used as the
196 if (((vme_base
+ size
) > VME_CRCSR_MAX
) ||
197 (vme_base
> VME_CRCSR_MAX
))
207 printk(KERN_ERR
"Invalid address space\n");
216 * Request a slave image with specific attributes, return some unique
219 struct vme_resource
*vme_slave_request(struct device
*dev
,
220 vme_address_t address
, vme_cycle_t cycle
)
222 struct vme_bridge
*bridge
;
223 struct list_head
*slave_pos
= NULL
;
224 struct vme_slave_resource
*allocated_image
= NULL
;
225 struct vme_slave_resource
*slave_image
= NULL
;
226 struct vme_resource
*resource
= NULL
;
228 bridge
= dev_to_bridge(dev
);
229 if (bridge
== NULL
) {
230 printk(KERN_ERR
"Can't find VME bus\n");
234 /* Loop through slave resources */
235 list_for_each(slave_pos
, &(bridge
->slave_resources
)) {
236 slave_image
= list_entry(slave_pos
,
237 struct vme_slave_resource
, list
);
239 if (slave_image
== NULL
) {
240 printk(KERN_ERR
"Registered NULL Slave resource\n");
244 /* Find an unlocked and compatible image */
245 mutex_lock(&(slave_image
->mtx
));
246 if (((slave_image
->address_attr
& address
) == address
) &&
247 ((slave_image
->cycle_attr
& cycle
) == cycle
) &&
248 (slave_image
->locked
== 0)) {
250 slave_image
->locked
= 1;
251 mutex_unlock(&(slave_image
->mtx
));
252 allocated_image
= slave_image
;
255 mutex_unlock(&(slave_image
->mtx
));
259 if (allocated_image
== NULL
)
262 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
263 if (resource
== NULL
) {
264 printk(KERN_WARNING
"Unable to allocate resource structure\n");
267 resource
->type
= VME_SLAVE
;
268 resource
->entry
= &(allocated_image
->list
);
274 mutex_lock(&(slave_image
->mtx
));
275 slave_image
->locked
= 0;
276 mutex_unlock(&(slave_image
->mtx
));
281 EXPORT_SYMBOL(vme_slave_request
);
283 int vme_slave_set(struct vme_resource
*resource
, int enabled
,
284 unsigned long long vme_base
, unsigned long long size
,
285 dma_addr_t buf_base
, vme_address_t aspace
, vme_cycle_t cycle
)
287 struct vme_bridge
*bridge
= find_bridge(resource
);
288 struct vme_slave_resource
*image
;
291 if (resource
->type
!= VME_SLAVE
) {
292 printk(KERN_ERR
"Not a slave resource\n");
296 image
= list_entry(resource
->entry
, struct vme_slave_resource
, list
);
298 if (bridge
->slave_set
== NULL
) {
299 printk(KERN_ERR
"Function not supported\n");
303 if (!(((image
->address_attr
& aspace
) == aspace
) &&
304 ((image
->cycle_attr
& cycle
) == cycle
))) {
305 printk(KERN_ERR
"Invalid attributes\n");
309 retval
= vme_check_window(aspace
, vme_base
, size
);
313 return bridge
->slave_set(image
, enabled
, vme_base
, size
, buf_base
,
316 EXPORT_SYMBOL(vme_slave_set
);
318 int vme_slave_get(struct vme_resource
*resource
, int *enabled
,
319 unsigned long long *vme_base
, unsigned long long *size
,
320 dma_addr_t
*buf_base
, vme_address_t
*aspace
, vme_cycle_t
*cycle
)
322 struct vme_bridge
*bridge
= find_bridge(resource
);
323 struct vme_slave_resource
*image
;
325 if (resource
->type
!= VME_SLAVE
) {
326 printk(KERN_ERR
"Not a slave resource\n");
330 image
= list_entry(resource
->entry
, struct vme_slave_resource
, list
);
332 if (bridge
->slave_get
== NULL
) {
333 printk(KERN_ERR
"vme_slave_get not supported\n");
337 return bridge
->slave_get(image
, enabled
, vme_base
, size
, buf_base
,
340 EXPORT_SYMBOL(vme_slave_get
);
342 void vme_slave_free(struct vme_resource
*resource
)
344 struct vme_slave_resource
*slave_image
;
346 if (resource
->type
!= VME_SLAVE
) {
347 printk(KERN_ERR
"Not a slave resource\n");
351 slave_image
= list_entry(resource
->entry
, struct vme_slave_resource
,
353 if (slave_image
== NULL
) {
354 printk(KERN_ERR
"Can't find slave resource\n");
359 mutex_lock(&(slave_image
->mtx
));
360 if (slave_image
->locked
== 0)
361 printk(KERN_ERR
"Image is already free\n");
363 slave_image
->locked
= 0;
364 mutex_unlock(&(slave_image
->mtx
));
366 /* Free up resource memory */
369 EXPORT_SYMBOL(vme_slave_free
);
372 * Request a master image with specific attributes, return some unique
375 struct vme_resource
*vme_master_request(struct device
*dev
,
376 vme_address_t address
, vme_cycle_t cycle
, vme_width_t dwidth
)
378 struct vme_bridge
*bridge
;
379 struct list_head
*master_pos
= NULL
;
380 struct vme_master_resource
*allocated_image
= NULL
;
381 struct vme_master_resource
*master_image
= NULL
;
382 struct vme_resource
*resource
= NULL
;
384 bridge
= dev_to_bridge(dev
);
385 if (bridge
== NULL
) {
386 printk(KERN_ERR
"Can't find VME bus\n");
390 /* Loop through master resources */
391 list_for_each(master_pos
, &(bridge
->master_resources
)) {
392 master_image
= list_entry(master_pos
,
393 struct vme_master_resource
, list
);
395 if (master_image
== NULL
) {
396 printk(KERN_WARNING
"Registered NULL master resource\n");
400 /* Find an unlocked and compatible image */
401 spin_lock(&(master_image
->lock
));
402 if (((master_image
->address_attr
& address
) == address
) &&
403 ((master_image
->cycle_attr
& cycle
) == cycle
) &&
404 ((master_image
->width_attr
& dwidth
) == dwidth
) &&
405 (master_image
->locked
== 0)) {
407 master_image
->locked
= 1;
408 spin_unlock(&(master_image
->lock
));
409 allocated_image
= master_image
;
412 spin_unlock(&(master_image
->lock
));
415 /* Check to see if we found a resource */
416 if (allocated_image
== NULL
) {
417 printk(KERN_ERR
"Can't find a suitable resource\n");
421 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
422 if (resource
== NULL
) {
423 printk(KERN_ERR
"Unable to allocate resource structure\n");
426 resource
->type
= VME_MASTER
;
427 resource
->entry
= &(allocated_image
->list
);
434 spin_lock(&(master_image
->lock
));
435 master_image
->locked
= 0;
436 spin_unlock(&(master_image
->lock
));
441 EXPORT_SYMBOL(vme_master_request
);
443 int vme_master_set(struct vme_resource
*resource
, int enabled
,
444 unsigned long long vme_base
, unsigned long long size
,
445 vme_address_t aspace
, vme_cycle_t cycle
, vme_width_t dwidth
)
447 struct vme_bridge
*bridge
= find_bridge(resource
);
448 struct vme_master_resource
*image
;
451 if (resource
->type
!= VME_MASTER
) {
452 printk(KERN_ERR
"Not a master resource\n");
456 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
458 if (bridge
->master_set
== NULL
) {
459 printk(KERN_WARNING
"vme_master_set not supported\n");
463 if (!(((image
->address_attr
& aspace
) == aspace
) &&
464 ((image
->cycle_attr
& cycle
) == cycle
) &&
465 ((image
->width_attr
& dwidth
) == dwidth
))) {
466 printk(KERN_WARNING
"Invalid attributes\n");
470 retval
= vme_check_window(aspace
, vme_base
, size
);
474 return bridge
->master_set(image
, enabled
, vme_base
, size
, aspace
,
477 EXPORT_SYMBOL(vme_master_set
);
479 int vme_master_get(struct vme_resource
*resource
, int *enabled
,
480 unsigned long long *vme_base
, unsigned long long *size
,
481 vme_address_t
*aspace
, vme_cycle_t
*cycle
, vme_width_t
*dwidth
)
483 struct vme_bridge
*bridge
= find_bridge(resource
);
484 struct vme_master_resource
*image
;
486 if (resource
->type
!= VME_MASTER
) {
487 printk(KERN_ERR
"Not a master resource\n");
491 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
493 if (bridge
->master_get
== NULL
) {
494 printk(KERN_WARNING
"vme_master_set not supported\n");
498 return bridge
->master_get(image
, enabled
, vme_base
, size
, aspace
,
501 EXPORT_SYMBOL(vme_master_get
);
504 * Read data out of VME space into a buffer.
506 ssize_t
vme_master_read(struct vme_resource
*resource
, void *buf
, size_t count
,
509 struct vme_bridge
*bridge
= find_bridge(resource
);
510 struct vme_master_resource
*image
;
513 if (bridge
->master_read
== NULL
) {
514 printk(KERN_WARNING
"Reading from resource not supported\n");
518 if (resource
->type
!= VME_MASTER
) {
519 printk(KERN_ERR
"Not a master resource\n");
523 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
525 length
= vme_get_size(resource
);
527 if (offset
> length
) {
528 printk(KERN_WARNING
"Invalid Offset\n");
532 if ((offset
+ count
) > length
)
533 count
= length
- offset
;
535 return bridge
->master_read(image
, buf
, count
, offset
);
538 EXPORT_SYMBOL(vme_master_read
);
541 * Write data out to VME space from a buffer.
543 ssize_t
vme_master_write(struct vme_resource
*resource
, void *buf
,
544 size_t count
, loff_t offset
)
546 struct vme_bridge
*bridge
= find_bridge(resource
);
547 struct vme_master_resource
*image
;
550 if (bridge
->master_write
== NULL
) {
551 printk(KERN_WARNING
"Writing to resource not supported\n");
555 if (resource
->type
!= VME_MASTER
) {
556 printk(KERN_ERR
"Not a master resource\n");
560 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
562 length
= vme_get_size(resource
);
564 if (offset
> length
) {
565 printk(KERN_WARNING
"Invalid Offset\n");
569 if ((offset
+ count
) > length
)
570 count
= length
- offset
;
572 return bridge
->master_write(image
, buf
, count
, offset
);
574 EXPORT_SYMBOL(vme_master_write
);
577 * Perform RMW cycle to provided location.
579 unsigned int vme_master_rmw(struct vme_resource
*resource
, unsigned int mask
,
580 unsigned int compare
, unsigned int swap
, loff_t offset
)
582 struct vme_bridge
*bridge
= find_bridge(resource
);
583 struct vme_master_resource
*image
;
585 if (bridge
->master_rmw
== NULL
) {
586 printk(KERN_WARNING
"Writing to resource not supported\n");
590 if (resource
->type
!= VME_MASTER
) {
591 printk(KERN_ERR
"Not a master resource\n");
595 image
= list_entry(resource
->entry
, struct vme_master_resource
, list
);
597 return bridge
->master_rmw(image
, mask
, compare
, swap
, offset
);
599 EXPORT_SYMBOL(vme_master_rmw
);
601 void vme_master_free(struct vme_resource
*resource
)
603 struct vme_master_resource
*master_image
;
605 if (resource
->type
!= VME_MASTER
) {
606 printk(KERN_ERR
"Not a master resource\n");
610 master_image
= list_entry(resource
->entry
, struct vme_master_resource
,
612 if (master_image
== NULL
) {
613 printk(KERN_ERR
"Can't find master resource\n");
618 spin_lock(&(master_image
->lock
));
619 if (master_image
->locked
== 0)
620 printk(KERN_ERR
"Image is already free\n");
622 master_image
->locked
= 0;
623 spin_unlock(&(master_image
->lock
));
625 /* Free up resource memory */
628 EXPORT_SYMBOL(vme_master_free
);
631 * Request a DMA controller with specific attributes, return some unique
634 struct vme_resource
*vme_dma_request(struct device
*dev
, vme_dma_route_t route
)
636 struct vme_bridge
*bridge
;
637 struct list_head
*dma_pos
= NULL
;
638 struct vme_dma_resource
*allocated_ctrlr
= NULL
;
639 struct vme_dma_resource
*dma_ctrlr
= NULL
;
640 struct vme_resource
*resource
= NULL
;
642 printk(KERN_ERR
"No VME resource Attribute tests done\n");
644 bridge
= dev_to_bridge(dev
);
645 if (bridge
== NULL
) {
646 printk(KERN_ERR
"Can't find VME bus\n");
650 /* Loop through DMA resources */
651 list_for_each(dma_pos
, &(bridge
->dma_resources
)) {
652 dma_ctrlr
= list_entry(dma_pos
,
653 struct vme_dma_resource
, list
);
655 if (dma_ctrlr
== NULL
) {
656 printk(KERN_ERR
"Registered NULL DMA resource\n");
660 /* Find an unlocked and compatible controller */
661 mutex_lock(&(dma_ctrlr
->mtx
));
662 if (((dma_ctrlr
->route_attr
& route
) == route
) &&
663 (dma_ctrlr
->locked
== 0)) {
665 dma_ctrlr
->locked
= 1;
666 mutex_unlock(&(dma_ctrlr
->mtx
));
667 allocated_ctrlr
= dma_ctrlr
;
670 mutex_unlock(&(dma_ctrlr
->mtx
));
673 /* Check to see if we found a resource */
674 if (allocated_ctrlr
== NULL
)
677 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
678 if (resource
== NULL
) {
679 printk(KERN_WARNING
"Unable to allocate resource structure\n");
682 resource
->type
= VME_DMA
;
683 resource
->entry
= &(allocated_ctrlr
->list
);
689 mutex_lock(&(dma_ctrlr
->mtx
));
690 dma_ctrlr
->locked
= 0;
691 mutex_unlock(&(dma_ctrlr
->mtx
));
696 EXPORT_SYMBOL(vme_dma_request
);
701 struct vme_dma_list
*vme_new_dma_list(struct vme_resource
*resource
)
703 struct vme_dma_resource
*ctrlr
;
704 struct vme_dma_list
*dma_list
;
706 if (resource
->type
!= VME_DMA
) {
707 printk(KERN_ERR
"Not a DMA resource\n");
711 ctrlr
= list_entry(resource
->entry
, struct vme_dma_resource
, list
);
713 dma_list
= kmalloc(sizeof(struct vme_dma_list
), GFP_KERNEL
);
714 if (dma_list
== NULL
) {
715 printk(KERN_ERR
"Unable to allocate memory for new dma list\n");
718 INIT_LIST_HEAD(&(dma_list
->entries
));
719 dma_list
->parent
= ctrlr
;
720 mutex_init(&(dma_list
->mtx
));
724 EXPORT_SYMBOL(vme_new_dma_list
);
727 * Create "Pattern" type attributes
729 struct vme_dma_attr
*vme_dma_pattern_attribute(u32 pattern
,
732 struct vme_dma_attr
*attributes
;
733 struct vme_dma_pattern
*pattern_attr
;
735 attributes
= kmalloc(sizeof(struct vme_dma_attr
), GFP_KERNEL
);
736 if (attributes
== NULL
) {
737 printk(KERN_ERR
"Unable to allocate memory for attributes "
742 pattern_attr
= kmalloc(sizeof(struct vme_dma_pattern
), GFP_KERNEL
);
743 if (pattern_attr
== NULL
) {
744 printk(KERN_ERR
"Unable to allocate memory for pattern "
749 attributes
->type
= VME_DMA_PATTERN
;
750 attributes
->private = (void *)pattern_attr
;
752 pattern_attr
->pattern
= pattern
;
753 pattern_attr
->type
= type
;
763 EXPORT_SYMBOL(vme_dma_pattern_attribute
);
766 * Create "PCI" type attributes
768 struct vme_dma_attr
*vme_dma_pci_attribute(dma_addr_t address
)
770 struct vme_dma_attr
*attributes
;
771 struct vme_dma_pci
*pci_attr
;
774 attributes
= kmalloc(sizeof(struct vme_dma_attr
), GFP_KERNEL
);
775 if (attributes
== NULL
) {
776 printk(KERN_ERR
"Unable to allocate memory for attributes "
781 pci_attr
= kmalloc(sizeof(struct vme_dma_pci
), GFP_KERNEL
);
782 if (pci_attr
== NULL
) {
783 printk(KERN_ERR
"Unable to allocate memory for pci "
790 attributes
->type
= VME_DMA_PCI
;
791 attributes
->private = (void *)pci_attr
;
793 pci_attr
->address
= address
;
803 EXPORT_SYMBOL(vme_dma_pci_attribute
);
806 * Create "VME" type attributes
808 struct vme_dma_attr
*vme_dma_vme_attribute(unsigned long long address
,
809 vme_address_t aspace
, vme_cycle_t cycle
, vme_width_t dwidth
)
811 struct vme_dma_attr
*attributes
;
812 struct vme_dma_vme
*vme_attr
;
814 attributes
= kmalloc(
815 sizeof(struct vme_dma_attr
), GFP_KERNEL
);
816 if (attributes
== NULL
) {
817 printk(KERN_ERR
"Unable to allocate memory for attributes "
822 vme_attr
= kmalloc(sizeof(struct vme_dma_vme
), GFP_KERNEL
);
823 if (vme_attr
== NULL
) {
824 printk(KERN_ERR
"Unable to allocate memory for vme "
829 attributes
->type
= VME_DMA_VME
;
830 attributes
->private = (void *)vme_attr
;
832 vme_attr
->address
= address
;
833 vme_attr
->aspace
= aspace
;
834 vme_attr
->cycle
= cycle
;
835 vme_attr
->dwidth
= dwidth
;
845 EXPORT_SYMBOL(vme_dma_vme_attribute
);
850 void vme_dma_free_attribute(struct vme_dma_attr
*attributes
)
852 kfree(attributes
->private);
855 EXPORT_SYMBOL(vme_dma_free_attribute
);
857 int vme_dma_list_add(struct vme_dma_list
*list
, struct vme_dma_attr
*src
,
858 struct vme_dma_attr
*dest
, size_t count
)
860 struct vme_bridge
*bridge
= list
->parent
->parent
;
863 if (bridge
->dma_list_add
== NULL
) {
864 printk(KERN_WARNING
"Link List DMA generation not supported\n");
868 if (!mutex_trylock(&(list
->mtx
))) {
869 printk(KERN_ERR
"Link List already submitted\n");
873 retval
= bridge
->dma_list_add(list
, src
, dest
, count
);
875 mutex_unlock(&(list
->mtx
));
879 EXPORT_SYMBOL(vme_dma_list_add
);
881 int vme_dma_list_exec(struct vme_dma_list
*list
)
883 struct vme_bridge
*bridge
= list
->parent
->parent
;
886 if (bridge
->dma_list_exec
== NULL
) {
887 printk(KERN_ERR
"Link List DMA execution not supported\n");
891 mutex_lock(&(list
->mtx
));
893 retval
= bridge
->dma_list_exec(list
);
895 mutex_unlock(&(list
->mtx
));
899 EXPORT_SYMBOL(vme_dma_list_exec
);
901 int vme_dma_list_free(struct vme_dma_list
*list
)
903 struct vme_bridge
*bridge
= list
->parent
->parent
;
906 if (bridge
->dma_list_empty
== NULL
) {
907 printk(KERN_WARNING
"Emptying of Link Lists not supported\n");
911 if (!mutex_trylock(&(list
->mtx
))) {
912 printk(KERN_ERR
"Link List in use\n");
917 * Empty out all of the entries from the dma list. We need to go to the
918 * low level driver as dma entries are driver specific.
920 retval
= bridge
->dma_list_empty(list
);
922 printk(KERN_ERR
"Unable to empty link-list entries\n");
923 mutex_unlock(&(list
->mtx
));
926 mutex_unlock(&(list
->mtx
));
931 EXPORT_SYMBOL(vme_dma_list_free
);
933 int vme_dma_free(struct vme_resource
*resource
)
935 struct vme_dma_resource
*ctrlr
;
937 if (resource
->type
!= VME_DMA
) {
938 printk(KERN_ERR
"Not a DMA resource\n");
942 ctrlr
= list_entry(resource
->entry
, struct vme_dma_resource
, list
);
944 if (!mutex_trylock(&(ctrlr
->mtx
))) {
945 printk(KERN_ERR
"Resource busy, can't free\n");
949 if (!(list_empty(&(ctrlr
->pending
)) && list_empty(&(ctrlr
->running
)))) {
950 printk(KERN_WARNING
"Resource still processing transfers\n");
951 mutex_unlock(&(ctrlr
->mtx
));
957 mutex_unlock(&(ctrlr
->mtx
));
961 EXPORT_SYMBOL(vme_dma_free
);
963 void vme_irq_handler(struct vme_bridge
*bridge
, int level
, int statid
)
965 void (*call
)(int, int, void *);
968 call
= bridge
->irq
[level
- 1].callback
[statid
].func
;
969 priv_data
= bridge
->irq
[level
- 1].callback
[statid
].priv_data
;
972 call(level
, statid
, priv_data
);
974 printk(KERN_WARNING
"Spurilous VME interrupt, level:%x, "
975 "vector:%x\n", level
, statid
);
977 EXPORT_SYMBOL(vme_irq_handler
);
979 int vme_irq_request(struct device
*dev
, int level
, int statid
,
980 void (*callback
)(int, int, void *),
983 struct vme_bridge
*bridge
;
985 bridge
= dev_to_bridge(dev
);
986 if (bridge
== NULL
) {
987 printk(KERN_ERR
"Can't find VME bus\n");
991 if ((level
< 1) || (level
> 7)) {
992 printk(KERN_ERR
"Invalid interrupt level\n");
996 if (bridge
->irq_set
== NULL
) {
997 printk(KERN_ERR
"Configuring interrupts not supported\n");
1001 mutex_lock(&(bridge
->irq_mtx
));
1003 if (bridge
->irq
[level
- 1].callback
[statid
].func
) {
1004 mutex_unlock(&(bridge
->irq_mtx
));
1005 printk(KERN_WARNING
"VME Interrupt already taken\n");
1009 bridge
->irq
[level
- 1].count
++;
1010 bridge
->irq
[level
- 1].callback
[statid
].priv_data
= priv_data
;
1011 bridge
->irq
[level
- 1].callback
[statid
].func
= callback
;
1013 /* Enable IRQ level */
1014 bridge
->irq_set(bridge
, level
, 1, 1);
1016 mutex_unlock(&(bridge
->irq_mtx
));
1020 EXPORT_SYMBOL(vme_irq_request
);
1022 void vme_irq_free(struct device
*dev
, int level
, int statid
)
1024 struct vme_bridge
*bridge
;
1026 bridge
= dev_to_bridge(dev
);
1027 if (bridge
== NULL
) {
1028 printk(KERN_ERR
"Can't find VME bus\n");
1032 if ((level
< 1) || (level
> 7)) {
1033 printk(KERN_ERR
"Invalid interrupt level\n");
1037 if (bridge
->irq_set
== NULL
) {
1038 printk(KERN_ERR
"Configuring interrupts not supported\n");
1042 mutex_lock(&(bridge
->irq_mtx
));
1044 bridge
->irq
[level
- 1].count
--;
1046 /* Disable IRQ level if no more interrupts attached at this level*/
1047 if (bridge
->irq
[level
- 1].count
== 0)
1048 bridge
->irq_set(bridge
, level
, 0, 1);
1050 bridge
->irq
[level
- 1].callback
[statid
].func
= NULL
;
1051 bridge
->irq
[level
- 1].callback
[statid
].priv_data
= NULL
;
1053 mutex_unlock(&(bridge
->irq_mtx
));
1055 EXPORT_SYMBOL(vme_irq_free
);
1057 int vme_irq_generate(struct device
*dev
, int level
, int statid
)
1059 struct vme_bridge
*bridge
;
1061 bridge
= dev_to_bridge(dev
);
1062 if (bridge
== NULL
) {
1063 printk(KERN_ERR
"Can't find VME bus\n");
1067 if ((level
< 1) || (level
> 7)) {
1068 printk(KERN_WARNING
"Invalid interrupt level\n");
1072 if (bridge
->irq_generate
== NULL
) {
1073 printk(KERN_WARNING
"Interrupt generation not supported\n");
1077 return bridge
->irq_generate(bridge
, level
, statid
);
1079 EXPORT_SYMBOL(vme_irq_generate
);
1082 * Request the location monitor, return resource or NULL
1084 struct vme_resource
*vme_lm_request(struct device
*dev
)
1086 struct vme_bridge
*bridge
;
1087 struct list_head
*lm_pos
= NULL
;
1088 struct vme_lm_resource
*allocated_lm
= NULL
;
1089 struct vme_lm_resource
*lm
= NULL
;
1090 struct vme_resource
*resource
= NULL
;
1092 bridge
= dev_to_bridge(dev
);
1093 if (bridge
== NULL
) {
1094 printk(KERN_ERR
"Can't find VME bus\n");
1098 /* Loop through DMA resources */
1099 list_for_each(lm_pos
, &(bridge
->lm_resources
)) {
1100 lm
= list_entry(lm_pos
,
1101 struct vme_lm_resource
, list
);
1104 printk(KERN_ERR
"Registered NULL Location Monitor "
1109 /* Find an unlocked controller */
1110 mutex_lock(&(lm
->mtx
));
1111 if (lm
->locked
== 0) {
1113 mutex_unlock(&(lm
->mtx
));
1117 mutex_unlock(&(lm
->mtx
));
1120 /* Check to see if we found a resource */
1121 if (allocated_lm
== NULL
)
1124 resource
= kmalloc(sizeof(struct vme_resource
), GFP_KERNEL
);
1125 if (resource
== NULL
) {
1126 printk(KERN_ERR
"Unable to allocate resource structure\n");
1129 resource
->type
= VME_LM
;
1130 resource
->entry
= &(allocated_lm
->list
);
1136 mutex_lock(&(lm
->mtx
));
1138 mutex_unlock(&(lm
->mtx
));
1143 EXPORT_SYMBOL(vme_lm_request
);
1145 int vme_lm_count(struct vme_resource
*resource
)
1147 struct vme_lm_resource
*lm
;
1149 if (resource
->type
!= VME_LM
) {
1150 printk(KERN_ERR
"Not a Location Monitor resource\n");
1154 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1156 return lm
->monitors
;
1158 EXPORT_SYMBOL(vme_lm_count
);
1160 int vme_lm_set(struct vme_resource
*resource
, unsigned long long lm_base
,
1161 vme_address_t aspace
, vme_cycle_t cycle
)
1163 struct vme_bridge
*bridge
= find_bridge(resource
);
1164 struct vme_lm_resource
*lm
;
1166 if (resource
->type
!= VME_LM
) {
1167 printk(KERN_ERR
"Not a Location Monitor resource\n");
1171 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1173 if (bridge
->lm_set
== NULL
) {
1174 printk(KERN_ERR
"vme_lm_set not supported\n");
1178 return bridge
->lm_set(lm
, lm_base
, aspace
, cycle
);
1180 EXPORT_SYMBOL(vme_lm_set
);
1182 int vme_lm_get(struct vme_resource
*resource
, unsigned long long *lm_base
,
1183 vme_address_t
*aspace
, vme_cycle_t
*cycle
)
1185 struct vme_bridge
*bridge
= find_bridge(resource
);
1186 struct vme_lm_resource
*lm
;
1188 if (resource
->type
!= VME_LM
) {
1189 printk(KERN_ERR
"Not a Location Monitor resource\n");
1193 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1195 if (bridge
->lm_get
== NULL
) {
1196 printk(KERN_ERR
"vme_lm_get not supported\n");
1200 return bridge
->lm_get(lm
, lm_base
, aspace
, cycle
);
1202 EXPORT_SYMBOL(vme_lm_get
);
1204 int vme_lm_attach(struct vme_resource
*resource
, int monitor
,
1205 void (*callback
)(int))
1207 struct vme_bridge
*bridge
= find_bridge(resource
);
1208 struct vme_lm_resource
*lm
;
1210 if (resource
->type
!= VME_LM
) {
1211 printk(KERN_ERR
"Not a Location Monitor resource\n");
1215 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1217 if (bridge
->lm_attach
== NULL
) {
1218 printk(KERN_ERR
"vme_lm_attach not supported\n");
1222 return bridge
->lm_attach(lm
, monitor
, callback
);
1224 EXPORT_SYMBOL(vme_lm_attach
);
1226 int vme_lm_detach(struct vme_resource
*resource
, int monitor
)
1228 struct vme_bridge
*bridge
= find_bridge(resource
);
1229 struct vme_lm_resource
*lm
;
1231 if (resource
->type
!= VME_LM
) {
1232 printk(KERN_ERR
"Not a Location Monitor resource\n");
1236 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1238 if (bridge
->lm_detach
== NULL
) {
1239 printk(KERN_ERR
"vme_lm_detach not supported\n");
1243 return bridge
->lm_detach(lm
, monitor
);
1245 EXPORT_SYMBOL(vme_lm_detach
);
1247 void vme_lm_free(struct vme_resource
*resource
)
1249 struct vme_lm_resource
*lm
;
1251 if (resource
->type
!= VME_LM
) {
1252 printk(KERN_ERR
"Not a Location Monitor resource\n");
1256 lm
= list_entry(resource
->entry
, struct vme_lm_resource
, list
);
1258 mutex_lock(&(lm
->mtx
));
1263 mutex_unlock(&(lm
->mtx
));
1267 EXPORT_SYMBOL(vme_lm_free
);
1269 int vme_slot_get(struct device
*bus
)
1271 struct vme_bridge
*bridge
;
1273 bridge
= dev_to_bridge(bus
);
1274 if (bridge
== NULL
) {
1275 printk(KERN_ERR
"Can't find VME bus\n");
1279 if (bridge
->slot_get
== NULL
) {
1280 printk(KERN_WARNING
"vme_slot_get not supported\n");
1284 return bridge
->slot_get(bridge
);
1286 EXPORT_SYMBOL(vme_slot_get
);
1289 /* - Bridge Registration --------------------------------------------------- */
1291 static int vme_alloc_bus_num(void)
1295 mutex_lock(&vme_bus_num_mtx
);
1296 for (i
= 0; i
< sizeof(vme_bus_numbers
) * 8; i
++) {
1297 if (((vme_bus_numbers
>> i
) & 0x1) == 0) {
1298 vme_bus_numbers
|= (0x1 << i
);
1302 mutex_unlock(&vme_bus_num_mtx
);
1307 static void vme_free_bus_num(int bus
)
1309 mutex_lock(&vme_bus_num_mtx
);
1310 vme_bus_numbers
|= ~(0x1 << bus
);
1311 mutex_unlock(&vme_bus_num_mtx
);
1314 int vme_register_bridge(struct vme_bridge
*bridge
)
1320 bridge
->num
= vme_alloc_bus_num();
1322 /* This creates 32 vme "slot" devices. This equates to a slot for each
1323 * ID available in a system conforming to the ANSI/VITA 1-1994
1326 for (i
= 0; i
< VME_SLOTS_MAX
; i
++) {
1327 dev
= &(bridge
->dev
[i
]);
1328 memset(dev
, 0, sizeof(struct device
));
1330 dev
->parent
= bridge
->parent
;
1331 dev
->bus
= &(vme_bus_type
);
1333 * We save a pointer to the bridge in platform_data so that we
1334 * can get to it later. We keep driver_data for use by the
1335 * driver that binds against the slot
1337 dev
->platform_data
= bridge
;
1338 dev_set_name(dev
, "vme-%x.%x", bridge
->num
, i
+ 1);
1340 retval
= device_register(dev
);
1350 dev
= &(bridge
->dev
[i
]);
1351 device_unregister(dev
);
1353 vme_free_bus_num(bridge
->num
);
1356 EXPORT_SYMBOL(vme_register_bridge
);
1358 void vme_unregister_bridge(struct vme_bridge
*bridge
)
1364 for (i
= 0; i
< VME_SLOTS_MAX
; i
++) {
1365 dev
= &(bridge
->dev
[i
]);
1366 device_unregister(dev
);
1368 vme_free_bus_num(bridge
->num
);
1370 EXPORT_SYMBOL(vme_unregister_bridge
);
1373 /* - Driver Registration --------------------------------------------------- */
1375 int vme_register_driver(struct vme_driver
*drv
)
1377 drv
->driver
.name
= drv
->name
;
1378 drv
->driver
.bus
= &vme_bus_type
;
1380 return driver_register(&drv
->driver
);
1382 EXPORT_SYMBOL(vme_register_driver
);
1384 void vme_unregister_driver(struct vme_driver
*drv
)
1386 driver_unregister(&drv
->driver
);
1388 EXPORT_SYMBOL(vme_unregister_driver
);
1390 /* - Bus Registration ------------------------------------------------------ */
1392 static int vme_calc_slot(struct device
*dev
)
1394 struct vme_bridge
*bridge
;
1397 bridge
= dev_to_bridge(dev
);
1399 /* Determine slot number */
1401 while (num
< VME_SLOTS_MAX
) {
1402 if (&(bridge
->dev
[num
]) == dev
)
1407 if (num
== VME_SLOTS_MAX
) {
1408 dev_err(dev
, "Failed to identify slot\n");
1418 static struct vme_driver
*dev_to_vme_driver(struct device
*dev
)
1420 if (dev
->driver
== NULL
)
1421 printk(KERN_ERR
"Bugger dev->driver is NULL\n");
1423 return container_of(dev
->driver
, struct vme_driver
, driver
);
1426 static int vme_bus_match(struct device
*dev
, struct device_driver
*drv
)
1428 struct vme_bridge
*bridge
;
1429 struct vme_driver
*driver
;
1432 bridge
= dev_to_bridge(dev
);
1433 driver
= container_of(drv
, struct vme_driver
, driver
);
1435 num
= vme_calc_slot(dev
);
1439 if (driver
->bind_table
== NULL
) {
1440 dev_err(dev
, "Bind table NULL\n");
1445 while ((driver
->bind_table
[i
].bus
!= 0) ||
1446 (driver
->bind_table
[i
].slot
!= 0)) {
1448 if (bridge
->num
== driver
->bind_table
[i
].bus
) {
1449 if (num
== driver
->bind_table
[i
].slot
)
1452 if (driver
->bind_table
[i
].slot
== VME_SLOT_ALL
)
1455 if ((driver
->bind_table
[i
].slot
== VME_SLOT_CURRENT
) &&
1456 (num
== vme_slot_get(dev
)))
1467 static int vme_bus_probe(struct device
*dev
)
1469 struct vme_bridge
*bridge
;
1470 struct vme_driver
*driver
;
1471 int retval
= -ENODEV
;
1473 driver
= dev_to_vme_driver(dev
);
1474 bridge
= dev_to_bridge(dev
);
1476 if (driver
->probe
!= NULL
)
1477 retval
= driver
->probe(dev
, bridge
->num
, vme_calc_slot(dev
));
1482 static int vme_bus_remove(struct device
*dev
)
1484 struct vme_bridge
*bridge
;
1485 struct vme_driver
*driver
;
1486 int retval
= -ENODEV
;
1488 driver
= dev_to_vme_driver(dev
);
1489 bridge
= dev_to_bridge(dev
);
1491 if (driver
->remove
!= NULL
)
1492 retval
= driver
->remove(dev
, bridge
->num
, vme_calc_slot(dev
));
1497 struct bus_type vme_bus_type
= {
1499 .match
= vme_bus_match
,
1500 .probe
= vme_bus_probe
,
1501 .remove
= vme_bus_remove
,
1503 EXPORT_SYMBOL(vme_bus_type
);
1505 static int __init
vme_init(void)
1507 return bus_register(&vme_bus_type
);
1510 static void __exit
vme_exit(void)
1512 bus_unregister(&vme_bus_type
);
1515 MODULE_DESCRIPTION("VME bridge driver framework");
1516 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1517 MODULE_LICENSE("GPL");
1519 module_init(vme_init
);
1520 module_exit(vme_exit
);