/* Provenance: davej-history.git, import 2.3.18pre1,
 * blob e9f0f7a52ab27fc0086567395e587a9e6ae35e4a — kernel/dma.c */
/* $Id: dma.c,v 1.7 1994/12/28 03:35:33 root Exp root $
 * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
 *
 * Written by Hennus Bergman, 1992.
 *
 * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
 *   In the previous version the reported device could end up being wrong,
 *   if a device requested a DMA channel that was already in use.
 *   [It also happened to remove the sizeof(char *) == sizeof(int)
 *   assumption introduced because of those /proc/dma patches. -- Hennus]
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#include <asm/dma.h>
#include <asm/system.h>
/* A note on resource allocation:
 *
 * All drivers needing DMA channels, should allocate and release them
 * through the public routines `request_dma()' and `free_dma()'.
 *
 * In order to avoid problems, all processes should allocate resources in
 * the same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
 * When releasing them, first release the DMA, then release the IRQ.
 * If you don't, you may cause allocation requests to fail unnecessarily.
 * This doesn't really matter now, but it will once we get real semaphores
 * in the kernel.
 */
36 spinlock_t dma_spin_lock = SPIN_LOCK_UNLOCKED;
/* Channel n is busy iff dma_chan_busy[n].lock != 0.
 * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
 * DMA4 is reserved for cascading.
 */
/* Book-keeping for one ISA DMA channel. */
struct dma_chan {
	int lock;		/* non-zero iff the channel is allocated */
	const char *device_id;	/* owner's name; printed by get_dma_list() */
};
49 static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = {
50 { 0, 0 },
51 { 0, 0 },
52 { 0, 0 },
53 { 0, 0 },
54 { 1, "cascade" },
55 { 0, 0 },
56 { 0, 0 },
57 { 0, 0 }
60 int get_dma_list(char *buf)
62 int i, len = 0;
64 for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
65 if (dma_chan_busy[i].lock) {
66 len += sprintf(buf+len, "%2d: %s\n",
68 dma_chan_busy[i].device_id);
71 return len;
72 } /* get_dma_list */
75 int request_dma(unsigned int dmanr, const char * device_id)
77 if (dmanr >= MAX_DMA_CHANNELS)
78 return -EINVAL;
80 if (xchg(&dma_chan_busy[dmanr].lock, 1) != 0)
81 return -EBUSY;
83 dma_chan_busy[dmanr].device_id = device_id;
85 /* old flag was 0, now contains 1 to indicate busy */
86 return 0;
87 } /* request_dma */
90 void free_dma(unsigned int dmanr)
92 if (dmanr >= MAX_DMA_CHANNELS) {
93 printk("Trying to free DMA%d\n", dmanr);
94 return;
97 if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
98 printk("Trying to free free DMA%d\n", dmanr);
99 return;
102 } /* free_dma */