Import 2.3.18pre1
[davej-history.git] / arch / arm / kernel / dma.c
blob7d1a11cd599a38026f34577e977af7b08b6caf43
1 /*
2 * linux/arch/arm/kernel/dma.c
4 * Copyright (C) 1995-1998 Russell King
6 * Front-end to the DMA handling. You must provide the following
7 * architecture-specific routines:
9 * int arch_request_dma(dmach_t channel, dma_t *dma, const char *dev_id);
10 * void arch_free_dma(dmach_t channel, dma_t *dma);
11 * void arch_enable_dma(dmach_t channel, dma_t *dma);
12 * void arch_disable_dma(dmach_t channel, dma_t *dma);
13 * int arch_get_dma_residue(dmach_t channel, dma_t *dma);
15 * Moved DMA resource allocation here...
17 #include <linux/sched.h>
18 #include <linux/module.h>
19 #include <linux/malloc.h>
20 #include <linux/mman.h>
21 #include <linux/init.h>
22 #include <linux/spinlock.h>
24 #include <asm/page.h>
25 #include <asm/irq.h>
26 #include <asm/hardware.h>
27 #include <asm/io.h>
28 #include <asm/dma.h>
31 /* A note on resource allocation:
33 * All drivers needing DMA channels, should allocate and release them
34 * through the public routines `request_dma()' and `free_dma()'.
36 * In order to avoid problems, all processes should allocate resources in
37 * the same sequence and release them in the reverse order.
39 * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
40 * When releasing them, first release the DMA, then release the IRQ.
41 * If you don't, you may cause allocation requests to fail unnecessarily.
42 * This doesn't really matter now, but it will once we get real semaphores
43 * in the kernel.
/*
 * Spinlock made available to DMA consumers (e.g. via claim_dma_lock());
 * it is not taken directly anywhere in this file.
 */
spinlock_t dma_spin_lock = SPIN_LOCK_UNLOCKED;

/* Machine-specific DMA definitions: dma_t and the arch_* hook prototypes. */
#include "dma.h"

/* Shared printk format string for drivers reporting an unsupported channel. */
const char dma_str[] = "%s: dma %d not supported\n";

/* Per-channel state table, indexed by channel number (dmach_t). */
static dma_t dma_chan[MAX_DMA_CHANNELS];
55 /* Get dma list
56 * for /proc/dma
58 int get_dma_list(char *buf)
60 int i, len = 0;
62 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
63 if (dma_chan[i].lock)
64 len += sprintf(buf + len, "%2d: %s\n",
65 i, dma_chan[i].device_id);
67 return len;
70 /* Request DMA channel
72 * On certain platforms, we have to allocate an interrupt as well...
74 int request_dma(dmach_t channel, const char *device_id)
76 if (channel < MAX_DMA_CHANNELS) {
77 int ret;
79 if (xchg(&dma_chan[channel].lock, 1) != 0)
80 return -EBUSY;
82 ret = arch_request_dma(channel, &dma_chan[channel], device_id);
83 if (!ret) {
84 dma_chan[channel].device_id = device_id;
85 dma_chan[channel].active = 0;
86 dma_chan[channel].invalid = 1;
87 } else
88 xchg(&dma_chan[channel].lock, 0);
90 return ret;
91 } else {
92 printk (KERN_ERR "Trying to allocate DMA%d\n", channel);
93 return -EINVAL;
97 /* Free DMA channel
99 * On certain platforms, we have to free interrupt as well...
101 void free_dma(dmach_t channel)
103 if (channel >= MAX_DMA_CHANNELS) {
104 printk (KERN_ERR "Trying to free DMA%d\n", channel);
105 return;
108 if (xchg(&dma_chan[channel].lock, 0) == 0) {
109 if (dma_chan[channel].active) {
110 printk (KERN_ERR "Freeing active DMA%d\n", channel);
111 arch_disable_dma(channel, &dma_chan[channel]);
112 dma_chan[channel].active = 0;
115 printk (KERN_ERR "Trying to free free DMA%d\n", channel);
116 return;
118 arch_free_dma(channel, &dma_chan[channel]);
121 /* Set DMA Scatter-Gather list
123 void set_dma_sg (dmach_t channel, dmasg_t *sg, int nr_sg)
125 dma_chan[channel].sg = sg;
126 dma_chan[channel].sgcount = nr_sg;
127 dma_chan[channel].invalid = 1;
130 /* Set DMA address
132 * Copy address to the structure, and set the invalid bit
134 void set_dma_addr (dmach_t channel, unsigned long physaddr)
136 if (dma_chan[channel].active)
137 printk(KERN_ERR "set_dma_addr: altering DMA%d"
138 " address while DMA active\n",
139 channel);
141 dma_chan[channel].sg = &dma_chan[channel].buf;
142 dma_chan[channel].sgcount = 1;
143 dma_chan[channel].buf.address = physaddr;
144 dma_chan[channel].invalid = 1;
147 /* Set DMA byte count
149 * Copy address to the structure, and set the invalid bit
151 void set_dma_count (dmach_t channel, unsigned long count)
153 if (dma_chan[channel].active)
154 printk(KERN_ERR "set_dma_count: altering DMA%d"
155 " count while DMA active\n",
156 channel);
158 dma_chan[channel].sg = &dma_chan[channel].buf;
159 dma_chan[channel].sgcount = 1;
160 dma_chan[channel].buf.length = count;
161 dma_chan[channel].invalid = 1;
164 /* Set DMA direction mode
166 void set_dma_mode (dmach_t channel, dmamode_t mode)
168 if (dma_chan[channel].active)
169 printk(KERN_ERR "set_dma_mode: altering DMA%d"
170 " mode while DMA active\n",
171 channel);
173 dma_chan[channel].dma_mode = mode;
174 dma_chan[channel].invalid = 1;
177 /* Enable DMA channel
179 void enable_dma (dmach_t channel)
181 if (dma_chan[channel].lock) {
182 if (dma_chan[channel].active == 0) {
183 dma_chan[channel].active = 1;
184 arch_enable_dma(channel, &dma_chan[channel]);
186 } else
187 printk (KERN_ERR "Trying to enable free DMA%d\n", channel);
190 /* Disable DMA channel
192 void disable_dma (dmach_t channel)
194 if (dma_chan[channel].lock) {
195 if (dma_chan[channel].active == 1) {
196 dma_chan[channel].active = 0;
197 arch_disable_dma(channel, &dma_chan[channel]);
199 } else
200 printk (KERN_ERR "Trying to disable free DMA%d\n", channel);
203 void set_dma_speed(dmach_t channel, int cycle_ns)
205 dma_chan[channel].speed =
206 arch_set_dma_speed(channel, &dma_chan[channel], cycle_ns);
209 int get_dma_residue(dmach_t channel)
211 return arch_get_dma_residue(channel, &dma_chan[channel]);
/* Interfaces exported for use by loadable modules. */
EXPORT_SYMBOL(dma_str);
EXPORT_SYMBOL(enable_dma);
EXPORT_SYMBOL(disable_dma);
EXPORT_SYMBOL(set_dma_addr);
EXPORT_SYMBOL(set_dma_count);
EXPORT_SYMBOL(set_dma_mode);
EXPORT_SYMBOL(get_dma_residue);
EXPORT_SYMBOL(set_dma_sg);
EXPORT_SYMBOL(set_dma_speed);
224 void __init init_dma(void)
226 arch_dma_init(dma_chan);