MOXA linux-2.6.x / linux-2.6.19-uc1 from UC-7110-LX-BOOTLOADER-1.9_VERSION-4.2.tgz
linux-2.6.19-moxart.git: arch/nios2nommu/kernel/dma.c
/*
 * arch/nios2nommu/kernel/dma.c
 *
 * Copyright (C) 2005 Microtronix Datacom Ltd
 *
 * PC like DMA API for Nios's DMAC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Written by Wentao Xu <wentao@microtronix.com>
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <asm/dma.h>
/* nios2 dma controller register map */
#define REG_DMA_STATUS          0
#define REG_DMA_READADDR        4
#define REG_DMA_WRITEADDR       8
#define REG_DMA_LENGTH          12
#define REG_DMA_CONTROL         24
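/*
 * Each register is a 32-bit word at the channel's base address plus the
 * offset above, accessed with inl()/outl() as the code below does, e.g.
 * inl(base + REG_DMA_STATUS).
 */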
/* status register bits definition */
#define ST_DONE         0x01    /* transfer completed */
#define ST_BUSY         0x02    /* transfer in progress */
#define ST_REOP         0x04    /* read end-of-packet seen */
#define ST_WROP         0x08    /* write end-of-packet seen */
#define ST_LEN          0x10    /* length counter reached zero */
/* control register bits definition */
#define CT_BYTE         0x01    /* 8-bit transfers */
#define CT_HW           0x02    /* 16-bit (halfword) transfers */
#define CT_WORD         0x04    /* 32-bit transfers */
#define CT_GO           0x08    /* start/enable the channel */
#define CT_IEEN         0x10    /* interrupt enable */
#define CT_REEN         0x20    /* end transfer on read end-of-packet */
#define CT_WEEN         0x40    /* end transfer on write end-of-packet */
#define CT_LEEN         0x80    /* end transfer when length reaches zero */
#define CT_RCON         0x100   /* constant read address (device FIFO) */
#define CT_WCON         0x200   /* constant write address (device FIFO) */
#define CT_DOUBLE       0x400   /* 64-bit transfers */
#define CT_QUAD         0x800   /* 128-bit transfers */
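/*
 * Illustrative example (hypothetical, not used below): a 32-bit
 * memory-to-memory transfer that stops after LENGTH bytes and raises an
 * interrupt on completion would program the control register with
 *
 *      CT_WORD | CT_LEEN | CT_GO | CT_IEEN
 */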
struct dma_channel {
        unsigned int    addr;           /* control address */
        unsigned int    irq;            /* interrupt number */
        atomic_t        idle;           /* 1 if free, 0 if claimed */
        unsigned int    mode;           /* dma mode: width, stream etc */
        int             (*handler)(void *, int);
        void            *user;

        char            id[16];         /* irq name, "dmac-N" */
        char            dev_id[16];     /* owner, from request_dma() */
};

static struct dma_channel dma_channels[] = {
#ifdef na_dma_0
        {
                .addr = na_dma_0,
                .irq  = na_dma_0_irq,
                .idle = ATOMIC_INIT(1),
        },
#endif
#ifdef na_dma_1
        {
                .addr = na_dma_1,
                .irq  = na_dma_1_irq,
                .idle = ATOMIC_INIT(1),
        },
#endif
};

#define MAX_DMA_CHANNELS (sizeof(dma_channels)/sizeof(struct dma_channel))
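/*
 * The na_dma_N/na_dma_N_irq macros above presumably come from the
 * SOPC-generated hardware header pulled in through <asm/dma.h>; a board
 * without a given DMA core simply compiles that channel out.
 */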
void enable_dma(unsigned int dmanr)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                unsigned int ctl = dma_channels[dmanr].mode;

                ctl |= CT_GO | CT_IEEN;
                outl(ctl, dma_channels[dmanr].addr + REG_DMA_CONTROL);
        }
}

void disable_dma(unsigned int dmanr)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                unsigned int ctl = dma_channels[dmanr].mode;

                ctl &= ~(CT_GO | CT_IEEN);
                outl(ctl, dma_channels[dmanr].addr + REG_DMA_CONTROL);
        }
}

void set_dma_count(unsigned int dmanr, unsigned int count)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                dma_channels[dmanr].mode |= CT_LEEN;
                outl(count, dma_channels[dmanr].addr + REG_DMA_LENGTH);
        }
}

int get_dma_residue(unsigned int dmanr)
{
        int result = -1;

        if (dmanr < MAX_DMA_CHANNELS) {
                result = inl(dma_channels[dmanr].addr + REG_DMA_LENGTH);
        }
        return result;
}
int request_dma(unsigned int chan, const char *dev_id)
{
        struct dma_channel *channel;

        if (chan >= MAX_DMA_CHANNELS) {
                return -EINVAL;
        }

        channel = &dma_channels[chan];
        if (!atomic_dec_and_test(&channel->idle)) {
                /* channel is busy; undo the decrement before bailing out */
                atomic_inc(&channel->idle);
                return -EBUSY;
        }

        strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
        channel->handler = NULL;
        channel->user = NULL;
        channel->mode = 0;

        return 0;
}

void free_dma(unsigned int chan)
{
        if (chan < MAX_DMA_CHANNELS) {
                dma_channels[chan].handler = NULL;
                dma_channels[chan].user = NULL;
                atomic_set(&dma_channels[chan].idle, 1);
        }
}
int nios2_request_dma(const char *dev_id)
{
        int chann;

        for (chann = 0; chann < MAX_DMA_CHANNELS; chann++) {
                if (request_dma(chann, dev_id) == 0)
                        return chann;
        }

        return -EINVAL;
}

void nios2_set_dma_handler(unsigned int dmanr, int (*handler)(void *, int), void *user)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                dma_channels[dmanr].handler = handler;
                dma_channels[dmanr].user = user;
        }
}
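/*
 * A minimal, hypothetical completion callback for use with
 * nios2_set_dma_handler() (names are illustrative). It runs in
 * interrupt context and receives the raw DMA status register:
 *
 *      static int my_dma_done(void *user, int status)
 *      {
 *              if (status & ST_DONE)
 *                      complete((struct completion *)user);
 *              return 0;
 *      }
 */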
#define NIOS2_DMA_WIDTH_MASK (CT_BYTE | CT_HW | CT_WORD | CT_DOUBLE | CT_QUAD)
#define NIOS2_MODE_MASK (NIOS2_DMA_WIDTH_MASK | CT_REEN | CT_WEEN | CT_LEEN | CT_RCON | CT_WCON)
void nios2_set_dma_data_width(unsigned int dmanr, unsigned int width)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                dma_channels[dmanr].mode &= ~NIOS2_DMA_WIDTH_MASK;
                switch (width) {
                case 1:
                        dma_channels[dmanr].mode |= CT_BYTE;
                        break;
                case 2:
                        dma_channels[dmanr].mode |= CT_HW;
                        break;
                case 8:
                        dma_channels[dmanr].mode |= CT_DOUBLE;
                        break;
                case 16:
                        dma_channels[dmanr].mode |= CT_QUAD;
                        break;
                case 4:
                default:
                        dma_channels[dmanr].mode |= CT_WORD;
                        break;
                }
        }
}
void nios2_set_dma_rcon(unsigned int dmanr, unsigned int set)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                dma_channels[dmanr].mode &= ~(CT_REEN | CT_RCON);
                if (set)
                        dma_channels[dmanr].mode |= (CT_REEN | CT_RCON);
        }
}

void nios2_set_dma_wcon(unsigned int dmanr, unsigned int set)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                dma_channels[dmanr].mode &= ~(CT_WEEN | CT_WCON);
                if (set)
                        dma_channels[dmanr].mode |= (CT_WEEN | CT_WCON);
        }
}

void nios2_set_dma_mode(unsigned int dmanr, unsigned int mode)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                /*
                 * set_dma_mode is only allowed to change the bus width,
                 * stream setting, etc.
                 */
                mode &= NIOS2_MODE_MASK;
                dma_channels[dmanr].mode &= ~NIOS2_MODE_MASK;
                dma_channels[dmanr].mode |= mode;
        }
}
void nios2_set_dma_raddr(unsigned int dmanr, unsigned int a)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                outl(a, dma_channels[dmanr].addr + REG_DMA_READADDR);
        }
}

void nios2_set_dma_waddr(unsigned int dmanr, unsigned int a)
{
        if (dmanr < MAX_DMA_CHANNELS) {
                outl(a, dma_channels[dmanr].addr + REG_DMA_WRITEADDR);
        }
}
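/*
 * Putting the pieces together, a sketch of a typical memory-to-memory
 * transfer using this API (channel, callback and address names are
 * hypothetical; my_dma_done is the callback sketched above):
 *
 *      int chan = nios2_request_dma("mydev");
 *      if (chan >= 0) {
 *              nios2_set_dma_handler(chan, my_dma_done, &done);
 *              nios2_set_dma_data_width(chan, 4);      (32-bit words)
 *              nios2_set_dma_raddr(chan, src_phys);
 *              nios2_set_dma_waddr(chan, dst_phys);
 *              set_dma_count(chan, len);               (bytes; also sets CT_LEEN)
 *              enable_dma(chan);                       (sets CT_GO | CT_IEEN)
 *      }
 */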
static irqreturn_t dma_isr(int irq, void *dev_id)
{
        struct dma_channel *chann = (struct dma_channel *)dev_id;

        if (chann) {
                int status = inl(chann->addr + REG_DMA_STATUS);

                /* ack the interrupt, and clear the DONE bit */
                outl(0, chann->addr + REG_DMA_STATUS);
                /* call the peripheral callback */
                if (chann->handler)
                        chann->handler(chann->user, status);
        }

        return IRQ_HANDLED;
}
#ifdef CONFIG_PROC_FS
static int proc_dma_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                if (!atomic_read(&dma_channels[i].idle)) {
                        seq_printf(m, "%2d: %s\n", i,
                                   dma_channels[i].dev_id);
                }
        }

        return 0;
}

static int proc_dma_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_dma_show, NULL);
}

static struct file_operations proc_dma_operations = {
        .open           = proc_dma_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_dma_init(void)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("dma", 0, NULL);
        if (e)
                e->proc_fops = &proc_dma_operations;

        return 0;
}

__initcall(proc_dma_init);
#endif /* CONFIG_PROC_FS */
int __init init_dma(void)
{
        int i;

        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                sprintf(dma_channels[i].id, "dmac-%d", i);
                /* disable the dmac channel */
                disable_dma(i);
                /* request irq */
                if (request_irq(dma_channels[i].irq, dma_isr, 0,
                                dma_channels[i].id, (void *)&dma_channels[i])) {
                        printk(KERN_ERR "DMA controller %d failed to get irq %d\n",
                               i, dma_channels[i].irq);
                        /* mark the channel busy so it is never handed out */
                        atomic_set(&dma_channels[i].idle, 0);
                }
        }

        return 0;
}

static void __exit exit_dma(void)
{
        int i;

        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                /* disable the dmac channel */
                disable_dma(i);
                /* must match the dev_id cookie passed to request_irq() */
                free_irq(dma_channels[i].irq, &dma_channels[i]);
        }
}

module_init(init_dma);
module_exit(exit_dma);
MODULE_LICENSE("GPL");

//EXPORT_SYMBOL(claim_dma_lock);
//EXPORT_SYMBOL(release_dma_lock);
EXPORT_SYMBOL(enable_dma);
EXPORT_SYMBOL(disable_dma);
EXPORT_SYMBOL(set_dma_count);
EXPORT_SYMBOL(get_dma_residue);
EXPORT_SYMBOL(request_dma);
EXPORT_SYMBOL(free_dma);
EXPORT_SYMBOL(nios2_request_dma);
EXPORT_SYMBOL(nios2_set_dma_handler);
EXPORT_SYMBOL(nios2_set_dma_data_width);
EXPORT_SYMBOL(nios2_set_dma_rcon);
EXPORT_SYMBOL(nios2_set_dma_wcon);
EXPORT_SYMBOL(nios2_set_dma_mode);
EXPORT_SYMBOL(nios2_set_dma_raddr);
EXPORT_SYMBOL(nios2_set_dma_waddr);