/*
 * arch/sh/kernel/cpu/dma.c
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003 Paul Mundt
 *
 * PC like DMA API for SuperH's DMAC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/signal.h>
#include <asm/dma.h>

static struct dma_info_t *dma_info[MAX_DMA_CHANNELS];
static struct dma_info_t *autoinit_info[SH_MAX_DMA_CHANNELS] = {0};

extern spinlock_t dma_spin_lock;

/*
 * The SuperH DMAC supports a number of transmit sizes; they are listed
 * here with their respective values as they appear in the CHCR registers.
 *
 * Defaults to a 64-bit transfer size.
 */
enum {
	XMIT_SZ_64BIT	= 0,
	XMIT_SZ_8BIT	= 1,
	XMIT_SZ_16BIT	= 2,
	XMIT_SZ_32BIT	= 3,
	XMIT_SZ_256BIT	= 4,
};

/*
 * The DMA count is defined as the number of bytes to transfer.
 */
static unsigned int ts_shift[] = {
	[XMIT_SZ_64BIT]		= 3,
	[XMIT_SZ_8BIT]		= 0,
	[XMIT_SZ_16BIT]		= 1,
	[XMIT_SZ_32BIT]		= 2,
	[XMIT_SZ_256BIT]	= 5,
};

/*
 * We determine the correct shift size based on the CHCR transmit size for
 * the given channel; the transfer then takes
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete.
 */
static inline unsigned int calc_xmit_shift(struct dma_info_t *info)
{
	return ts_shift[(ctrl_inl(CHCR[info->chan]) >> 4) & 0x0007];
}
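
/*
 * Worked example (illustrative, derived from the ts_shift[] table above):
 * a channel whose CHCR transmit size field reads XMIT_SZ_16BIT yields a
 * shift of 1, so a 32-byte request is programmed as 32 >> 1 = 16 transfer
 * units, while an XMIT_SZ_32BIT channel would use 32 >> 2 = 8 units.
 */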

/*
 * Transfer end interrupt: re-arm auto-initialized channels by reloading
 * the memory address and transfer count, then clear the TE bit.
 */
static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs)
{
	int chan = irq - DMTE_IRQ[0];
	struct dma_info_t *info = autoinit_info[chan];

	if (info->mode & DMA_MODE_WRITE)
		ctrl_outl(info->mem_addr, SAR[info->chan]);
	else
		ctrl_outl(info->mem_addr, DAR[info->chan]);

	ctrl_outl(info->count >> calc_xmit_shift(info), DMATCR[info->chan]);
	ctrl_outl(ctrl_inl(CHCR[info->chan]) & ~CHCR_TE, CHCR[info->chan]);

	return IRQ_HANDLED;
}

static struct irqaction irq_tei = {
	.handler	= dma_tei,
	.flags		= SA_INTERRUPT,
	.name		= "dma_tei",
};

void setup_dma(unsigned int dmanr, struct dma_info_t *info)
{
	make_ipr_irq(DMTE_IRQ[info->chan], DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
	setup_irq(DMTE_IRQ[info->chan], &irq_tei);
	dma_info[dmanr] = info;
}

unsigned long claim_dma_lock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&dma_spin_lock, flags);

	return flags;
}

void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}

void enable_dma(unsigned int dmanr)
{
	struct dma_info_t *info = dma_info[dmanr];
	unsigned long chcr;

	chcr = ctrl_inl(CHCR[info->chan]);
	chcr |= CHCR_DE;
	ctrl_outl(chcr, CHCR[info->chan]);
}

void disable_dma(unsigned int dmanr)
{
	struct dma_info_t *info = dma_info[dmanr];
	unsigned long chcr;

	chcr = ctrl_inl(CHCR[info->chan]);
	chcr &= ~CHCR_DE;
	ctrl_outl(chcr, CHCR[info->chan]);
}

void set_dma_mode(unsigned int dmanr, char mode)
{
	struct dma_info_t *info = dma_info[dmanr];

	info->mode = mode;

	set_dma_addr(dmanr, info->mem_addr);
	set_dma_count(dmanr, info->count);

	autoinit_info[info->chan] = info;
}

void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	struct dma_info_t *info = dma_info[dmanr];
	unsigned long sar, dar;

	info->mem_addr = a;
	sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr;
	dar = (info->mode & DMA_MODE_WRITE) ? info->dev_addr : info->mem_addr;

	ctrl_outl(sar, SAR[info->chan]);
	ctrl_outl(dar, DAR[info->chan]);
}

void set_dma_count(unsigned int dmanr, unsigned int count)
{
	struct dma_info_t *info = dma_info[dmanr];

	info->count = count;
	ctrl_outl(count >> calc_xmit_shift(info), DMATCR[info->chan]);
}

int get_dma_residue(unsigned int dmanr)
{
	struct dma_info_t *info = dma_info[dmanr];

	return (ctrl_inl(DMATCR[info->chan]) << calc_xmit_shift(info));
}
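
/*
 * Illustrative note: get_dma_residue() is the inverse of set_dma_count().
 * DMATCR holds the remaining count in transfer units, so shifting it left
 * by the transmit-size shift converts it back to bytes; e.g. a reading of
 * 10 on a 32-bit channel corresponds to 10 << 2 = 40 bytes outstanding.
 */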

#if defined(CONFIG_CPU_SH4)
/*
 * DMA error interrupt: clear NMIF and AE in DMAOR, then re-enable DME.
 */
static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
{
	printk(KERN_WARNING "DMAE: DMAOR=%lx\n", ctrl_inl(DMAOR));

	ctrl_outl(ctrl_inl(DMAOR) & ~DMAOR_NMIF, DMAOR);
	ctrl_outl(ctrl_inl(DMAOR) & ~DMAOR_AE, DMAOR);
	ctrl_outl(ctrl_inl(DMAOR) | DMAOR_DME, DMAOR);

	return IRQ_HANDLED;
}

static struct irqaction irq_err = {
	.handler	= dma_err,
	.flags		= SA_INTERRUPT,
	.name		= "dma_err",
};
#endif

int __init init_dma(void)
{
#if defined(CONFIG_CPU_SH4)
	make_ipr_irq(DMAE_IRQ, DMA_IPR_ADDR, DMA_IPR_POS, DMA_PRIORITY);
	setup_irq(DMAE_IRQ, &irq_err);
#endif

	ctrl_outl(DMAOR_DME, DMAOR);

	return 0;
}

static void __exit exit_dma(void)
{
#ifdef CONFIG_CPU_SH4
	free_irq(DMAE_IRQ, 0);
#endif
}

module_init(init_dma);
module_exit(exit_dma);

MODULE_LICENSE("GPL");

EXPORT_SYMBOL(setup_dma);
EXPORT_SYMBOL(claim_dma_lock);
EXPORT_SYMBOL(release_dma_lock);
EXPORT_SYMBOL(enable_dma);
EXPORT_SYMBOL(disable_dma);
EXPORT_SYMBOL(set_dma_mode);
EXPORT_SYMBOL(set_dma_addr);
EXPORT_SYMBOL(set_dma_count);
EXPORT_SYMBOL(get_dma_residue);
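
/*
 * Illustrative usage sketch (not part of this file): a driver wanting a
 * memory-to-device transfer on channel 0 would roughly do the following.
 * The dma_info_t fields used (chan, dev_addr, mem_addr, count, mode) are
 * the ones referenced above; the buffer and device addresses shown are
 * hypothetical placeholders.
 *
 *	static struct dma_info_t info;
 *	unsigned long flags;
 *
 *	info.chan     = 0;
 *	info.dev_addr = device_fifo_phys;	// hypothetical device address
 *	setup_dma(0, &info);			// hook the TE irq, register info
 *
 *	flags = claim_dma_lock();
 *	info.mem_addr = buf_phys;		// hypothetical buffer address
 *	info.count    = len;			// length in bytes
 *	set_dma_mode(0, DMA_MODE_WRITE);	// also programs SAR/DAR and DMATCR
 *	enable_dma(0);				// set CHCR_DE to start the transfer
 *	release_dma_lock(flags);
 */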