3 * Copyright (C) 2005 Moxa Group All Rights Reserved.
7 * 12-01-2005 Victor Yu. Create it.
9 #include <linux/config.h>
10 #include <asm/arch/cpe/cpe.h>
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/interrupt.h>
15 #include <linux/slab.h>
18 #include <asm/cacheflush.h>
20 #include <asm/arch/cpe_int.h>
/* NOTE(review): DBG is unconditionally defined to printk, so debug
 * output is always compiled in — presumably meant to be stubbed out
 * for release builds; confirm intent. */
24 #define DBG(x...) printk(x)
/* One descriptor per hardware DMA channel.  All access to the table
 * and to each channel's state is serialized by apb_dma_lock. */
29 static apb_dma_priv apb_dma_channel[APB_DMA_MAX_CHANNEL];
30 static spinlock_t apb_dma_lock;
/*
 * apb_dma_alloc - claim a free APB DMA channel for request line req_no.
 *
 * Scans the static channel table under apb_dma_lock and takes the first
 * entry whose used_flag is 0, clearing its callback and configuration
 * state and recording req_no.  For the known request numbers a PMU
 * register at CPE_PMU_VA_BASE plus a fixed offset is written with 0 —
 * presumably this routes the peripheral's request line to the DMA
 * engine; TODO confirm against the PMU register manual.
 *
 * NOTE(review): this excerpt appears truncated — the switch header,
 * break statements, the used_flag = 1 assignment and the return
 * statements (channel pointer on success, NULL when no channel is
 * free) are not visible here; verify against the full file.
 */
32 apb_dma_priv *apb_dma_alloc(int req_no)
36 apb_dma_priv *priv=apb_dma_channel;
38 spin_lock_irqsave(&apb_dma_lock, flags);
39 for ( i=0; i<APB_DMA_MAX_CHANNEL; i++, priv++ ) {
40 if ( priv->used_flag == 0 ) {
/* reset any stale callback/config left by a previous owner */
42 priv->irq_handler = NULL;
43 priv->irq_handler_param = NULL;
44 priv->conf_param = NULL;
46 priv->req_no = req_no;
/* per-peripheral PMU routing registers; offsets differ per request */
48 case APB_DMA_SPI_TX_REQ_NO :
49 *(unsigned int *)(CPE_PMU_VA_BASE+0xA0) = 0;
51 case APB_DMA_SPI_RX_REQ_NO :
52 *(unsigned int *)(CPE_PMU_VA_BASE+0xA4) = 0;
54 case APB_DMA_SD_REQ_NO :
55 *(unsigned int *)(CPE_PMU_VA_BASE+0xB8) = 0;
57 case APB_DMA_AC97_TX_REQ_NO :
58 *(unsigned int *)(CPE_PMU_VA_BASE+0xBC) = 0;
60 case APB_DMA_AC97_RX_REQ_NO :
61 *(unsigned int *)(CPE_PMU_VA_BASE+0xC0) = 0;
63 case APB_DMA_USB_DEVICE_REQ_NO :
64 *(unsigned int *)(CPE_PMU_VA_BASE+0xCC) = 0;
67 spin_unlock_irqrestore(&apb_dma_lock, flags);
68 DBG("apb_dma_alloc uses DMA channel %d\n", i);
/* fall-through path: every channel was busy */
72 spin_unlock_irqrestore(&apb_dma_lock, flags);
75 EXPORT_SYMBOL(apb_dma_alloc);
/*
 * apb_dma_release - return a channel claimed by apb_dma_alloc().
 *
 * Clears the channel's callback and configuration pointers under
 * apb_dma_lock so the channel can be handed out again.
 *
 * NOTE(review): the early unlock below presumably sits inside a
 * "if (priv == NULL) { unlock; return; }" guard whose surrounding
 * lines are not visible in this excerpt, and the used_flag = 0
 * assignment is likewise not visible — confirm against the full file.
 */
77 void apb_dma_release(apb_dma_priv *priv)
81 spin_lock_irqsave(&apb_dma_lock, flags);
/* early-exit unlock (guard condition not visible in this excerpt) */
83 spin_unlock_irqrestore(&apb_dma_lock, flags);
88 priv->irq_handler = NULL;
89 priv->irq_handler_param = NULL;
90 priv->conf_param = NULL;
92 spin_unlock_irqrestore(&apb_dma_lock, flags);
94 EXPORT_SYMBOL(apb_dma_release);
/*
 * apb_dma_set_irq - register a completion callback for a channel and
 * enable its finish/error interrupts.
 *
 * @priv:  channel obtained from apb_dma_alloc() (NULL is tolerated and
 *         results in an early return after unlocking)
 * @func:  callback invoked from apb_dma_irq() when the channel raises
 *         a finish or error interrupt
 * @param: opaque argument passed back to @func
 *
 * Read-modify-writes the channel's command register to set the
 * finish- and error-interrupt enable bits, all under apb_dma_lock.
 */
96 void apb_dma_set_irq(apb_dma_priv *priv, void (*func)(void *param), void *param)
101 spin_lock_irqsave(&apb_dma_lock, flags);
102 if ( priv == NULL ) {
103 spin_unlock_irqrestore(&apb_dma_lock, flags);
106 priv->irq_handler = func;
107 priv->irq_handler_param = param;
108 priv->error_flag = 0;
/* enable finish + error interrupts in the channel command register */
109 cmd = readl(&priv->reg->command.ul);
110 cmd |= (APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
111 writel(cmd, &priv->reg->command.ul);
112 spin_unlock_irqrestore(&apb_dma_lock, flags);
114 EXPORT_SYMBOL(apb_dma_set_irq);
/*
 * apb_dma_release_irq - undo apb_dma_set_irq(): mask the channel's
 * finish/error interrupts and drop the registered callback.
 *
 * Tolerates priv == NULL (unlocks and returns early).  Performed under
 * apb_dma_lock so it cannot race with the interrupt handler's use of
 * irq_handler/irq_handler_param.
 */
116 void apb_dma_release_irq(apb_dma_priv *priv)
121 spin_lock_irqsave(&apb_dma_lock, flags);
122 if ( priv == NULL ) {
123 spin_unlock_irqrestore(&apb_dma_lock, flags);
/* mask both interrupt sources before clearing the callback */
126 cmd = readl(&priv->reg->command.ul);
127 cmd &= ~(APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
128 writel(cmd, &priv->reg->command.ul);
129 priv->irq_handler = NULL;
130 priv->irq_handler_param = NULL;
131 priv->error_flag = 0;
132 spin_unlock_irqrestore(&apb_dma_lock, flags);
134 EXPORT_SYMBOL(apb_dma_release_irq);
/*
 * apb_dma_conf - program a channel with a transfer description.
 *
 * @priv:  channel obtained from apb_dma_alloc()
 * @param: transfer description (addresses, size, width, burst,
 *         increment modes, AHB/APB side selection)
 *
 * Writes source/destination addresses, the cycle count, and the
 * command-register bitfields.  Addresses on the AHB side are converted
 * with virt_to_phys(); APB-side addresses go through PHY_ADDRESS().
 * On CONFIG_UCLINUX the addresses are written as-is — presumably
 * because a no-MMU build has identity-mapped memory; confirm.
 * When the source is AHB (DRAM), the source buffer's D-cache lines are
 * flushed to memory before the transfer so the engine sees fresh data.
 *
 * NOTE(review): the excerpt is truncated — the lines computing `size`
 * in each data-width case, the burst-mode adjustment of `size`, and
 * several closing braces are not visible; verify against the full file.
 */
136 void apb_dma_conf(apb_dma_priv *priv, apb_dma_conf_param *param)
142 if ( param == NULL || priv == NULL )
144 spin_lock_irqsave(&apb_dma_lock, flags);
145 #ifdef CONFIG_UCLINUX
/* no-MMU build: addresses are used directly */
146 writel(param->source_addr, &priv->reg->source_addr);
147 writel(param->dest_addr, &priv->reg->dest_addr);
148 #else // CONFIG_UCLINUX
/* keep the description so the IRQ handler can invalidate the
 * destination cache range on completion */
149 priv->conf_param = param;
150 if ( param->source_sel == APB_DMAB_SOURCE_AHB )
151 writel(virt_to_phys((void *)param->source_addr), &priv->reg->source_addr);
153 writel(PHY_ADDRESS(param->source_addr), &priv->reg->source_addr);
154 if ( param->dest_sel == APB_DMAB_DEST_AHB )
155 writel(virt_to_phys((void *)param->dest_addr), &priv->reg->dest_addr);
157 writel(PHY_ADDRESS(param->dest_addr), &priv->reg->dest_addr);
158 #endif // CONFIG_UCLINUX
/* derive the cycle count from byte size and data width
 * (size computation lines not visible in this excerpt) */
160 switch ( param->data_width ) {
161 case APB_DMAB_DATA_WIDTH_1 :
163 case APB_DMAB_DATA_WIDTH_2 :
166 case APB_DMAB_DATA_WIDTH_4 :
171 if ( param->burst_mode )
173 writel(size, &priv->reg->cycles);
174 cmd.ul = readl(&priv->reg->command.ul);
175 cmd.bits.data_width = param->data_width;
/* hardware request line only applies on the APB side; AHB uses 0 */
176 if ( param->dest_sel == APB_DMAB_DEST_AHB ) { // AHB
177 cmd.bits.dest_req_no = 0;
179 cmd.bits.dest_req_no = priv->req_no;
181 cmd.bits.dest_sel = param->dest_sel;
182 if ( param->source_sel == APB_DMAB_SOURCE_AHB ) { // AHB
183 #ifndef CONFIG_UCLINUX
/* write back the source buffer so the DMA engine reads fresh data */
184 dmac_flush_range(param->source_addr, param->source_addr+param->size);
185 #endif // CONFIG_UCLINUX
186 cmd.bits.source_req_no = 0;
188 cmd.bits.source_req_no = priv->req_no;
190 cmd.bits.source_sel = param->source_sel;
191 cmd.bits.burst = param->burst_mode;
192 cmd.bits.dest_inc = param->dest_inc;
193 cmd.bits.source_inc = param->source_inc;
194 writel(cmd.ul, &priv->reg->command.ul);
195 spin_unlock_irqrestore(&apb_dma_lock, flags);
197 EXPORT_SYMBOL(apb_dma_conf);
/*
 * apb_dma_enable - start the channel by setting the enable bit in its
 * command register (read-modify-write under apb_dma_lock).
 *
 * NOTE(review): unlike apb_dma_set_irq/apb_dma_release_irq there is no
 * visible NULL check on priv here — inconsistent; confirm callers
 * always pass a valid channel.
 */
199 void apb_dma_enable(apb_dma_priv *priv)
204 spin_lock_irqsave(&apb_dma_lock, flags);
205 cmd = readl(&priv->reg->command.ul);
206 cmd |= APB_DMA_ENABLE;
207 writel(cmd, &priv->reg->command.ul);
208 spin_unlock_irqrestore(&apb_dma_lock, flags);
210 EXPORT_SYMBOL(apb_dma_enable);
/*
 * apb_dma_disable - stop the channel by clearing the enable bit in its
 * command register (read-modify-write under apb_dma_lock).
 *
 * NOTE(review): no visible NULL check on priv, same as apb_dma_enable.
 */
212 void apb_dma_disable(apb_dma_priv *priv)
217 spin_lock_irqsave(&apb_dma_lock, flags);
218 cmd = readl(&priv->reg->command.ul);
219 cmd &= ~APB_DMA_ENABLE;
220 writel(cmd, &priv->reg->command.ul);
221 spin_unlock_irqrestore(&apb_dma_lock, flags);
223 EXPORT_SYMBOL(apb_dma_disable);
/*
 * apb_dma_irq - shared interrupt handler for all APB DMA channels.
 *
 * Uses the old three-argument handler signature (pre-2.6.19 kernels).
 * Polls every channel's command register: on a finish interrupt it
 * invalidates the destination buffer's D-cache range (when the
 * destination was AHB/DRAM, using the conf_param saved by
 * apb_dma_conf) so the CPU reads the DMA'd data; on an error interrupt
 * it sets error_flag.  The registered callback, if any, is invoked for
 * channels that are in use, and the status bits are acknowledged by
 * writing the command register back with those bits cleared —
 * presumably the hardware clears status on a 0 write; confirm with the
 * controller datasheet.
 *
 * NOTE(review): no apb_dma_lock is taken here, and the return
 * statement (IRQ_HANDLED) is not visible in this excerpt.
 */
225 static irqreturn_t apb_dma_irq(int irq, void *devid, struct pt_regs *regs)
229 apb_dma_priv *priv=apb_dma_channel;
231 DBG("apb_dma_irq test01\n");
232 for ( i=0; i<APB_DMA_MAX_CHANNEL; i++, priv++ ) {
233 cmd = readl(&priv->reg->command.ul);
234 if ( cmd & APB_DMA_FIN_INT_STS ) {
235 DBG("apb_dma_irq finish interrupt channel [%d]\n", i);
236 cmd &= ~APB_DMA_FIN_INT_STS;
237 #ifndef CONFIG_UCLINUX
239 apb_dma_conf_param *conf;
240 if ( (conf=priv->conf_param) != NULL ) {
/* DMA wrote DRAM behind the cache: discard stale cache lines */
241 if ( conf->dest_sel == APB_DMAB_DEST_AHB ) { // to DRAM
242 dmac_inv_range(conf->dest_addr, conf->dest_addr+conf->size);
244 priv->conf_param = NULL;
247 #endif // CONFIG_UCLINUX
249 if ( cmd & APB_DMA_ERR_INT_STS ) {
250 DBG("apb_dma_irq error interrupt channel [%d]\n", i);
251 cmd &= ~APB_DMA_ERR_INT_STS;
252 priv->error_flag = 1;
/* notify the owner; error_flag is consumed/reset afterwards */
254 if ( priv->used_flag && priv->irq_handler ) {
255 priv->irq_handler(priv->irq_handler_param);
257 priv->error_flag = 0;
/* ack: write status bits back cleared */
258 writel(cmd, &priv->reg->command.ul);
/*
 * apb_dma_init - module init: prepare the channel table and claim the
 * DMA interrupt.
 *
 * Zeroes the channel descriptors, initializes apb_dma_lock, points
 * each descriptor at its register window (base + 0x80 + i * regsize),
 * configures the interrupt as active-high edge-triggered via
 * cpe_int_set_irq(), and registers apb_dma_irq with the legacy
 * SA_INTERRUPT flag (pre-2.6.18 API).
 *
 * NOTE(review): the handling/return of `ret` from request_irq is not
 * visible in this excerpt — confirm failure is actually propagated.
 */
264 static int __init apb_dma_init(void)
267 apb_dma_priv *priv=apb_dma_channel;
269 printk("Moxa CPU APB DMA Device Driver V1.0 load ");
271 memset(apb_dma_channel, 0, sizeof(apb_dma_channel));
272 spin_lock_init(&apb_dma_lock);
/* channel register windows are packed after a 0x80-byte header */
274 for ( i=0; i<APB_DMA_MAX_CHANNEL; i++, priv++ ) {
275 priv->reg = (apb_dma_reg *)(CPE_APBDMA_VA_BASE+0x80+i*sizeof(apb_dma_reg));
278 cpe_int_set_irq(IRQ_APBDMA, EDGE, H_ACTIVE);
279 ret = request_irq(IRQ_APBDMA, apb_dma_irq, SA_INTERRUPT, "APB DMA", NULL);
288 module_init(apb_dma_init);