Merge tag 'gpio-v3.13-3' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw...
[linux-2.6.git] / arch / arm / mach-s3c64xx / dma.c
blob7e22c2113816a4c6bb897dc0c54a74e876da3f86
/* linux/arch/arm/plat-s3c64xx/dma.c
 *
 * Copyright 2009 Openmoko, Inc.
 * Copyright 2009 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C64XX DMA core
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * NOTE: Code in this file is not used when booting with Device Tree support.
 */
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/interrupt.h>
22 #include <linux/dmapool.h>
23 #include <linux/device.h>
24 #include <linux/errno.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/clk.h>
28 #include <linux/err.h>
29 #include <linux/io.h>
30 #include <linux/amba/pl080.h>
31 #include <linux/of.h>
33 #include <mach/dma.h>
34 #include <mach/map.h>
35 #include <mach/irqs.h>
37 #include "regs-sys.h"
/* dma channel state information */

/* per-controller state for one PL080 DMA engine */
struct s3c64xx_dmac {
	struct device dev;			/* child device of dma_subsys */
	struct clk *clk;			/* controller clock ("dma0"/"dma1") */
	void __iomem *regs;			/* mapped controller register base */
	struct s3c2410_dma_chan *channels;	/* the 8 channels of this engine */
	enum dma_ch chanbase;			/* first virtual channel mapped here */
};
/* pool to provide LLI buffers */
static struct dma_pool *dma_pool;

/* Debug configuration and code */

/* when set, dbg_showbuffs() also walks and prints each queued LLI.
 * Note: statics live in .bss and are zero by default; an explicit
 * "= 0" initialiser is flagged by checkpatch and has been dropped. */
static unsigned char debug_show_buffs;
56 static void dbg_showchan(struct s3c2410_dma_chan *chan)
58 pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
59 chan->number,
60 readl(chan->regs + PL080_CH_SRC_ADDR),
61 readl(chan->regs + PL080_CH_DST_ADDR),
62 readl(chan->regs + PL080_CH_LLI),
63 readl(chan->regs + PL080_CH_CONTROL),
64 readl(chan->regs + PL080S_CH_CONTROL2),
65 readl(chan->regs + PL080S_CH_CONFIG));
68 static void show_lli(struct pl080s_lli *lli)
70 pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
71 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
72 lli->control0, lli->control1);
75 static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
77 struct s3c64xx_dma_buff *ptr;
78 struct s3c64xx_dma_buff *end;
80 pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
81 chan->number, chan->next, chan->curr, chan->end);
83 ptr = chan->next;
84 end = chan->end;
86 if (debug_show_buffs) {
87 for (; ptr != NULL; ptr = ptr->next) {
88 pr_debug("DMA%d: %08x ",
89 chan->number, ptr->lli_dma);
90 show_lli(ptr->lli);
95 /* End of Debug */
97 static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
99 struct s3c2410_dma_chan *chan;
100 unsigned int start, offs;
102 start = 0;
104 if (channel >= DMACH_PCM1_TX)
105 start = 8;
107 for (offs = 0; offs < 8; offs++) {
108 chan = &s3c2410_chans[start + offs];
109 if (!chan->in_use)
110 goto found;
113 return NULL;
115 found:
116 s3c_dma_chan_map[channel] = chan;
117 return chan;
120 int s3c2410_dma_config(enum dma_ch channel, int xferunit)
122 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
124 if (chan == NULL)
125 return -EINVAL;
127 switch (xferunit) {
128 case 1:
129 chan->hw_width = 0;
130 break;
131 case 2:
132 chan->hw_width = 1;
133 break;
134 case 4:
135 chan->hw_width = 2;
136 break;
137 default:
138 printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
139 return -EINVAL;
142 return 0;
144 EXPORT_SYMBOL(s3c2410_dma_config);
146 static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
147 struct pl080s_lli *lli,
148 dma_addr_t data, int size)
150 dma_addr_t src, dst;
151 u32 control0, control1;
153 switch (chan->source) {
154 case DMA_FROM_DEVICE:
155 src = chan->dev_addr;
156 dst = data;
157 control0 = PL080_CONTROL_SRC_AHB2;
158 control0 |= PL080_CONTROL_DST_INCR;
159 break;
161 case DMA_TO_DEVICE:
162 src = data;
163 dst = chan->dev_addr;
164 control0 = PL080_CONTROL_DST_AHB2;
165 control0 |= PL080_CONTROL_SRC_INCR;
166 break;
167 default:
168 BUG();
171 /* note, we do not currently setup any of the burst controls */
173 control1 = size >> chan->hw_width; /* size in no of xfers */
174 control0 |= PL080_CONTROL_PROT_SYS; /* always in priv. mode */
175 control0 |= PL080_CONTROL_TC_IRQ_EN; /* always fire IRQ */
176 control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
177 control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;
179 lli->src_addr = src;
180 lli->dst_addr = dst;
181 lli->next_lli = 0;
182 lli->control0 = control0;
183 lli->control1 = control1;
186 static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
187 struct pl080s_lli *lli)
189 void __iomem *regs = chan->regs;
191 pr_debug("%s: LLI %p => regs\n", __func__, lli);
192 show_lli(lli);
194 writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
195 writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
196 writel(lli->next_lli, regs + PL080_CH_LLI);
197 writel(lli->control0, regs + PL080_CH_CONTROL);
198 writel(lli->control1, regs + PL080S_CH_CONTROL2);
201 static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
203 struct s3c64xx_dmac *dmac = chan->dmac;
204 u32 config;
205 u32 bit = chan->bit;
207 dbg_showchan(chan);
209 pr_debug("%s: clearing interrupts\n", __func__);
211 /* clear interrupts */
212 writel(bit, dmac->regs + PL080_TC_CLEAR);
213 writel(bit, dmac->regs + PL080_ERR_CLEAR);
215 pr_debug("%s: starting channel\n", __func__);
217 config = readl(chan->regs + PL080S_CH_CONFIG);
218 config |= PL080_CONFIG_ENABLE;
219 config &= ~PL080_CONFIG_HALT;
221 pr_debug("%s: writing config %08x\n", __func__, config);
222 writel(config, chan->regs + PL080S_CH_CONFIG);
224 return 0;
227 static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
229 u32 config;
230 int timeout;
232 pr_debug("%s: stopping channel\n", __func__);
234 dbg_showchan(chan);
236 config = readl(chan->regs + PL080S_CH_CONFIG);
237 config |= PL080_CONFIG_HALT;
238 writel(config, chan->regs + PL080S_CH_CONFIG);
240 timeout = 1000;
241 do {
242 config = readl(chan->regs + PL080S_CH_CONFIG);
243 pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
244 if (config & PL080_CONFIG_ACTIVE)
245 udelay(10);
246 else
247 break;
248 } while (--timeout > 0);
250 if (config & PL080_CONFIG_ACTIVE) {
251 printk(KERN_ERR "%s: channel still active\n", __func__);
252 return -EFAULT;
255 config = readl(chan->regs + PL080S_CH_CONFIG);
256 config &= ~PL080_CONFIG_ENABLE;
257 writel(config, chan->regs + PL080S_CH_CONFIG);
259 return 0;
262 static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
263 struct s3c64xx_dma_buff *buf,
264 enum s3c2410_dma_buffresult result)
266 if (chan->callback_fn != NULL)
267 (chan->callback_fn)(chan, buf->pw, 0, result);
270 static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
272 dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
273 kfree(buff);
276 static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
278 struct s3c64xx_dma_buff *buff, *next;
279 u32 config;
281 dbg_showchan(chan);
283 pr_debug("%s: flushing channel\n", __func__);
285 config = readl(chan->regs + PL080S_CH_CONFIG);
286 config &= ~PL080_CONFIG_ENABLE;
287 writel(config, chan->regs + PL080S_CH_CONFIG);
289 /* dump all the buffers associated with this channel */
291 for (buff = chan->curr; buff != NULL; buff = next) {
292 next = buff->next;
293 pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);
295 s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
296 s3c64xx_dma_freebuff(buff);
299 chan->curr = chan->next = chan->end = NULL;
301 return 0;
304 int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
306 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
308 WARN_ON(!chan);
309 if (!chan)
310 return -EINVAL;
312 switch (op) {
313 case S3C2410_DMAOP_START:
314 return s3c64xx_dma_start(chan);
316 case S3C2410_DMAOP_STOP:
317 return s3c64xx_dma_stop(chan);
319 case S3C2410_DMAOP_FLUSH:
320 return s3c64xx_dma_flush(chan);
322 /* believe PAUSE/RESUME are no-ops */
323 case S3C2410_DMAOP_PAUSE:
324 case S3C2410_DMAOP_RESUME:
325 case S3C2410_DMAOP_STARTED:
326 case S3C2410_DMAOP_TIMEOUT:
327 return 0;
330 return -ENOENT;
332 EXPORT_SYMBOL(s3c2410_dma_ctrl);
334 /* s3c2410_dma_enque
338 int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
339 dma_addr_t data, int size)
341 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
342 struct s3c64xx_dma_buff *next;
343 struct s3c64xx_dma_buff *buff;
344 struct pl080s_lli *lli;
345 unsigned long flags;
346 int ret;
348 WARN_ON(!chan);
349 if (!chan)
350 return -EINVAL;
352 buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
353 if (!buff) {
354 printk(KERN_ERR "%s: no memory for buffer\n", __func__);
355 return -ENOMEM;
358 lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
359 if (!lli) {
360 printk(KERN_ERR "%s: no memory for lli\n", __func__);
361 ret = -ENOMEM;
362 goto err_buff;
365 pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
366 __func__, buff, data, lli, (u32)buff->lli_dma, size);
368 buff->lli = lli;
369 buff->pw = id;
371 s3c64xx_dma_fill_lli(chan, lli, data, size);
373 local_irq_save(flags);
375 if ((next = chan->next) != NULL) {
376 struct s3c64xx_dma_buff *end = chan->end;
377 struct pl080s_lli *endlli = end->lli;
379 pr_debug("enquing onto channel\n");
381 end->next = buff;
382 endlli->next_lli = buff->lli_dma;
384 if (chan->flags & S3C2410_DMAF_CIRCULAR) {
385 struct s3c64xx_dma_buff *curr = chan->curr;
386 lli->next_lli = curr->lli_dma;
389 if (next == chan->curr) {
390 writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
391 chan->next = buff;
394 show_lli(endlli);
395 chan->end = buff;
396 } else {
397 pr_debug("enquing onto empty channel\n");
399 chan->curr = buff;
400 chan->next = buff;
401 chan->end = buff;
403 s3c64xx_lli_to_regs(chan, lli);
406 local_irq_restore(flags);
408 show_lli(lli);
410 dbg_showchan(chan);
411 dbg_showbuffs(chan);
412 return 0;
414 err_buff:
415 kfree(buff);
416 return ret;
419 EXPORT_SYMBOL(s3c2410_dma_enqueue);
422 int s3c2410_dma_devconfig(enum dma_ch channel,
423 enum dma_data_direction source,
424 unsigned long devaddr)
426 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
427 u32 peripheral;
428 u32 config = 0;
430 pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
431 __func__, channel, source, devaddr, chan);
433 WARN_ON(!chan);
434 if (!chan)
435 return -EINVAL;
437 peripheral = (chan->peripheral & 0xf);
438 chan->source = source;
439 chan->dev_addr = devaddr;
441 pr_debug("%s: peripheral %d\n", __func__, peripheral);
443 switch (source) {
444 case DMA_FROM_DEVICE:
445 config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
446 config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
447 break;
448 case DMA_TO_DEVICE:
449 config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
450 config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
451 break;
452 default:
453 printk(KERN_ERR "%s: bad source\n", __func__);
454 return -EINVAL;
457 /* allow TC and ERR interrupts */
458 config |= PL080_CONFIG_TC_IRQ_MASK;
459 config |= PL080_CONFIG_ERR_IRQ_MASK;
461 pr_debug("%s: config %08x\n", __func__, config);
463 writel(config, chan->regs + PL080S_CH_CONFIG);
465 return 0;
467 EXPORT_SYMBOL(s3c2410_dma_devconfig);
470 int s3c2410_dma_getposition(enum dma_ch channel,
471 dma_addr_t *src, dma_addr_t *dst)
473 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
475 WARN_ON(!chan);
476 if (!chan)
477 return -EINVAL;
479 if (src != NULL)
480 *src = readl(chan->regs + PL080_CH_SRC_ADDR);
482 if (dst != NULL)
483 *dst = readl(chan->regs + PL080_CH_DST_ADDR);
485 return 0;
487 EXPORT_SYMBOL(s3c2410_dma_getposition);
489 /* s3c2410_request_dma
491 * get control of an dma channel
494 int s3c2410_dma_request(enum dma_ch channel,
495 struct s3c2410_dma_client *client,
496 void *dev)
498 struct s3c2410_dma_chan *chan;
499 unsigned long flags;
501 pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
502 channel, client->name, dev);
504 local_irq_save(flags);
506 chan = s3c64xx_dma_map_channel(channel);
507 if (chan == NULL) {
508 local_irq_restore(flags);
509 return -EBUSY;
512 dbg_showchan(chan);
514 chan->client = client;
515 chan->in_use = 1;
516 chan->peripheral = channel;
517 chan->flags = 0;
519 local_irq_restore(flags);
521 /* need to setup */
523 pr_debug("%s: channel initialised, %p\n", __func__, chan);
525 return chan->number | DMACH_LOW_LEVEL;
528 EXPORT_SYMBOL(s3c2410_dma_request);
530 /* s3c2410_dma_free
532 * release the given channel back to the system, will stop and flush
533 * any outstanding transfers, and ensure the channel is ready for the
534 * next claimant.
536 * Note, although a warning is currently printed if the freeing client
537 * info is not the same as the registrant's client info, the free is still
538 * allowed to go through.
541 int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
543 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
544 unsigned long flags;
546 if (chan == NULL)
547 return -EINVAL;
549 local_irq_save(flags);
551 if (chan->client != client) {
552 printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
553 channel, chan->client, client);
556 /* sort out stopping and freeing the channel */
559 chan->client = NULL;
560 chan->in_use = 0;
562 if (!(channel & DMACH_LOW_LEVEL))
563 s3c_dma_chan_map[channel] = NULL;
565 local_irq_restore(flags);
567 return 0;
570 EXPORT_SYMBOL(s3c2410_dma_free);
572 static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
574 struct s3c64xx_dmac *dmac = pw;
575 struct s3c2410_dma_chan *chan;
576 enum s3c2410_dma_buffresult res;
577 u32 tcstat, errstat;
578 u32 bit;
579 int offs;
581 tcstat = readl(dmac->regs + PL080_TC_STATUS);
582 errstat = readl(dmac->regs + PL080_ERR_STATUS);
584 for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
585 struct s3c64xx_dma_buff *buff;
587 if (!(errstat & bit) && !(tcstat & bit))
588 continue;
590 chan = dmac->channels + offs;
591 res = S3C2410_RES_ERR;
593 if (tcstat & bit) {
594 writel(bit, dmac->regs + PL080_TC_CLEAR);
595 res = S3C2410_RES_OK;
598 if (errstat & bit)
599 writel(bit, dmac->regs + PL080_ERR_CLEAR);
601 /* 'next' points to the buffer that is next to the
602 * currently active buffer.
603 * For CIRCULAR queues, 'next' will be same as 'curr'
604 * when 'end' is the active buffer.
606 buff = chan->curr;
607 while (buff && buff != chan->next
608 && buff->next != chan->next)
609 buff = buff->next;
611 if (!buff)
612 BUG();
614 if (buff == chan->next)
615 buff = chan->end;
617 s3c64xx_dma_bufffdone(chan, buff, res);
619 /* Free the node and update curr, if non-circular queue */
620 if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
621 chan->curr = buff->next;
622 s3c64xx_dma_freebuff(buff);
625 /* Update 'next' */
626 buff = chan->next;
627 if (chan->next == chan->end) {
628 chan->next = chan->curr;
629 if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
630 chan->end = NULL;
631 } else {
632 chan->next = buff->next;
636 return IRQ_HANDLED;
/* pseudo-bus that the two DMA controller devices register against */
static struct bus_type dma_subsys = {
	.name = "s3c64xx-dma",
	.dev_name = "s3c64xx-dma",
};
644 static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
645 int irq, unsigned int base)
647 struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
648 struct s3c64xx_dmac *dmac;
649 char clkname[16];
650 void __iomem *regs;
651 void __iomem *regptr;
652 int err, ch;
654 dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
655 if (!dmac) {
656 printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
657 return -ENOMEM;
660 dmac->dev.id = chno / 8;
661 dmac->dev.bus = &dma_subsys;
663 err = device_register(&dmac->dev);
664 if (err) {
665 printk(KERN_ERR "%s: failed to register device\n", __func__);
666 goto err_alloc;
669 regs = ioremap(base, 0x200);
670 if (!regs) {
671 printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
672 err = -ENXIO;
673 goto err_dev;
676 snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);
678 dmac->clk = clk_get(NULL, clkname);
679 if (IS_ERR(dmac->clk)) {
680 printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
681 err = PTR_ERR(dmac->clk);
682 goto err_map;
685 clk_prepare_enable(dmac->clk);
687 dmac->regs = regs;
688 dmac->chanbase = chbase;
689 dmac->channels = chptr;
691 err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
692 if (err < 0) {
693 printk(KERN_ERR "%s: failed to get irq\n", __func__);
694 goto err_clk;
697 regptr = regs + PL080_Cx_BASE(0);
699 for (ch = 0; ch < 8; ch++, chptr++) {
700 pr_debug("%s: registering DMA %d (%p)\n",
701 __func__, chno + ch, regptr);
703 chptr->bit = 1 << ch;
704 chptr->number = chno + ch;
705 chptr->dmac = dmac;
706 chptr->regs = regptr;
707 regptr += PL080_Cx_STRIDE;
710 /* for the moment, permanently enable the controller */
711 writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
713 printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
714 irq, regs, chno, chno+8);
716 return 0;
718 err_clk:
719 clk_disable_unprepare(dmac->clk);
720 clk_put(dmac->clk);
721 err_map:
722 iounmap(regs);
723 err_dev:
724 device_unregister(&dmac->dev);
725 err_alloc:
726 kfree(dmac);
727 return err;
730 static int __init s3c64xx_dma_init(void)
732 int ret;
734 /* This driver is not supported when booting with device tree. */
735 if (of_have_populated_dt())
736 return -ENODEV;
738 printk(KERN_INFO "%s: Registering DMA channels\n", __func__);
740 dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
741 if (!dma_pool) {
742 printk(KERN_ERR "%s: failed to create pool\n", __func__);
743 return -ENOMEM;
746 ret = subsys_system_register(&dma_subsys, NULL);
747 if (ret) {
748 printk(KERN_ERR "%s: failed to create subsys\n", __func__);
749 return -ENOMEM;
752 /* Set all DMA configuration to be DMA, not SDMA */
753 writel(0xffffff, S3C64XX_SDMA_SEL);
755 /* Register standard DMA controllers */
756 s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
757 s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);
759 return 0;
762 arch_initcall(s3c64xx_dma_init);