/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"
#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* xlen and dma_width registers are in units of 4 bytes */
#define SIRFSOC_DMA_WORD_LEN		4
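
/*
 * Register layout as used below (an inference from the accesses in this
 * driver, not a datasheet quote): each channel owns a private 0x10-byte
 * window of ADDR/XLEN/YLEN/CTRL registers at base + cid * 0x10, e.g.
 *
 *	chan 0 CTRL:	base + 0 * 0x10 + SIRFSOC_DMA_CH_CTRL = base + 0x0C
 *	chan 3 XLEN:	base + 3 * 0x10 + SIRFSOC_DMA_CH_XLEN = base + 0x34
 *
 * while the per-channel width slots (SIRFSOC_DMA_WIDTH_0 + cid * 4) and
 * the shared VALID/INT/INT_EN/LOOP_CTRL registers sit above 0x100.
 */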
struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;		/* DMA direction */
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};
struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
};
#define DRV_NAME	"sirfsoc_dma"
/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}
/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}
/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * The lock has already been taken by the functions calling this,
	 * so we don't take it again here.
	 */
	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}
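
/*
 * Programming-sequence note with example numbers (illustrative only, not
 * from a datasheet): for a transfer of four 64-byte lines out of an
 * 80-byte-pitched buffer, the function above writes width = 20, xlen = 16
 * and ylen = 3 (all in 4-byte words), and finally the buffer address
 * shifted right by 2, since the hardware takes a word address; the last
 * writel() is deliberately not _relaxed so the earlier register writes
 * are ordered before the channel starts.
 */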
/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}
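
/*
 * Note on the fls() loop above (example values, for illustration only):
 * with a pending mask of 0x9 the handler services channel 3 first
 * (fls(0x9) - 1 == 3), clears that bit, then services channel 0, and
 * stops once the mask reaches 0 because fls(0) - 1 == -1.
 */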
/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			if (!sdesc || (sdesc && !sdesc->cyclic)) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}
/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}
static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
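
/*
 * Example client configuration (a sketch, not part of this driver): a
 * peripheral driver using this controller has to request a 4-byte bus
 * width, e.g.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * Any other bus width is rejected above with -EINVAL, and a maxburst of 4
 * selects the controller's burst mode bit.
 */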
static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
		~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
		& ~((1 << cid) | 1 << (cid + 16)),
		sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}
/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}
/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}
/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;

	spin_lock_irqsave(&schan->lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}
static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * Number of chunks in a frame can only be 1 for prima2
	 * and ylen (number of frames - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;

err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
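
/*
 * Example use of the interleaved API (a client-side sketch under assumed
 * values; 'chan', 'dma_buf' and 'desc' are hypothetical client variables):
 *
 *	struct dma_interleaved_template *xt;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = dma_buf;
 *	xt->frame_size = 1;
 *	xt->numf = 4;
 *	xt->sgl[0].size = 64;
 *	xt->sgl[0].icg = 16;
 *	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *
 * frame_size must stay 1 and numf must be non-zero, or the preparation
 * above fails with "Invalid xfer".
 */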
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with two periods.
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. The DMA controller generates interrupts twice in each
	 * loop: when the DMA address reaches the end of BUFA or the end of
	 * BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
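
/*
 * Worked example for the loop mode above (illustrative numbers): a 4 KiB
 * ring buffer has to be described with period_len = 2048 so that
 * buf_len == 2 * period_len; the hardware then interrupts at the
 * BUFA/BUFB boundary and at the end of the buffer, and ylen is programmed
 * as 4096 / SIRFSOC_DMA_WORD_LEN - 1 = 1023.
 */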
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
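
/*
 * Example (a sketch of typical client code, not part of this driver):
 * because channel numbers are global across DMAC instances, a peripheral
 * wanting, say, channel 12 of the second controller (dev_id 1) would pass
 * 1 * SIRFSOC_DMA_CHANNELS + 12 as the filter parameter:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *			(void *)(1 * SIRFSOC_DMA_CHANNELS + 12));
 */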
static int __devinit sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}
static int __devexit sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);

	return 0;
}
static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= __devexit_p(sirfsoc_dma_remove),
	.driver = {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);
MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");