/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"
/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */
#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/
static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
						size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}
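/*
 * Illustrative example (not part of the driver logic): for src = 0x1000,
 * dst = 0x2000 and len = 256, all three values are word aligned, so the
 * returned width is 2 and transfers use 32-bit accesses; 2-byte alignment
 * would yield 1 (16-bit) and anything odd falls back to 0 (byte accesses).
 */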
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}
/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}
/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
/*
 * atc_get_current_descriptors -
 * locate the descriptor whose physical address matches the one in DSCR
 * @atchan: the channel we are working on
 * @dscr_addr: physical descriptor address in DSCR
 */
static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
							u32 dscr_addr)
{
	struct at_desc *desc, *_desc, *child, *desc_cur = NULL;

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->lli.dscr == dscr_addr) {
			desc_cur = desc;
			break;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.dscr == dscr_addr) {
				desc_cur = child;
				break;
			}
		}
	}

	return desc_cur;
}
/*
 * atc_get_bytes_left -
 * Get the number of residual bytes in the DMA buffer
 * @chan: the channel we are working on
 */
static int atc_get_bytes_left(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int	chan_id = atchan->chan_common.chan_id;
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc_cur;
	int ret = 0, count = 0;

	/*
	 * Initialize necessary values in the first time.
	 * remain_desc records the remaining descriptor length.
	 */
	if (atchan->remain_desc == 0)
		/* First descriptor embeds the transaction length */
		atchan->remain_desc = desc_first->len;

	/*
	 * This happens when the current descriptor transfer is complete.
	 * The residual buffer size should be reduced by the current
	 * descriptor length.
	 */
	if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
		clear_bit(ATC_IS_BTC, &atchan->status);
		desc_cur = atc_get_current_descriptors(atchan,
						channel_readl(atchan, DSCR));
		if (!desc_cur) {
			ret = -EINVAL;
			goto out;
		}

		count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
			<< desc_first->tx_width;
		if (atchan->remain_desc < count) {
			ret = -EINVAL;
			goto out;
		}

		atchan->remain_desc -= count;
		ret = atchan->remain_desc;
	} else {
		/*
		 * Get residual bytes when the current
		 * descriptor transfer is in progress.
		 */
		count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
				<< (desc_first->tx_width);
		ret = atchan->remain_desc - count;
	}
	/*
	 * Check fifo empty.
	 */
	if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
		atc_issue_pending(chan);

out:
	return ret;
}
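/*
 * Worked example for the in-progress branch above (figures are made up):
 * with tx_width = 2 (32-bit transfers) and CTRLA reporting a BTSIZE of
 * 0x40, the subtraction in the code implies that BTSIZE read back counts
 * transfers already performed, so count = 0x40 << 2 = 256 bytes are done
 * for the current descriptor and the reported residue is
 * remain_desc - 256.
 */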
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}
/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				if (pending & AT_DMA_BTC(i))
					set_bit(ATC_IS_BTC, &atchan->status);
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}
573 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
574 * @desc: descriptor at the head of the transaction chain
576 * Queue chain if DMA engine is working already
578 * Cookie increment and adding to active_list or queue must be atomic
580 static dma_cookie_t
atc_tx_submit(struct dma_async_tx_descriptor
*tx
)
582 struct at_desc
*desc
= txd_to_at_desc(tx
);
583 struct at_dma_chan
*atchan
= to_at_dma_chan(tx
->chan
);
587 spin_lock_irqsave(&atchan
->lock
, flags
);
588 cookie
= dma_cookie_assign(tx
);
590 if (list_empty(&atchan
->active_list
)) {
591 dev_vdbg(chan2dev(tx
->chan
), "tx_submit: started %u\n",
593 atc_dostart(atchan
, desc
);
594 list_add_tail(&desc
->desc_node
, &atchan
->active_list
);
596 dev_vdbg(chan2dev(tx
->chan
), "tx_submit: queued %u\n",
598 list_add_tail(&desc
->desc_node
, &atchan
->queue
);
601 spin_unlock_irqrestore(&atchan
->lock
, flags
);
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = ATC_SRC_WIDTH(src_width) |
		ATC_DST_WIDTH(dst_width);

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;
	first->tx_width = src_width;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
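/*
 * Example client usage (an illustrative sketch only; error handling is
 * omitted and chan/dst_phys/src_phys/len are assumed to be set up by the
 * caller):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
 *						   len, DMA_CTRL_ACK);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */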
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;
	first->tx_width = reg_width;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
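/*
 * Example client usage (an illustrative sketch; it assumes the channel was
 * obtained through the dmaengine API, a slave configuration was applied
 * with dmaengine_slave_config() and the scatterlist is already mapped with
 * dma_map_sg()):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */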
/**
 * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
 * @chan: the channel to prepare operation on
 * @dst_sg: destination scatterlist
 * @dst_nents: number of destination scatterlist entries
 * @src_sg: source scatterlist
 * @src_nents: number of source scatterlist entries
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned int		src_width;
	unsigned int		dst_width;
	size_t			xfer_count;
	u32			ctrla;
	u32			ctrlb;
	size_t			dst_len = 0, src_len = 0;
	dma_addr_t		dst = 0, src = 0;
	size_t			len = 0, total_len = 0;

	if (unlikely(dst_nents == 0 || src_nents == 0))
		return NULL;

	if (unlikely(dst_sg == NULL || src_sg == NULL))
		return NULL;

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * loop until there is either no more source or no more destination
	 * scatterlist entry
	 */
	while (true) {

		/* prepare the next transfer */
		if (dst_len == 0) {

			/* no more destination scatterlist entries */
			if (!dst_sg || !dst_nents)
				break;

			dst = sg_dma_address(dst_sg);
			dst_len = sg_dma_len(dst_sg);

			dst_sg = sg_next(dst_sg);
			dst_nents--;
		}

		if (src_len == 0) {

			/* no more source scatterlist entries */
			if (!src_sg || !src_nents)
				break;

			src = sg_dma_address(src_sg);
			src_len = sg_dma_len(src_sg);

			src_sg = sg_next(src_sg);
			src_nents--;
		}

		len = min_t(size_t, src_len, dst_len);
		if (len == 0)
			continue;

		/* take care for the alignment */
		src_width = dst_width = atc_get_xfer_width(src, dst, len);

		ctrla = ATC_SRC_WIDTH(src_width) |
			ATC_DST_WIDTH(dst_width);

		/*
		 * The number of transfers to set up refers to the source
		 * width, which depends on the alignment.
		 */
		xfer_count = len >> src_width;
		if (xfer_count > ATC_BTSIZE_MAX) {
			xfer_count = ATC_BTSIZE_MAX;
			len = ATC_BTSIZE_MAX << src_width;
		}

		/* create the transfer */
		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src;
		desc->lli.daddr = dst;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = len;

		/*
		 * Although we only need the transfer width for the first and
		 * the last descriptor, it's easier to set it to all
		 * descriptors.
		 */
		desc->tx_width = src_width;

		atc_desc_chain(&first, &prev, desc);

		/* update the lengths and addresses for the next loop cycle */
		dst_len -= len;
		src_len -= len;
		dst += len;
		src += len;

		total_len += len;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;
	first->tx_width = reg_width;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
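/*
 * Example client usage (an illustrative sketch, typical of audio-style
 * ring buffers; buf_phys, buf_len and period_len are placeholders and
 * my_period_cb is a hypothetical per-period callback):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	tx->callback = my_period_cb;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */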
static int atc_config(struct dma_chan *chan,
		      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}
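/*
 * Example (an illustrative sketch): a client hands its requirements to this
 * callback through dmaengine_slave_config(); the FIFO address and widths
 * below are placeholders, not real peripheral values:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */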
static int atc_pause(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
	set_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	struct at_desc		*desc, *_desc;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;
	enum dma_status		ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/*  Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}
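/*
 * Example client usage (an illustrative sketch): polling how much of a
 * previously submitted transfer is still outstanding.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("residue: %u bytes\n", state.residue);
 */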
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	atchan->remain_desc = 0;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;
	atchan->remain_desc = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}
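/*
 * This filter is used by at_dma_xlate() below together with
 * dma_request_channel(); the same pattern, sketched for clarity (the
 * at_dma_slave instance "sdata" is a placeholder):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, at_dma_filter, &sdata);
 */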
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};
static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_config = atc_config;
		atdma->dma_common.device_pause = atc_pause;
		atdma->dma_common.device_resume = atc_resume;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}
static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}
static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}
static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};
static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");