/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
{
	return slave ? slave->dst_master : 0;
}

static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
{
	return slave ? slave->src_master : 1;
}
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		int _dms = dwc_get_dms(__slave);		\
		int _sms = dwc_get_sms(__slave);		\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
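/*
 * Illustrative example (not part of the driver): for a memcpy channel
 * with no slave data (_chan->private == NULL), the macro falls back to
 * the default burst size, so CTL_LO is seeded with roughly:
 *
 *	DWC_CTLL_DST_MSIZE(DW_DMA_MSIZE_16)
 *	 | DWC_CTLL_SRC_MSIZE(DW_DMA_MSIZE_16)
 *	 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN
 *	 | DWC_CTLL_DMS(0) | DWC_CTLL_SMS(1)
 *
 * i.e. destination on master 0 and source on master 1, matching the
 * defaults returned by dwc_get_dms()/dwc_get_sms() above.
 */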
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/
/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	} else {
		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/
static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
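/*
 * Worked example (illustrative): for src = 0x1004, dest = 0x2008 and
 * len = 0x100, the OR of the three values is 0x310c, which is a
 * multiple of 4 but not of 8, so dwc_fast_fls(src | dest | len)
 * returns 2 and transfers can use 32-bit (1 << 2 byte) beats.
 */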
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}
static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/
/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/* Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer. */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer "
				"inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->tx_list = &first->tx_list;
		dwc->tx_node_active = first->tx_list.next;

		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;
	void (*callback)(void *param);
	void *callback_param;

	dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
			channel_readl(dwc, LLP));

	callback = dwc->cdesc->period_callback;
	callback_param = dwc->cdesc->period_callback_param;

	if (callback)
		callback(callback_param);

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}
/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i)) {
			unsigned long flags;

			spin_lock_irqsave(&dwc->lock, flags);
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
				if (dwc->tx_node_active != dwc->tx_list) {
					struct dw_desc *desc =
						list_entry(dwc->tx_node_active,
							   struct dw_desc,
							   desc_node);

					dma_writel(dw, CLEAR.XFER, dwc->mask);

					/* move pointer to next descriptor */
					dwc->tx_node_active =
						dwc->tx_node_active->next;

					dwc_do_single_block(dwc, desc);

					spin_unlock_irqrestore(&dwc->lock, flags);
					continue;
				}

				/* we are done here */
				clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
			}
			spin_unlock_irqrestore(&dwc->lock, flags);

			dwc_scan_descriptors(dw, dwc);
		}
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}
/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
			(unsigned long long)dest, (unsigned long long)src,
			len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
			   dwc->dw->data_width[dwc_get_dms(dws)]);

	src_width = dst_width = min_t(unsigned int, data_width,
				      dwc_fast_fls(src | dest | len));

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		data_width = dwc->dw->data_width[dwc_get_sms(dws)];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		data_width = dwc->dw->data_width[dwc_get_dms(dws)];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding least significant bit set: n & (n - 1)
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	u32			cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

		dwc_chan_disable(dw, dwc);

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_first_active(dwc)->len);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	void __iomem		*regs;
	bool			autocfg;
	unsigned int		dw_params;
	unsigned int		nr_channels;
	unsigned int		max_blk_size = 0;
	int			irq;
	int			err;
	int			i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	regs = devm_request_and_ioremap(&pdev->dev, io);
	if (!regs)
		return -EBUSY;

	dw_params = dma_read_byaddr(regs, DW_PARAMS);
	autocfg = dw_params >> DW_PARAMS_EN & 0x1;

	if (autocfg)
		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	else
		nr_channels = pdata->nr_channels;

	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk))
		return PTR_ERR(dw->clk);
	clk_prepare_enable(dw->clk);

	dw->regs = regs;

	/* get hardware configuration parameters */
	if (autocfg) {
		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < dw->nr_masters; i++) {
			dw->data_width[i] =
				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
		}
	} else {
		dw->nr_masters = pdata->nr_masters;
		memcpy(dw->data_width, pdata->data_width, 4);
	}
	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << nr_channels) - 1;

	/* force dma off, just in case */
	dw_dma_off(dw);

	/* disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
			       "dw_dmac", dw);
	if (err)
		return err;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];
		int			r = nr_channels - i - 1;

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = r;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->dw = dw;

		/* hardware configuration */
		if (autocfg) {
			unsigned int dwc_params;

			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
						     DWC_PARAMS);

			/* Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095. */
			dwc->block_size =
				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/* Check if channel supports multi block transfer */
			channel_writel(dwc, LLP, 0xfffffffc);
			dwc->nollp =
				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;
}
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

	return 0;
}
static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	return 0;
}
static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
static struct platform_driver dw_driver = {
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};
static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");