/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */
#define DWC_DEFAULT_CTLLO(private) ({				\
		struct dw_dma_slave *__slave = (private);	\
		int dms = __slave ? __slave->dst_master : 0;	\
		int sms = __slave ? __slave->src_master : 1;	\
								\
		(DWC_CTLL_DST_MSIZE(0)				\
		 | DWC_CTLL_SRC_MSIZE(0)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(dms)				\
		 | DWC_CTLL_SMS(sms));				\
	})
/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U
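
/*
 * Worked example (a sketch, not used by the driver): the byte capacity
 * of one descriptor is the transfer count shifted by the transfer
 * width, so 32-bit transfers (width 2) give 4095 * 4 = 16380 bytes.
 * The helper name below is hypothetical.
 */
#if 0
static inline size_t dwc_max_bytes_per_desc(unsigned int src_width)
{
	/* DWC_MAX_COUNT is a transfer count, not a byte count */
	return (size_t)DWC_MAX_COUNT << src_width;
}
#endif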
/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/
/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dwc->lock);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dwc->lock);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_bh(&dwc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_bh(&dwc->lock);
	}
}
/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/
static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	dwc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	dwc_sync_desc_for_cpu(dwc, desc);
	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;

	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list))
		return;

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		if (desc->lli.llp == llp)
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp)
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		dwc_descriptor_complete(dwc, desc);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;

	dwc_scan_descriptors(dw, dwc);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc);
}

/* --------------------- Cyclic DMA API extensions -------------------- */
inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);
inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;
		if (callback) {
			spin_unlock(&dwc->lock);
			callback(callback_param);
			spin_lock(&dwc->lock);
		}
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
	}
}

/* ------------------------------------------------------------------------- */
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		spin_lock(&dwc->lock);
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
		spin_unlock(&dwc->lock);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&dwc->lock);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_bh(&dwc->lock);

	return cookie;
}
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO(chan->private)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
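
/*
 * Usage sketch (hypothetical client code, error handling elided): a
 * dmaengine user reaches dwc_prep_dma_memcpy() and dwc_tx_submit()
 * roughly like this; "my_memcpy_done" and the completion are made up.
 */
#if 0
static void my_memcpy_done(void *arg)
{
	complete(arg);
}

static int my_do_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct completion done;

	init_completion(&done);
	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
			DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;
	txd->callback = my_memcpy_done;
	txd->callback_param = &done;
	txd->tx_submit(txd);			/* lands in dwc_tx_submit() */
	chan->device->device_issue_pending(chan);
	wait_for_completion(&done);
	return 0;
}
#endif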
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	prev = first = NULL;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_FC_M2P);
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			desc->lli.ctlhi = len >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC_P2M);
		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len;
			u32		mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			desc->lli.ctlhi = len >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
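
/*
 * Platform-side sketch (all values hypothetical): slave transfers need
 * a struct dw_dma_slave hung off chan->private, naming this DMAC's
 * struct device plus the peripheral FIFO addresses and register width.
 */
#if 0
static struct dw_dma_slave my_uart_dma = {
	.dma_dev	= &my_dmac_pdev.dev,	/* must match dw->dma.dev */
	.tx_reg		= 0xffe00040,		/* peripheral TX FIFO */
	.rx_reg		= 0xffe00044,		/* peripheral RX FIFO */
	.reg_width	= 2,			/* 32-bit FIFO registers */
	.src_master	= 1,
	.dst_master	= 0,
};
/* then, before going through the dmaengine slave API: */
/*	chan->private = &my_uart_dma; */
#endif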
static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);

	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_bh(&dwc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);

	return 0;
}
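
/*
 * Usage sketch (hypothetical helper, not part of this driver): with
 * this generation of the dmaengine API a client aborts everything
 * queued on a channel through device_control().
 */
#if 0
static void my_abort_all(struct dma_chan *chan)
{
	/* completed-descriptor bookkeeping still runs in dwc_control() */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}
#endif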
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	int			ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&dwc->lock);
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
		spin_unlock_bh(&dwc->lock);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return ret;
}
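
/*
 * Usage sketch (hypothetical helper): clients normally poll completion
 * through the generic wrapper, which lands in dwc_tx_status() above.
 */
#if 0
static bool my_is_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL)
			== DMA_SUCCESS;
}
#endif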
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
	spin_unlock_bh(&dwc->lock);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_dma_slave	*dws;
	int			i;
	u32			cfghi;
	u32			cfglo;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_bh(&dwc->lock);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&dwc->lock);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dwc->lock);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_bh(&dwc->lock);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_bh(&dwc->lock);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */
/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock(&dwc->lock);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock(&dwc->lock);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock(&dwc->lock);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);
/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);

	spin_lock(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock(&dwc->lock);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_data_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	struct dw_dma_slave		*dws = chan->private;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_bh(&dwc->lock);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_bh(&dwc->lock);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);
	reg_width = dws->reg_width;
	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_TO_DEVICE:
			desc->lli.dar = dws->tx_reg;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_FC_M2P
					| DWC_CTLL_INT_EN);
			break;
		case DMA_FROM_DEVICE:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = dws->rx_reg;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_FC_P2M
					| DWC_CTLL_INT_EN);
			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
			"period %zu periods %d\n", buf_addr, buf_len,
			period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
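
/*
 * Lifecycle sketch (hypothetical client, error handling elided): the
 * cyclic extension is driven outside the normal descriptor flow;
 * "period_cb", "my_data" and "buf_dma" are made-up names.
 */
#if 0
static int my_start_stream(struct dma_chan *chan, dma_addr_t buf_dma,
		size_t period_len, unsigned int nr_periods,
		void (*period_cb)(void *), void *my_data)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf_dma, period_len * nr_periods,
			period_len, DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);
	cdesc->period_callback = period_cb;	/* runs once per period */
	cdesc->period_callback_param = my_data;
	return dw_dma_cyclic_start(chan);
	/* later: dw_dma_cyclic_stop(chan); dw_dma_cyclic_free(chan); */
}
#endif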
/*----------------------------------------------------------------------*/
static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}
static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		dwc->chan.chan_id = i;
		list_add_tail(&dwc->chan.device_node, &dw->dma.channels);

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), dw->dma.chancnt);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}
static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable(dw->clk);
	return 0;
}
static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}
static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
};
static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
	},
};
static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);
static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");