/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"
/* DMA controller register offsets */
#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR		0x0204
#define DTADR		0x0208
#define DCMD		0x020c

#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */
#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);
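/*
 * Note on descriptor chaining (editorial reading aid, not part of the
 * original sources): the hardware walks descriptors through ddadr, whose
 * low bits are flags -- DDADR_DESCADDR masks the next-descriptor address
 * and DDADR_STOP terminates the chain -- so each descriptor must sit on
 * at least a 16-byte boundary (here 32-byte aligned). A two-element
 * chain, assuming d0/d1 come from the dma pool, is built roughly like:
 *
 *	d0->ddadr = d1_phys;		// link: low bits clear, no STOP
 *	d1->ddadr = DDADR_STOP;		// last descriptor ends the chain
 *	d1->dcmd |= DCMD_ENDIRQEN;	// fire the end interrupt here
 */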
struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};
struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};
struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};
struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock;		/* protect alloc/free phy channels */
};
#define tx_to_mmp_pdma_desc(tx)					\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)					\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)					\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)					\
	container_of(dmadev, struct mmp_pdma_device, device)
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		/* clear irq by writing the status bits back */
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && phy->vchan)
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
		return 0;
	}
	return -EAGAIN;
}
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}
/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}
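/*
 * Worked example for the priority mapping above (editorial note, not in
 * the original sources): a channel's priority is (i & 0xf) >> 2, so
 * channel 5 maps to (5 & 0xf) >> 2 = 1, and channel 21 to
 * (21 & 0xf) >> 2 = 1 as well. lookup_phy() scans priority 0 first,
 * which is what makes the search run in "descending priority" order.
 */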
static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->phy->vchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}
/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}
/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each desc has submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}
/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool =
		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
				sizeof(struct mmp_pdma_desc_sw),
				__alignof__(struct mmp_pdma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}
static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}
static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan || !len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
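/*
 * Editorial usage sketch (not part of the original sources): a client
 * holding a channel on this controller could drive the memcpy path
 * roughly as below; "chan", "dst", "src" and "len" are hypothetical
 * caller state and error handling is elided:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);	// chains onto chain_pending
 *	dma_async_issue_pending(chan);	// kicks start_pending_queue()
 */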
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		/* was sg_dma_len(sgl): use the current element, not the list head */
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
				 (DCMD_LENGTH & period_len);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	int ret = 0;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/*
		 * FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		if (cfg->slave_id)
			chan->drcmr = cfg->slave_id;
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}
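/*
 * Editorial usage sketch (not part of the original sources): before
 * preparing slave transfers, a client configures the channel through the
 * generic dmaengine helper, which lands in the DMA_SLAVE_CONFIG case
 * above. "chan" and "fifo_phys_addr" are hypothetical:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 32,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	// -> mmp_pdma_control()
 */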
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}
/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	if (chan->cyclic_first) {
		dma_async_tx_callback cb = NULL;
		void *cb_data = NULL;

		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		cb = desc->async_tx.callback;
		cb_data = desc->async_tx.callback_param;
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		if (cb)
			cb(cb_data);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;

			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}
static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}
static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
			    GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_chan_handler, 0, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}
static const struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate;

retry:
	candidate = NULL;

	/* walk the list of channels registered with the current instance and
	 * find one that is currently unused */
	list_for_each_entry(chan, &d->device.channels, device_node)
		if (chan->client_count == 0) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	/* dma_get_slave_channel will return NULL if we lost a race between
	 * the lookup and the reservation */
	chan = dma_get_slave_channel(candidate);

	if (chan) {
		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

		c->drcmr = dma_spec->args[0];
		return chan;
	}

	goto retry;
}
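/*
 * Editorial device-tree sketch (not part of the original sources, and
 * only an approximation of the binding): a controller node matching
 * mmp_pdma_dt_ids and a hypothetical client might look like the below;
 * the single cell handed to mmp_pdma_dma_xlate() above becomes the
 * channel's drcmr value:
 *
 *	pdma: dma-controller@d4000000 {
 *		compatible = "marvell,pdma-1.0";
 *		reg = <0xd4000000 0x10000>;
 *		interrupts = <47>;
 *		#dma-cells = <1>;
 *		#dma-channels = <16>;
 *	};
 *
 *	client@d4030000 {
 *		dmas = <&pdma 21>;
 *		dma-names = "rx";
 *	};
 */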
static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	/* one struct mmp_pdma_phy per channel (was sizeof(struct mmp_pdma_chan)) */
	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       0, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}
static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};
static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner	= THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};
bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
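/*
 * Editorial usage sketch (not part of the original sources): a non-DT
 * client would request a channel through the exported filter, passing
 * the DRCMR request line it wants mapped; "drcmr = 21" is hypothetical:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 21;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */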
module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");