/*
 * drivers/dma/fsl-edma.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define EDMA_CR                 0x00
#define EDMA_SERQ               0x1B
#define EDMA_CERQ               0x1A
#define EDMA_SEEI               0x19
#define EDMA_CEEI               0x18
#define EDMA_CINT               0x1F
#define EDMA_CERR               0x1E
#define EDMA_SSRT               0x1D
#define EDMA_CDNE               0x1C
#define EDMA_INTR               0x24
#define EDMA_ERR                0x2C

#define EDMA_TCD_SADDR(x)       (0x1000 + 32 * (x))
#define EDMA_TCD_SOFF(x)        (0x1004 + 32 * (x))
#define EDMA_TCD_ATTR(x)        (0x1006 + 32 * (x))
#define EDMA_TCD_NBYTES(x)      (0x1008 + 32 * (x))
#define EDMA_TCD_SLAST(x)       (0x100C + 32 * (x))
#define EDMA_TCD_DADDR(x)       (0x1010 + 32 * (x))
#define EDMA_TCD_DOFF(x)        (0x1014 + 32 * (x))
#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
#define EDMA_TCD_CITER(x)       (0x1016 + 32 * (x))
#define EDMA_TCD_DLAST_SGA(x)   (0x1018 + 32 * (x))
#define EDMA_TCD_CSR(x)         (0x101C + 32 * (x))
#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
#define EDMA_TCD_BITER(x)       (0x101E + 32 * (x))

#define EDMA_CR_EDBG            BIT(1)
#define EDMA_CR_ERCA            BIT(2)
#define EDMA_CR_ERGA            BIT(3)
#define EDMA_CR_HOE             BIT(4)
#define EDMA_CR_HALT            BIT(5)
#define EDMA_CR_CLM             BIT(6)
#define EDMA_CR_EMLM            BIT(7)
#define EDMA_CR_ECX             BIT(16)
#define EDMA_CR_CX              BIT(17)

#define EDMA_SEEI_SEEI(x)       ((x) & 0x1F)
#define EDMA_CEEI_CEEI(x)       ((x) & 0x1F)
#define EDMA_CINT_CINT(x)       ((x) & 0x1F)
#define EDMA_CERR_CERR(x)       ((x) & 0x1F)

#define EDMA_TCD_ATTR_DSIZE(x)          (((x) & 0x0007))
#define EDMA_TCD_ATTR_DMOD(x)           (((x) & 0x001F) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)          (((x) & 0x0007) << 8)
#define EDMA_TCD_ATTR_SMOD(x)           (((x) & 0x001F) << 11)
#define EDMA_TCD_ATTR_SSIZE_8BIT        (0x0000)
#define EDMA_TCD_ATTR_SSIZE_16BIT       (0x0100)
#define EDMA_TCD_ATTR_SSIZE_32BIT       (0x0200)
#define EDMA_TCD_ATTR_SSIZE_64BIT       (0x0300)
#define EDMA_TCD_ATTR_SSIZE_32BYTE      (0x0500)
#define EDMA_TCD_ATTR_DSIZE_8BIT        (0x0000)
#define EDMA_TCD_ATTR_DSIZE_16BIT       (0x0001)
#define EDMA_TCD_ATTR_DSIZE_32BIT       (0x0002)
#define EDMA_TCD_ATTR_DSIZE_64BIT       (0x0003)
#define EDMA_TCD_ATTR_DSIZE_32BYTE      (0x0005)

#define EDMA_TCD_SOFF_SOFF(x)           (x)
#define EDMA_TCD_NBYTES_NBYTES(x)       (x)
#define EDMA_TCD_SLAST_SLAST(x)         (x)
#define EDMA_TCD_DADDR_DADDR(x)         (x)
#define EDMA_TCD_CITER_CITER(x)         ((x) & 0x7FFF)
#define EDMA_TCD_DOFF_DOFF(x)           (x)
#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
#define EDMA_TCD_BITER_BITER(x)         ((x) & 0x7FFF)

#define EDMA_TCD_CSR_START              BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR          BIT(1)
#define EDMA_TCD_CSR_INT_HALF           BIT(2)
#define EDMA_TCD_CSR_D_REQ              BIT(3)
#define EDMA_TCD_CSR_E_SG               BIT(4)
#define EDMA_TCD_CSR_E_LINK             BIT(5)
#define EDMA_TCD_CSR_ACTIVE             BIT(6)
#define EDMA_TCD_CSR_DONE               BIT(7)

#define EDMAMUX_CHCFG_DIS               0x0
#define EDMAMUX_CHCFG_ENBL              0x80
#define EDMAMUX_CHCFG_SOURCE(n)         ((n) & 0x3F)

#define DMAMUX_NR       2

#define FSL_EDMA_BUSWIDTHS      BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
                                BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
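
/*
 * The in-memory transfer control descriptor (TCD) below mirrors the hardware
 * TCD register layout, so the eDMA engine can load chained TCDs directly
 * from memory when scatter/gather is enabled.
 */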

struct fsl_edma_hw_tcd {
        u32     saddr;
        u16     soff;
        u16     attr;
        u32     nbytes;
        u32     slast;
        u32     daddr;
        u16     doff;
        u16     citer;
        u32     dlast_sga;
        u16     csr;
        u16     biter;
};

struct fsl_edma_sw_tcd {
        dma_addr_t                      ptcd;
        struct fsl_edma_hw_tcd          *vtcd;
};

struct fsl_edma_slave_config {
        enum dma_transfer_direction     dir;
        enum dma_slave_buswidth         addr_width;
        u32                             dev_addr;
        u32                             burst;
        u32                             attr;
};

struct fsl_edma_chan {
        struct virt_dma_chan            vchan;
        enum dma_status                 status;
        struct fsl_edma_engine          *edma;
        struct fsl_edma_desc            *edesc;
        struct fsl_edma_slave_config    fsc;
        struct dma_pool                 *tcd_pool;
};

struct fsl_edma_desc {
        struct virt_dma_desc            vdesc;
        struct fsl_edma_chan            *echan;
        bool                            iscyclic;
        unsigned int                    n_tcds;
        struct fsl_edma_sw_tcd          tcd[];
};

struct fsl_edma_engine {
        struct dma_device       dma_dev;
        void __iomem            *membase;
        void __iomem            *muxbase[DMAMUX_NR];
        struct clk              *muxclk[DMAMUX_NR];
        struct mutex            fsl_edma_mutex;
        u32                     n_chans;
        int                     txirq;
        int                     errirq;
        bool                    big_endian;
        struct fsl_edma_chan    chans[];
};

/*
 * R/W functions for big- or little-endian registers:
 * The eDMA controller's endianness is independent of the CPU core's, so the
 * accessors below pick the byte-swapping variant at run time.
 */

static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
{
        if (edma->big_endian)
                return ioread16be(addr);
        else
                return ioread16(addr);
}

static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
        if (edma->big_endian)
                return ioread32be(addr);
        else
                return ioread32(addr);
}

static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
{
        /* 8-bit registers need no byte swapping */
        iowrite8(val, addr);
}

static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
{
        if (edma->big_endian)
                iowrite16be(val, addr);
        else
                iowrite16(val, addr);
}

static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
{
        if (edma->big_endian)
                iowrite32be(val, addr);
        else
                iowrite32(val, addr);
}

static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct fsl_edma_chan, vchan.chan);
}

static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct fsl_edma_desc, vdesc);
}
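
/*
 * Start servicing a channel: SEEI enables its error interrupt and SERQ
 * enables its peripheral hardware request.
 */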

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
        void __iomem *addr = fsl_chan->edma->membase;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
        edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
}

static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
        void __iomem *addr = fsl_chan->edma->membase;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
        edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
}
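
/*
 * Route DMA request source 'slot' to this channel through the DMAMUX, or
 * disconnect the channel when 'enable' is false. The channels are split
 * into contiguous groups of n_chans / DMAMUX_NR, one group per mux block.
 */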

static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
                unsigned int slot, bool enable)
{
        u32 ch = fsl_chan->vchan.chan.chan_id;
        void __iomem *muxaddr;
        unsigned int chans_per_mux, ch_off;

        chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
        ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
        muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];

        if (enable)
                edma_writeb(fsl_chan->edma,
                        EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
                        muxaddr + ch_off);
        else
                edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}

static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
        switch (addr_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
        default:
                return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
        }
}

static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
        struct fsl_edma_desc *fsl_desc;
        unsigned int i;

        fsl_desc = to_fsl_edma_desc(vdesc);
        for (i = 0; i < fsl_desc->n_tcds; i++)
                dma_pool_free(fsl_desc->echan->tcd_pool,
                              fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
}

static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                unsigned long arg)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct dma_slave_config *cfg = (void *)arg;
        unsigned long flags;
        LIST_HEAD(head);

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
                fsl_edma_disable_request(fsl_chan);
                fsl_chan->edesc = NULL;
                vchan_get_all_descriptors(&fsl_chan->vchan, &head);
                spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
                vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
                return 0;

        case DMA_SLAVE_CONFIG:
                fsl_chan->fsc.dir = cfg->direction;
                if (cfg->direction == DMA_DEV_TO_MEM) {
                        fsl_chan->fsc.dev_addr = cfg->src_addr;
                        fsl_chan->fsc.addr_width = cfg->src_addr_width;
                        fsl_chan->fsc.burst = cfg->src_maxburst;
                        fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
                } else if (cfg->direction == DMA_MEM_TO_DEV) {
                        fsl_chan->fsc.dev_addr = cfg->dst_addr;
                        fsl_chan->fsc.addr_width = cfg->dst_addr_width;
                        fsl_chan->fsc.burst = cfg->dst_maxburst;
                        fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
                } else {
                        return -EINVAL;
                }
                return 0;

        case DMA_PAUSE:
                spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
                if (fsl_chan->edesc) {
                        fsl_edma_disable_request(fsl_chan);
                        fsl_chan->status = DMA_PAUSED;
                }
                spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
                return 0;

        case DMA_RESUME:
                spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
                if (fsl_chan->edesc) {
                        fsl_edma_enable_request(fsl_chan);
                        fsl_chan->status = DMA_IN_PROGRESS;
                }
                spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
                return 0;

        default:
                return -ENXIO;
        }
}
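
/*
 * Residue is the number of bytes still to be transferred for the cookie.
 * Each TCD moves nbytes * biter bytes; for the TCD currently in flight the
 * engine's SADDR/DADDR register tells how far into that TCD it has gone.
 */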

static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
                struct virt_dma_desc *vdesc, bool in_progress)
{
        struct fsl_edma_desc *edesc = fsl_chan->edesc;
        void __iomem *addr = fsl_chan->edma->membase;
        u32 ch = fsl_chan->vchan.chan.chan_id;
        enum dma_transfer_direction dir = fsl_chan->fsc.dir;
        dma_addr_t cur_addr, dma_addr;
        size_t len, size;
        int i;

        /* calculate the total size in this descriptor */
        for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
                len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
                        * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));

        if (!in_progress)
                return len;

        if (dir == DMA_MEM_TO_DEV)
                cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
        else
                cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));

        /* figure out which TCDs have finished and compute the residue */
        for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
                size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
                        * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
                if (dir == DMA_MEM_TO_DEV)
                        dma_addr = edma_readl(fsl_chan->edma,
                                        &(edesc->tcd[i].vtcd->saddr));
                else
                        dma_addr = edma_readl(fsl_chan->edma,
                                        &(edesc->tcd[i].vtcd->daddr));

                len -= size;
                if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
                        len += dma_addr + size - cur_addr;
                        break;
                }
        }

        return len;
}

static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE)
                return status;

        if (!txstate)
                return fsl_chan->status;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
        if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
                txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
        else if (vdesc)
                txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
        else
                txstate->residue = 0;

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        return fsl_chan->status;
}

static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
                u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
                u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
                u16 csr)
{
        void __iomem *addr = fsl_chan->edma->membase;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        /*
         * The TCD parameters were already byte-swapped in fill_tcd_params(),
         * so write them to the channel's TCD registers in CPU byte order.
         * CSR is cleared first so the channel cannot start on a partially
         * written TCD.
         */
        writew(0, addr + EDMA_TCD_CSR(ch));
        writel(src, addr + EDMA_TCD_SADDR(ch));
        writel(dst, addr + EDMA_TCD_DADDR(ch));
        writew(attr, addr + EDMA_TCD_ATTR(ch));
        writew(soff, addr + EDMA_TCD_SOFF(ch));
        writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
        writel(slast, addr + EDMA_TCD_SLAST(ch));
        writew(citer, addr + EDMA_TCD_CITER(ch));
        writew(biter, addr + EDMA_TCD_BITER(ch));
        writew(doff, addr + EDMA_TCD_DOFF(ch));
        writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
        writew(csr, addr + EDMA_TCD_CSR(ch));
}

static void fill_tcd_params(struct fsl_edma_engine *edma,
                struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
                u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
                u16 biter, u16 doff, u32 dlast_sga, bool major_int,
                bool disable_req, bool enable_sg)
{
        u16 csr = 0;

        /*
         * eDMA hardware scatter/gather requires the TCD parameters to be
         * stored in memory with the same endianness as the eDMA module, so
         * that the engine can load chained TCDs by itself.
         */
        edma_writel(edma, src, &(tcd->saddr));
        edma_writel(edma, dst, &(tcd->daddr));
        edma_writew(edma, attr, &(tcd->attr));
        edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
        edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
        edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
        edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
        edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
        edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
        edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));

        if (major_int)
                csr |= EDMA_TCD_CSR_INT_MAJOR;

        if (disable_req)
                csr |= EDMA_TCD_CSR_D_REQ;

        if (enable_sg)
                csr |= EDMA_TCD_CSR_E_SG;

        edma_writew(edma, csr, &(tcd->csr));
}
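
/*
 * Allocate a software descriptor carrying sg_len hardware TCDs taken from
 * the channel's DMA pool; on any allocation failure everything allocated so
 * far is released again.
 */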

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
                int sg_len)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
                                GFP_NOWAIT);
        if (!fsl_desc)
                return NULL;

        fsl_desc->echan = fsl_chan;
        fsl_desc->n_tcds = sg_len;
        for (i = 0; i < sg_len; i++) {
                fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
                                        GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
                if (!fsl_desc->tcd[i].vtcd)
                        goto err;
        }
        return fsl_desc;

err:
        while (--i >= 0)
                dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
                                fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
        return NULL;
}
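
/*
 * A cyclic transfer is built as a ring of TCDs, one per period: every TCD
 * raises the major-loop interrupt and scatter/gather-links to the next
 * period's TCD, with the last one linking back to the first.
 */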

static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        dma_addr_t dma_buf_next;
        int sg_len, i;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;

        if (!is_slave_direction(fsl_chan->fsc.dir))
                return NULL;

        sg_len = buf_len / period_len;
        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = true;

        dma_buf_next = dma_addr;
        nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
        iter = period_len / nbytes;

        for (i = 0; i < sg_len; i++) {
                if (dma_buf_next >= dma_addr + buf_len)
                        dma_buf_next = dma_addr;

                /* get the next period's TCD physical address for the SG link */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
                        src_addr = dma_buf_next;
                        dst_addr = fsl_chan->fsc.dev_addr;
                        soff = fsl_chan->fsc.addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->fsc.dev_addr;
                        dst_addr = dma_buf_next;
                        soff = 0;
                        doff = fsl_chan->fsc.addr_width;
                }

                fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
                                dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
                                iter, iter, doff, last_sg, true, false, true);
                dma_buf_next += period_len;
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
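
/*
 * A slave scatter/gather transfer is a once-through chain of TCDs, one per
 * scatterlist entry; intermediate TCDs only link to their successor, while
 * the last TCD raises the interrupt and disables the hardware request.
 */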

static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        struct scatterlist *sg;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;
        int i;

        if (!is_slave_direction(fsl_chan->fsc.dir))
                return NULL;

        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = false;

        nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
        for_each_sg(sgl, sg, sg_len, i) {
                /* get the next sg's TCD physical address for the SG link */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
                        src_addr = sg_dma_address(sg);
                        dst_addr = fsl_chan->fsc.dev_addr;
                        soff = fsl_chan->fsc.addr_width;
                        doff = 0;
                } else {
                        src_addr = fsl_chan->fsc.dev_addr;
                        dst_addr = sg_dma_address(sg);
                        soff = 0;
                        doff = fsl_chan->fsc.addr_width;
                }

                iter = sg_dma_len(sg) / nbytes;
                if (i < sg_len - 1) {
                        last_sg = fsl_desc->tcd[(i + 1)].ptcd;
                        fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
                                        src_addr, dst_addr, fsl_chan->fsc.attr,
                                        soff, nbytes, 0, iter, iter, doff, last_sg,
                                        false, false, true);
                } else {
                        last_sg = 0;
                        fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
                                        src_addr, dst_addr, fsl_chan->fsc.attr,
                                        soff, nbytes, 0, iter, iter, doff, last_sg,
                                        true, true, false);
                }
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
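
/* Load the first TCD of the next queued descriptor and start the channel. */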

static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
        struct fsl_edma_hw_tcd *tcd;
        struct virt_dma_desc *vdesc;

        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
        fsl_chan->edesc = to_fsl_edma_desc(vdesc);
        tcd = fsl_chan->edesc->tcd[0].vtcd;
        fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
                        tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
                        tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
        fsl_edma_enable_request(fsl_chan);
        fsl_chan->status = DMA_IN_PROGRESS;
}
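
/*
 * The INT register holds one pending bit per channel. For every set bit,
 * acknowledge it through CINT and either complete the descriptor or, for a
 * cyclic transfer, invoke the period callback.
 */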

static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
        struct fsl_edma_engine *fsl_edma = dev_id;
        unsigned int intr, ch;
        void __iomem *base_addr;
        struct fsl_edma_chan *fsl_chan;

        base_addr = fsl_edma->membase;

        intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
        if (!intr)
                return IRQ_NONE;

        for (ch = 0; ch < fsl_edma->n_chans; ch++) {
                if (intr & (0x1 << ch)) {
                        edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
                                base_addr + EDMA_CINT);

                        fsl_chan = &fsl_edma->chans[ch];

                        spin_lock(&fsl_chan->vchan.lock);
                        if (!fsl_chan->edesc->iscyclic) {
                                list_del(&fsl_chan->edesc->vdesc.node);
                                vchan_cookie_complete(&fsl_chan->edesc->vdesc);
                                fsl_chan->edesc = NULL;
                                fsl_chan->status = DMA_COMPLETE;
                        } else {
                                vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
                        }

                        if (!fsl_chan->edesc)
                                fsl_edma_xfer_desc(fsl_chan);

                        spin_unlock(&fsl_chan->vchan.lock);
                }
        }
        return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
        struct fsl_edma_engine *fsl_edma = dev_id;
        unsigned int err, ch;

        err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
        if (!err)
                return IRQ_NONE;

        for (ch = 0; ch < fsl_edma->n_chans; ch++) {
                if (err & (0x1 << ch)) {
                        fsl_edma_disable_request(&fsl_edma->chans[ch]);
                        edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
                                fsl_edma->membase + EDMA_CERR);
                        fsl_edma->chans[ch].status = DMA_ERROR;
                }
        }
        return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
        if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
                return IRQ_HANDLED;

        return fsl_edma_err_handler(irq, dev_id);
}

static void fsl_edma_issue_pending(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

        if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
                fsl_edma_xfer_desc(fsl_chan);

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
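
/*
 * Translate a two-cell DMA specifier into a channel: the first cell selects
 * the DMAMUX block, the second the request source to route through it.
 * An illustrative consumer node (request numbers are SoC specific):
 *
 *	dmas = <&edma0 0 24>, <&edma0 0 25>;
 *	dma-names = "rx", "tx";
 */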

static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
                struct of_dma *ofdma)
{
        struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
        struct dma_chan *chan, *_chan;
        unsigned int chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;

        if (dma_spec->args_count != 2)
                return NULL;

        mutex_lock(&fsl_edma->fsl_edma_mutex);
        list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
                if (chan->client_count)
                        continue;
                if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
                        chan = dma_get_slave_channel(chan);
                        if (chan) {
                                chan->device->privatecnt++;
                                fsl_edma_chan_mux(to_fsl_edma_chan(chan),
                                        dma_spec->args[1], true);
                                mutex_unlock(&fsl_edma->fsl_edma_mutex);
                                return chan;
                        }
                }
        }
        mutex_unlock(&fsl_edma->fsl_edma_mutex);
        return NULL;
}

static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
                                sizeof(struct fsl_edma_hw_tcd),
                                32, 0);
        return 0;
}

static void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_edma_chan_mux(fsl_chan, 0, false);
        fsl_chan->edesc = NULL;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        dma_pool_destroy(fsl_chan->tcd_pool);
        fsl_chan->tcd_pool = NULL;
}

static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
                struct dma_slave_caps *caps)
{
        caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
        caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
        caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        caps->cmd_pause = true;
        caps->cmd_terminate = true;

        return 0;
}
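
/*
 * Some SoCs wire the transfer-complete and error interrupts to one shared
 * line; in that case a single combined handler is registered, otherwise the
 * tx and err handlers each get their own line.
 */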

static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
        int ret;

        fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
        if (fsl_edma->txirq < 0) {
                dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
                return fsl_edma->txirq;
        }

        fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
        if (fsl_edma->errirq < 0) {
                dev_err(&pdev->dev, "Can't get edma-err irq.\n");
                return fsl_edma->errirq;
        }

        if (fsl_edma->txirq == fsl_edma->errirq) {
                ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
                                fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
                if (ret) {
                        dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
                        return ret;
                }
        } else {
                ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
                                fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
                if (ret) {
                        dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
                        return ret;
                }

                ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
                                fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
                if (ret) {
                        dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
                        return ret;
                }
        }

        return 0;
}
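
/*
 * The probe routine below expects a device tree node along these lines
 * (illustrative only; addresses, interrupts and clocks are SoC specific):
 *
 *	edma0: dma-controller@40018000 {
 *		compatible = "fsl,vf610-edma";
 *		reg = <0x40018000 0x2000>,
 *		      <0x40024000 0x1000>, <0x40025000 0x1000>;
 *		interrupt-names = "edma-tx", "edma-err";
 *		dma-channels = <32>;
 *		clock-names = "dmamux0", "dmamux1";
 *		#dma-cells = <2>;
 *	};
 */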

static int fsl_edma_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct fsl_edma_engine *fsl_edma;
        struct fsl_edma_chan *fsl_chan;
        struct resource *res;
        int len, chans;
        int ret, i;

        ret = of_property_read_u32(np, "dma-channels", &chans);
        if (ret) {
                dev_err(&pdev->dev, "Can't get dma-channels.\n");
                return ret;
        }

        len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
        fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_edma)
                return -ENOMEM;

        fsl_edma->n_chans = chans;
        mutex_init(&fsl_edma->fsl_edma_mutex);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fsl_edma->membase))
                return PTR_ERR(fsl_edma->membase);

        for (i = 0; i < DMAMUX_NR; i++) {
                char clkname[32];

                res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
                fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(fsl_edma->muxbase[i]))
                        return PTR_ERR(fsl_edma->muxbase[i]);

                sprintf(clkname, "dmamux%d", i);
                fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
                if (IS_ERR(fsl_edma->muxclk[i])) {
                        dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
                        return PTR_ERR(fsl_edma->muxclk[i]);
                }

                ret = clk_prepare_enable(fsl_edma->muxclk[i]);
                if (ret) {
                        dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
                        return ret;
                }
        }

        ret = fsl_edma_irq_init(pdev, fsl_edma);
        if (ret)
                return ret;

        fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

        INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
        for (i = 0; i < fsl_edma->n_chans; i++) {
                struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

                fsl_chan->edma = fsl_edma;

                fsl_chan->vchan.desc_free = fsl_edma_free_desc;
                vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

                edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
                fsl_edma_chan_mux(fsl_chan, 0, false);
        }

        dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
        dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
        dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);

        fsl_edma->dma_dev.dev = &pdev->dev;
        fsl_edma->dma_dev.device_alloc_chan_resources
                = fsl_edma_alloc_chan_resources;
        fsl_edma->dma_dev.device_free_chan_resources
                = fsl_edma_free_chan_resources;
        fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
        fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
        fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
        fsl_edma->dma_dev.device_control = fsl_edma_control;
        fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
        fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;

        platform_set_drvdata(pdev, fsl_edma);

        ret = dma_async_device_register(&fsl_edma->dma_dev);
        if (ret) {
                dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
                return ret;
        }

        ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
        if (ret) {
                dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
                dma_async_device_unregister(&fsl_edma->dma_dev);
                return ret;
        }

        /* enable round-robin channel and group arbitration */
        edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);

        return 0;
}

static int fsl_edma_remove(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
        int i;

        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_edma->dma_dev);

        for (i = 0; i < DMAMUX_NR; i++)
                clk_disable_unprepare(fsl_edma->muxclk[i]);

        return 0;
}

static const struct of_device_id fsl_edma_dt_ids[] = {
        { .compatible = "fsl,vf610-edma", },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

static struct platform_driver fsl_edma_driver = {
        .driver         = {
                .name   = "fsl-edma",
                .owner  = THIS_MODULE,
                .of_match_table = fsl_edma_dt_ids,
        },
        .probe          = fsl_edma_probe,
        .remove         = fsl_edma_remove,
};

static int __init fsl_edma_init(void)
{
        return platform_driver_register(&fsl_edma_driver);
}
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
        platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");