/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>	/* struct ep93xx_dma_data, EP93XX_DMA_* port ids */
/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3
/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_DONEINT		BIT(1)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32
struct ep93xx_dma_engine;
/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @last_completed: last completed cookie value
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0
	int				buffer;
	dma_cookie_t			last_completed;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};
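
/*
 * Illustrative sketch (kept out of the build): one way a peripheral driver
 * might request an M2P channel and attach the &struct ep93xx_dma_data that
 * @chan.private is expected to carry. The filter function, the I2S port
 * choice and the "i2s1-tx" name are assumptions made for the example, not
 * part of this driver.
 */
#if 0
static bool example_filter(struct dma_chan *chan, void *filter_param)
{
	/* Attach the channel configuration later consumed by hw_setup() */
	chan->private = filter_param;
	return true;
}

static struct dma_chan *example_request_m2p_channel(void)
{
	static struct ep93xx_dma_data data = {
		.port		= EP93XX_DMA_I2S1,
		.direction	= DMA_TO_DEVICE,
		.name		= "i2s1-tx",
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_filter, &data);
}
#endif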
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}
static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}
/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}
/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !ep93xx_dma_get_active(edmac)->txd.cookie;
}
/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
	u32 bus_addr;

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie   : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize     : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}
/*
 * M2M DMA implementation
 *
 * For M2M transfers we don't use NFB at all because it simply doesn't work
 * well with memcpy transfers. When you submit both buffers it is extremely
 * unlikely that you get an NFB interrupt; the controller instead reports a
 * DONE interrupt with both buffers already transferred, which means that we
 * weren't able to update the next buffer.
 *
 * So for now we "simulate" NFB by just submitting buffer after buffer
 * without double buffering.
 */
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_TO_DEVICE) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_TO_DEVICE) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}
static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
		return INTERRUPT_UNKNOWN;

	/* Clear the DONE bit */
	writel(0, edmac->regs + M2M_INTERRUPT);

	/* Disable interrupts and the channel */
	control = readl(edmac->regs + M2M_CONTROL);
	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
	writel(control, edmac->regs + M2M_CONTROL);

	/*
	 * Since we only get DONE interrupt we have to find out ourselves
	 * whether there still is something to process. So we try to advance
	 * the chain and see whether it succeeds.
	 */
	if (ep93xx_dma_advance_active(edmac)) {
		edmac->edma->hw_submit(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_DONE;
}
/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}
static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback;
	void *callback_param;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	desc = ep93xx_dma_get_active(edmac);
	if (desc->complete) {
		edmac->last_completed = desc->txd.cookie;
		list_splice_init(&edmac->active, &list);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	callback = desc->txd.callback;
	callback_param = desc->txd.callback_param;

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		ep93xx_dma_get_active(edmac)->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute given descriptor on the hardware or if the hardware
 * is busy, queue the descriptor to be executed later on. Returns cookie which
 * can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);

	cookie = edmac->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	edmac->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
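
/*
 * Illustrative sketch (kept out of the build) of the client-side submit
 * path: dmaengine_submit() lands in ep93xx_dma_tx_submit() above, and
 * dma_async_issue_pending() ends up in ep93xx_dma_issue_pending() below.
 * The helper name is made up for the example.
 */
#if 0
static dma_cookie_t example_submit(struct dma_chan *chan,
				   struct dma_async_tx_descriptor *txd)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	return cookie;
}
#endif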
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_TO_DEVICE &&
				    data->direction != DMA_FROM_DEVICE)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	edmac->last_completed = 1;
	edmac->chan.cookie = 1;
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;

	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}
/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
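
/*
 * Illustrative sketch (kept out of the build): performing a memcpy on an
 * M2M channel. The channel is assumed to have been requested with the
 * DMA_MEMCPY capability and %NULL @chan.private, and both buffers are
 * assumed to be DMA mapped already. The helper name is made up.
 */
#if 0
static dma_cookie_t example_memcpy(struct dma_chan *chan, dma_addr_t dest,
				   dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;

	/* ep93xx_dma_prep_dma_memcpy() splits len into 64 KiB - 1 chunks */
	txd = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						   DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	return dmaengine_submit(txd);
}
#endif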
/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_data_direction dir,
			 unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac),
				 "too big transfer size %zu\n", len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_TO_DEVICE) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
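
/*
 * Illustrative sketch (kept out of the build): queueing a scatterlist on a
 * slave channel. In this kernel generation clients reach the prep hook
 * through the dma_device ops; sgl is assumed to have been mapped with
 * dma_map_sg() already. The helper name is made up.
 */
#if 0
static dma_cookie_t example_tx_sg(struct dma_chan *chan,
				  struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						 DMA_TO_DEVICE, DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	return dmaengine_submit(txd);
}
#endif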
/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. Transfer terminates
 * only when client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_data_direction dir)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_TO_DEVICE) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
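
/*
 * Illustrative sketch (kept out of the build): starting a cyclic transfer,
 * e.g. for an audio ring buffer. The callback runs from the channel tasklet
 * after each period. Everything named example_* is made up; the transfer
 * runs until dmaengine_terminate_all(chan) as described above.
 */
#if 0
static void example_period_done(void *arg)
{
	/* Called after each period_len worth of data has been transferred */
}

static dma_cookie_t example_start_cyclic(struct dma_chan *chan,
					 dma_addr_t buf, size_t buf_len,
					 size_t period_len)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						   period_len, DMA_TO_DEVICE);
	if (!txd)
		return -ENOMEM;

	txd->callback = example_period_done;
	txd->callback_param = NULL;

	return dmaengine_submit(txd);
}
#endif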
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}
static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_FROM_DEVICE:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_TO_DEVICE:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
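
/*
 * Illustrative sketch (kept out of the build): the runtime configuration an
 * M2M slave client (here SSP transmit) would send before preparing
 * transfers. dmaengine_slave_config() is routed to ep93xx_dma_slave_config()
 * via the DMA_SLAVE_CONFIG command handled below; the data register address
 * is a placeholder the caller must supply.
 */
#if 0
static int example_config_m2m_tx(struct dma_chan *chan, dma_addr_t ssp_dr)
{
	struct dma_slave_config config = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= ssp_dr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	return dmaengine_slave_config(chan, &config);
}
#endif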
/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}
/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	dma_cookie_t last_used, last_completed;
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	last_used = chan->cookie;
	last_completed = edmac->last_completed;
	spin_unlock_irqrestore(&edmac->lock, flags);

	ret = dma_async_is_complete(cookie, last_completed, last_used);
	dma_set_tx_state(state, last_completed, last_used, 0);

	return ret;
}
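
/*
 * Illustrative sketch (kept out of the build): busy-polling a cookie until
 * the transfer completes. dma_async_is_tx_complete() ends up calling
 * ep93xx_dma_tx_status() above. The helper name is made up; real clients
 * would normally sleep or rely on the descriptor callback instead.
 */
#if 0
static void example_wait_for(struct dma_chan *chan, dma_cookie_t cookie)
{
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_IN_PROGRESS)
		cpu_relax();
}
#endif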
/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}
static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];

			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}
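
/*
 * Illustrative sketch (kept out of the build): the shape of the platform
 * data this probe routine consumes. All names, addresses and interrupt
 * numbers below are placeholders; the real tables live in the EP93xx SoC
 * code and are registered under the "ep93xx-dma-m2p"/"ep93xx-dma-m2m" ids.
 */
#if 0
static struct ep93xx_dma_chan_data example_m2p_channels[] = {
	{ .name = "m2p0", .base = (void __iomem *)0xf0000000, .irq = 17 },
	{ .name = "m2p1", .base = (void __iomem *)0xf0000040, .irq = 18 },
};

static struct ep93xx_dma_platform_data example_m2p_data = {
	.channels	= example_m2p_channels,
	.num_channels	= ARRAY_SIZE(example_m2p_channels),
};

static struct platform_device example_m2p_device = {
	.name		= "ep93xx-dma-m2p",
	.id		= -1,
	.dev		= {
		.platform_data	= &example_m2p_data,
	},
};
#endif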
static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};
static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};
static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");