/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
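
/*
 * Illustrative example (not in the original source): each pair of channels
 * shares one 2-bit field. For chan = 5, D40_CHAN_POS(5) = 2 * (5 / 2) = 4,
 * so channels 4 and 5 both use bits 5:4 and D40_CHAN_POS_MASK(5) = 0x30.
 */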

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Big map showing which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		 *base;
	void		 *base_unaligned;
	int		  pages;
	spinlock_t	  lock;
	struct d40_desc	**alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_data_direction		 runtime_direction;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32  src;
	u32  clr;
	bool is_error;
	int  offset;
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time; the halves therefore
	 * start at 1, since 0 can't be used (zero is the end marker).
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (d40c->log_num == D40_PHY_CHAN)
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
			D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
				D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	int curr_lcla = -EINVAL, next_lcla;

	if (d40c->log_num == D40_PHY_CHAN) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
		d40d->lli_current = d40d->lli_len;
	} else {

		if ((d40d->lli_len - d40d->lli_current) > 1)
			curr_lcla = d40_lcla_alloc_one(d40c, d40d);

		d40_log_lli_lcpa_write(d40c->lcpa,
				       &d40d->lli_log.dst[d40d->lli_current],
				       &d40d->lli_log.src[d40d->lli_current],
				       curr_lcla);

		d40d->lli_current++;
		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
			struct d40_log_lli *lcla;

			if (d40d->lli_current + 1 < d40d->lli_len)
				next_lcla = d40_lcla_alloc_one(d40c, d40d);
			else
				next_lcla = -EINVAL;

			lcla = d40c->base->lcla_pool.base +
				d40c->phy_chan->num * 1024 +
				8 * curr_lcla * 2;

			d40_log_lli_lcla_write(lcla,
					       &d40d->lli_log.dst[d40d->lli_current],
					       &d40d->lli_log.src[d40d->lli_current],
					       next_lcla);

			(void) dma_map_single(d40c->base->dev, lcla,
					      2 * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);

			curr_lcla = next_lcla;

			if (curr_lcla == -EINVAL) {
				d40d->lli_current++;
				break;
			}
		}
	}
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	int burst;

	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			burst = 1;
		else
			burst = 2 << psize;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			burst = 1;
		else
			burst = 2 << psize;
	}

	return burst;
}

/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}
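
/*
 * Worked example (illustrative, assuming STEDMA40_MAX_SEG_SIZE is 0xffff
 * elements): with a byte-wide endpoint (data_width 0) and a word-wide one
 * (data_width 2), min_w = 0 and max_w = 2, so seg_max = ALIGN(0xffff, 4) =
 * 0x10000, trimmed by (1 << 2) to 0xfffc bytes. A 1 MiB buffer then needs
 * dmalen = 17 LLIs, since 16 * 0xfffc < 0x100000.
 */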

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

/* Support functions for logical channels */

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
				   u32 event, int reg)
{
	void __iomem *addr = d40c->base->virtbase + D40_DREG_PCBASE
			     + d40c->phy_chan->num * D40_DREG_PCDELTA + reg;
	int tries;

	if (!enable) {
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		return;
	}

	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to ensure
	 * it does. Usually only one retry is sufficient.
	 */
	tries = 100;
	while (--tries) {
		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		if (readl(addr) & D40_EVENTLINE_MASK(event))
			break;
	}

	if (tries != 99)
		dev_dbg(chan2dev(d40c),
			"[%s] workaround enable S%cLNK (%d tries)\n",
			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
			100 - tries);

	WARN_ON(!tries);
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
		     d40c->phy_chan->num * D40_DREG_PCDELTA +
		     D40_CHAN_REG_SDLNK);
	return val;
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (d40c->log_num == D40_PHY_CHAN)
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDELT);

		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSELT);
	}
}
d40_residue(struct d40_chan
*d40c
)
843 if (d40c
->log_num
!= D40_PHY_CHAN
)
844 num_elt
= (readl(&d40c
->lcpa
->lcsp2
) & D40_MEM_LCSP2_ECNT_MASK
)
845 >> D40_MEM_LCSP2_ECNT_POS
;
847 num_elt
= (readl(d40c
->base
->virtbase
+ D40_DREG_PCBASE
+
848 d40c
->phy_chan
->num
* D40_DREG_PCDELTA
+
849 D40_CHAN_REG_SDELT
) &
850 D40_SREG_ELEM_PHY_ECNT_MASK
) >>
851 D40_SREG_ELEM_PHY_ECNT_POS
;
852 return num_elt
* (1 << d40c
->dma_cfg
.dst_info
.data_width
);
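
/*
 * Worked example (illustrative): with a remaining element count (ECNT) of 10
 * and a destination data width of STEDMA40_WORD_WIDTH (assumed value 2, i.e.
 * 4-byte elements), the residue reported is 10 * (1 << 2) = 40 bytes.
 */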

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (d40c->log_num != D40_PHY_CHAN) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (d40c->log_num != D40_PHY_CHAN) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	d40_lcla_free_all(d40c, d40d);

	if (d40d->lli_current < d40d->lli_len) {
		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		goto err;

	d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (async_tx_test_ack(&d40d->txd)) {
		d40_pool_lli_free(d40d);
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	} else {
		if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid TX channel address (%d)\n",
			__func__, conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid RX channel address (%d)\n",
			__func__, conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] src (burst x width) != dst (burst x width)\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}
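
/*
 * Allocation example (illustrative): a logical src channel on event line 3
 * first turns allocated_src from D40_ALLOC_FREE into D40_ALLOC_LOG_FREE (0)
 * and then sets bit 3, so a second client asking for the same event line on
 * this physical channel is refused while other event lines still fit.
 */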

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
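
/*
 * Placement note (illustrative): with phy_num = j + event_group * 2, every
 * bank of 8 physical channels reserves the channel pair (2 * event_group,
 * 2 * event_group + 1) for a given event group; e.g. event group 3 maps to
 * channels 6-7, 14-15, 22-23, ... across the banks.
 */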

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{

	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	d40c->configured = false;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			       d40c->phy_chan->num * D40_DREG_PCDELTA +
			       D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			       d40c->phy_chan->num * D40_DREG_PCDELTA +
			       D40_CHAN_REG_SSLNK);
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;

}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.data_width);
	if (d40d->lli_len < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unaligned size\n", __func__);
		goto err;
	}

	d40d->lli_current = 0;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 d40c->dma_cfg.dst_info.data_width);

		(void) d40_log_sg_to_lli(sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 d40c->dma_cfg.src_info.data_width);
	} else {
		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					virt_to_phys(d40d->lli_phy.src),
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.src_info.psize);

		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					virt_to_phys(d40d->lli_phy.dst),
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.psize);

		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
err:
	if (d40d)
		d40_desc_free(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure memcpy channel\n",
				__func__);
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
*chan
)
1811 struct d40_chan
*d40c
=
1812 container_of(chan
, struct d40_chan
, chan
);
1814 unsigned long flags
;
1816 if (d40c
->phy_chan
== NULL
) {
1817 dev_err(&d40c
->chan
.dev
->device
,
1818 "[%s] Cannot free unallocated channel\n", __func__
);
1823 spin_lock_irqsave(&d40c
->lock
, flags
);
1825 err
= d40_free_dma(d40c
);
1828 dev_err(&d40c
->chan
.dev
->device
,
1829 "[%s] Failed to free channel\n", __func__
);
1830 spin_unlock_irqrestore(&d40c
->lock
, flags
);

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;
	d40d->lli_len = d40_size_2_dmalen(size,
					  d40c->dma_cfg.src_info.data_width,
					  d40c->dma_cfg.dst_info.data_width);
	if (d40d->lli_len < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unaligned size\n", __func__);
		goto err;
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_current = 0;

		if (d40_log_buf_to_lli(d40d->lli_log.src,
				       src,
				       size,
				       d40c->log_def.lcsp1,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       true) == NULL)
			goto err;

		if (d40_log_buf_to_lli(d40d->lli_log.dst,
				       dst,
				       size,
				       d40c->log_def.lcsp3,
				       d40c->dma_cfg.dst_info.data_width,
				       d40c->dma_cfg.src_info.data_width,
				       true) == NULL)
			goto err;

	} else {

		if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		if (d40_phy_buf_to_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       false) == NULL)
			goto err;

		if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       d40c->dma_cfg.src_info.data_width,
				       false) == NULL)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err:
	if (d40d)
		d40_desc_free(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}

static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *chan,
	    struct scatterlist *dst_sg, unsigned int dst_nents,
	    struct scatterlist *src_sg, unsigned int src_nents,
	    unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
}

static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;

	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.data_width);
	if (d40d->lli_len < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unaligned size\n", __func__);
		return -EINVAL;
	}

	if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_current = 0;

	if (direction == DMA_FROM_DEVICE) {
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	} else
		return -EINVAL;

	total_size = d40_log_sg_to_dev(sgl, sg_len,
				       &d40d->lli_log,
				       &d40c->log_def,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       direction,
				       dev_addr);

	if (total_size < 0)
		return -EINVAL;

	return 0;
}

static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.dst_info.data_width);
	if (d40d->lli_len < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unaligned size\n", __func__);
		return -EINVAL;
	}

	if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_current = 0;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		if (d40c->runtime_addr)
			src_dev_addr = d40c->runtime_addr;
		else
			src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		src_dev_addr = 0;
		if (d40c->runtime_addr)
			dst_dev_addr = d40c->runtime_addr;
		else
			dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				virt_to_phys(d40d->lli_phy.src),
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.src_info.psize);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				virt_to_phys(d40d->lli_phy.dst),
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.dst_info.psize);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}

static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							  struct scatterlist *sgl,
							  unsigned int sg_len,
							  enum dma_data_direction direction,
							  unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot prepare unallocated channel\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err:
	if (d40d)
		d40_desc_free(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot read status of unallocated channel\n",
			__func__);
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth config_addr_width;
	dma_addr_t config_addr;
	u32 config_maxburst;
	enum stedma40_periph_data_width addr_width;
	int psize;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		config_addr_width = config->src_addr_width;
		config_maxburst = config->src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		config_addr_width = config->dst_addr_width;
		config_maxburst = config->dst_maxburst;

	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return;
	}

	switch (config_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			config->src_addr_width);
		return;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else if (config_maxburst >= 2)
			psize = STEDMA40_PSIZE_PHY_2;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}
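
	/*
	 * Burst mapping example (illustrative): a dma_slave_config with
	 * maxburst = 12 on a logical channel falls into the ">= 8" bucket
	 * above and is rounded down to STEDMA40_PSIZE_LOG_8, i.e. bursts of
	 * 8 elements; only 1, 2 (physical only), 4, 8 and 16 are supported.
	 */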

	/* Set up all the endpoint configs */
	cfg->src_info.data_width = addr_width;
	cfg->src_info.psize = psize;
	cfg->src_info.big_endian = false;
	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
	cfg->dst_info.data_width = addr_width;
	cfg->dst_info.psize = psize;
	cfg->dst_info.big_endian = false;
	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	/* Fill in register values */
	if (d40c->log_num != D40_PHY_CHAN)
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d, "
		"maxburst %d bytes, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		config_addr_width,
		config_maxburst);
}

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	case DMA_SLAVE_CONFIG:
		d40_set_runtime_config(chan,
				       (struct dma_slave_config *) arg);
		return 0;
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i - offset];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_dma_sg = d40_prep_sg;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register slave channels\n",
			__func__);
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_dma_sg = d40_prep_sg;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses at even
	 * 32bit boundaries, i.e. 2^2
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_dma_sg = d40_prep_sg;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;

	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
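/*
 * Editorial example, not part of the original driver: the three dma_device
 * instances registered above are told apart by their capability masks, so a
 * client selects one by asking for the capability it needs. This sketch is
 * hypothetical usage, shown for illustration only.
 */
#if 0
static struct dma_chan *example_get_memcpy_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* No filter function: accept any channel advertising MEMCPY */
	return dma_request_channel(mask, NULL, NULL);
}
#endif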
/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
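/*
 * Editorial note on the decode above, not original code: the hardware keeps
 * one 2-bit status field per channel, with even-numbered channels packed
 * into PRSME and odd-numbered ones into PRSMO. odd_even_bit advances by 2
 * only on even i, so channels 0 and 1 are both read from bit position 0 of
 * their respective registers, channels 2 and 3 from bit position 2, and so
 * on. The equivalent direct computation would be:
 */
#if 0
static inline u32 example_phy_chan_status(u32 even, u32 odd, int i)
{
	/* Channel pairs share a 2-bit position in their even/odd register */
	return ((i & 1 ? odd : even) >> (2 * (i / 2))) & 3;
}
#endif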
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 Depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;
	u32 rev;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	/* Get silicon revision and designer */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
	    D40_HW_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
			D40_HW_DESIGNER);
		goto failure;
	}

	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
		D40_DREG_PERIPHID2_REV_POS;

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels are event lines for all
		 * src devices and dst devices
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
					    sizeof(struct d40_desc *) *
					    D40_LCLA_LINK_PER_EVENT_GRP,
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
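/*
 * Editorial sketch, inferred from the pointer arithmetic above rather than
 * from any separate documentation: d40_hw_detect_init() carves one kzalloc()
 * block into several regions, so a single kfree(base) releases them all.
 *
 *   base -> | struct d40_base | phy_chans[num_phy_chans] | log_chans[...] |
 *
 * base->phy_chans points just past the 4-byte-aligned d40_base header and
 * base->log_chans points past the physical channels, covering the logical
 * plus memcpy channels counted from platform data.
 */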
static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
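/*
 * Editorial note, not original code: the loop above visits channels from the
 * highest number down (num_phy_chans - i - 1) and shifts each accumulator
 * left before adding the next field, so channel 0 and channel 1 end up in
 * the least significant 2-bit fields of the even/odd registers; this is the
 * same packing that d40_phy_res_init() decodes when it reads PRSME/PRSMO
 * back. pcmis and pcicr collect one enable/clear bit per non-secure channel
 * in the same fashion.
 */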
static int __init d40_lcla_allocate(struct d40_base *base)
{
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned,
	 * and to fulfill this hardware requirement without wasting 256 kb
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			dev_err(base->dev,
				"[%s] Failed to allocate %d pages.\n",
				__func__, base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts and no success with finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
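/*
 * Editorial sketch, not part of the original driver: the loop above relies
 * on __get_free_pages() giving back blocks that are only order-aligned, not
 * LCLA_ALIGNMENT-aligned. Keeping every rejected block allocated until an
 * aligned one turns up stops the allocator from handing the same unaligned
 * block straight back on the next attempt. The technique in isolation, with
 * hypothetical "order" and "align" parameters (align a power of two):
 */
#if 0
static void *example_alloc_aligned_pages(unsigned int order,
					 unsigned long align)
{
	unsigned long tries[MAX_LCLA_ALLOC_ATTEMPTS];
	void *aligned = NULL;
	int i, n;

	for (n = 0; n < MAX_LCLA_ALLOC_ATTEMPTS; n++) {
		tries[n] = __get_free_pages(GFP_KERNEL, order);
		if (!tries[n])
			break;
		if ((virt_to_phys((void *)tries[n]) & (align - 1)) == 0) {
			aligned = (void *)tries[n];
			break;
		}
	}

	/* Release every unaligned attempt; keep only the aligned winner */
	for (i = 0; i < n; i++)
		free_pages(tries[i], order);

	return aligned;
}
#endif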
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
			__func__);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);