/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
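
/*
 * Worked example (illustrative, not part of the driver): even and odd
 * channels are reported in separate registers (ACTIVE/ACTIVO below), so
 * the 2-bit position only advances every second channel:
 *
 *	D40_CHAN_POS(4) == D40_CHAN_POS(5) == 2 * (5 / 2) == 4
 *	D40_CHAN_POS_MASK(5) == 0x3 << 4 == 0x30
 *
 * A command or status for channel 5 therefore occupies bits [5:4] of
 * its register.
 */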

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0
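
/*
 * Illustrative note (not part of the driver): allocated_src/allocated_dst
 * in struct d40_phy_res below are interpreted against these markings.
 * D40_ALLOC_FREE (bit 31) means the half channel is unused, D40_ALLOC_PHY
 * (bit 30) means it is taken as a whole physical channel, and anything
 * below that is a bitmask of claimed logical event lines, with
 * D40_ALLOC_LOG_FREE (all zero) meaning "logical mode, no event lines
 * claimed yet". For example, a half channel carrying logical event lines
 * 2 and 5 would hold (1 << 2) | (1 << 5) == 0x24.
 */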

/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8

/*
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};

/*
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	dma_addr_t	dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/*
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
};

/*
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		 *base;
	dma_addr_t	  dma_addr;
	void		 *base_unaligned;
	int		  pages;
	spinlock_t	  lock;
	struct d40_desc	**alloc_map;
};

/*
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};

/*
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;

	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_data_direction		 runtime_direction;
};

/*
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};

/*
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

/*
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}
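
/*
 * Worked example (illustrative, not part of the driver): the per-channel
 * register windows are laid out consecutively, so for physical channel 2
 * chan_base() resolves to
 *
 *	virtbase + D40_DREG_PCBASE + 2 * D40_DREG_PCDELTA
 *
 * and every D40_CHAN_REG_* offset used below is relative to that window.
 */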

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time, therefore the half
	 * start on 1 since 0 can't be used since zero is used as end marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
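
/*
 * Illustrative sketch (not part of the driver): alloc_map is a flat array
 * with D40_LCLA_LINK_PER_EVENT_GRP (128) slots per physical channel, so
 * physical channel 3 owns slots 384..511. Because src and dst entries are
 * allocated in pairs and slot 0 is reserved as the end marker, only
 * indices 1..63 of a channel's region are scanned: the first candidate is
 * alloc_map[3 * 128 + 1] and the last is alloc_map[3 * 128 + 63].
 */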

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
			D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
				D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d40c, d);
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}

static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}

static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	int curr_lcla = -EINVAL;

	if (lli_len - lli_current > 1)
		curr_lcla = d40_lcla_alloc_one(chan, desc);

	d40_log_lli_lcpa_write(chan->lcpa,
			       &lli->dst[lli_current],
			       &lli->src[lli_current],
			       curr_lcla);

	lli_current++;
	for (; lli_current < lli_len; lli_current++) {
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = -EINVAL;

		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla);

		dma_sync_single_range_for_device(chan->base->dev,
						 pool->dma_addr, lcla_offset,
						 2 * sizeof(struct d40_log_lli),
						 DMA_TO_DEVICE);

		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL) {
			lli_current++;
			break;
		}
	}

	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}
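
/*
 * Worked example (illustrative, not part of the driver): apart from the
 * single-beat encodings handled above, psize encodes the burst length as
 * a power of two, so a psize value of 2 maps to a burst of 2 << 2 == 8
 * elements and a psize value of 3 maps to 2 << 3 == 16 elements.
 */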

/*
 * The dma only supports transmitting packages up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}
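
/*
 * Worked example (illustrative; assumes STEDMA40_MAX_SEG_SIZE is the
 * hardware element-count limit): with a 1-byte source width and a 4-byte
 * destination width, min_w == 0 and max_w == 2, so seg_max is
 * STEDMA40_MAX_SEG_SIZE rounded up to a 4-byte multiple and then trimmed
 * by (1 << 2) to stay within the limit. A buffer of 3 * seg_max + 4 bytes
 * then needs dmalen == 4 elements, and d40_sg_2_dmalen() simply sums this
 * per scatterlist entry.
 */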

/* Support functions for logical channels */

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
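
/*
 * Worked example (illustrative, not part of the driver): to issue
 * D40_DMA_RUN (1) on physical channel 5 (odd, so ACTIVO) the code above
 * builds
 *
 *	wmask = 0xffffffff & ~D40_CHAN_POS_MASK(5);	(== ~0x30)
 *	writel(wmask | (1 << D40_CHAN_POS(5)), active_reg);
 *
 * so the target 2-bit field carries the command while every other field
 * is written as all ones; the driver relies on all-ones being a no-op
 * encoding for the untouched channels.
 */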

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	d40c->pending_tx = 0;
	d40c->busy = false;
}

static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
				   u32 event, int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;

	if (!enable) {
		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		return;
	}

	/*
	 * The hardware sometimes doesn't register the enable when src and dst
	 * event lines are active on the same logical channel. Retry to ensure
	 * it does. Usually only one retry is sufficient.
	 */
	tries = 100;
	while (--tries) {
		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		if (readl(addr) & D40_EVENTLINE_MASK(event))
			break;
	}

	if (tries != 99)
		dev_dbg(chan2dev(d40c),
			"[%s] workaround enable S%cLNK (%d tries)\n",
			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
			100 - tries);

	WARN_ON(!tries);
}

static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	unsigned long flags;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SSLNK);
	}

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		__d40_config_set_event(d40c, do_enable, event,
				       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
	}
}

static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			  >> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (chan_is_logical(d40c)) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (chan_is_logical(d40c)) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {

		if (chan_is_logical(d40c))
			d40_config_set_event(d40c, true);

		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	return tx->cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (chan_is_logical(d40c)) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (chan_is_logical(d40c))
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	d40_lcla_free_all(d40c, d40d);

	if (d40d->lli_current < d40d->lli_len) {
		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);
	if (d40d == NULL)
		goto err;

	d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (async_tx_test_ack(&d40d->txd)) {
		d40_pool_lli_free(d40c, d40d);
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	} else {
		if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
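
/*
 * Worked example (illustrative, not part of the driver): regs[] is
 * scanned as one long bit vector. On a 32-bit build, a set bit at
 * position chan == 70 decodes as
 *
 *	row = 70 / 32 == 2	(third entry of il[], D40_DREG_LCTIS2)
 *	idx = 70 % 32 == 6
 *
 * so the handler acks bit 6 in il[2].clr and, since il[2].offset is 64,
 * dispatches to lookup_log_chans[64 + 6].
 */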

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		chan_err(d40c, "Invalid TX channel address (%d)\n",
			 conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		chan_err(d40c, "Invalid RX channel address (%d)\n",
			 conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		chan_err(d40c, "Invalid dst\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		chan_err(d40c, "Invalid src\n");
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		chan_err(d40c, "No event line\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		chan_err(d40c, "Invalid event group\n");
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
		chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
		res = -EINVAL;
	}

	return res;
}

static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}

static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}

static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:
	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}

static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		chan_err(d40c, "No memcpy\n");
		return -EINVAL;
	}

	return 0;
}

static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d40c, d);
			d40_desc_remove(d);
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		chan_err(d40c, "phy == null\n");
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		chan_err(d40c, "channel already free\n");
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		chan_err(d40c, "Unknown direction\n");
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		chan_err(d40c, "suspend failed\n");
		return res;
	}

	if (chan_is_logical(d40c)) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					chan_err(d40c,
						 "Executing RUN command\n");
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		chan_err(d40c, "Failed to stop channel\n");
		return res;
	}
	d40c->phy_chan = NULL;
	d40c->configured = false;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}

static bool d40_is_paused(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (chan_is_physical(d40c)) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(chanbase + D40_CHAN_REG_SSLNK);
	} else {
		chan_err(d40c, "Unknown direction\n");
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}

static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}

static int
d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_log_sg_to_lli(sg_src, sg_len,
				src_dev_addr,
				desc->lli_log.src,
				chan->log_def.lcsp1,
				src_info->data_width,
				dst_info->data_width);

	ret = d40_log_sg_to_lli(sg_dst, sg_len,
				dst_dev_addr,
				desc->lli_log.dst,
				chan->log_def.lcsp3,
				dst_info->data_width,
				src_info->data_width);

	return ret < 0 ? ret : 0;
}

static int
d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
		struct scatterlist *sg_src, struct scatterlist *sg_dst,
		unsigned int sg_len, dma_addr_t src_dev_addr,
		dma_addr_t dst_dev_addr)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct stedma40_half_channel_info *src_info = &cfg->src_info;
	struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
	int ret;

	ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
				desc->lli_phy.src,
				virt_to_phys(desc->lli_phy.src),
				chan->src_def_cfg,
				src_info, dst_info);

	ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
				desc->lli_phy.dst,
				virt_to_phys(desc->lli_phy.dst),
				chan->dst_def_cfg,
				dst_info, src_info);

	dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
				   desc->lli_pool.size, DMA_TO_DEVICE);

	return ret < 0 ? ret : 0;
}

static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
	      unsigned int sg_len, unsigned long dma_flags)
{
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	struct d40_desc *desc;
	int ret;

	desc = d40_desc_get(chan);
	if (!desc)
		return NULL;

	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
					cfg->dst_info.data_width);
	if (desc->lli_len < 0) {
		chan_err(chan, "Unaligned size\n");
		goto err;
	}

	ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
	if (ret < 0) {
		chan_err(chan, "Could not allocate lli\n");
		goto err;
	}

	desc->lli_current = 0;
	desc->txd.flags = dma_flags;
	desc->txd.tx_submit = d40_tx_submit;

	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);

	return desc;

err:
	d40_desc_free(chan, desc);
	return NULL;
}

static dma_addr_t
d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
	struct stedma40_platform_data *plat = chan->base->plat_data;
	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
	dma_addr_t addr = 0;

	if (chan->runtime_addr)
		return chan->runtime_addr;

	if (direction == DMA_FROM_DEVICE)
		addr = plat->dev_rx[cfg->src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		addr = plat->dev_tx[cfg->dst_dev_type];

	return addr;
}

static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
	    struct scatterlist *sg_dst, unsigned int sg_len,
	    enum dma_data_direction direction, unsigned long dma_flags)
{
	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
	dma_addr_t src_dev_addr = 0;
	dma_addr_t dst_dev_addr = 0;
	struct d40_desc *desc;
	unsigned long flags;
	int ret;

	if (!chan->phy_chan) {
		chan_err(chan, "Cannot prepare unallocated channel\n");
		return NULL;
	}

	spin_lock_irqsave(&chan->lock, flags);

	desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
	if (desc == NULL)
		goto err;

	if (direction != DMA_NONE) {
		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);

		if (direction == DMA_FROM_DEVICE)
			src_dev_addr = dev_addr;
		else if (direction == DMA_TO_DEVICE)
			dst_dev_addr = dev_addr;
	}

	if (chan_is_logical(chan))
		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);
	else
		ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
				      sg_len, src_dev_addr, dst_dev_addr);

	if (ret) {
		chan_err(chan, "Failed to prepare %s sg job: %d\n",
			 chan_is_logical(chan) ? "log" : "phy", ret);
		goto err;
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	return &desc->txd;

err:
	if (desc)
		d40_desc_free(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
	return NULL;
}

bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	if (!err)
		d40c->configured = true;

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
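
/*
 * Example client usage (illustrative sketch, not part of this file): a
 * peripheral driver typically obtains a DMA40 channel by passing its
 * stedma40_chan_cfg through dma_request_channel() with this filter. The
 * cfg values below are placeholders; real clients also set the src/dst
 * device types from their platform data.
 *
 *	static struct dma_chan *my_request_chan(void)
 *	{
 *		static struct stedma40_chan_cfg cfg = {
 *			.dir	= STEDMA40_PERIPH_TO_MEM,
 *			.mode	= STEDMA40_MODE_LOGICAL,
 *		};
 *		dma_cap_mask_t mask;
 *
 *		dma_cap_zero(mask);
 *		dma_cap_set(DMA_SLAVE, mask);
 *		return dma_request_channel(mask, stedma40_filter, &cfg);
 *	}
 */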

static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{
	bool realtime = d40c->dma_cfg.realtime;
	bool highprio = d40c->dma_cfg.high_priority;
	u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
	u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
	u32 event = D40_TYPE_TO_EVENT(dev_type);
	u32 group = D40_TYPE_TO_GROUP(dev_type);
	u32 bit = 1 << event;

	/* Destination event lines are stored in the upper halfword */
	if (!src)
		bit <<= 16;

	writel(bit, d40c->base->virtbase + prioreg + group * 4);
	writel(bit, d40c->base->virtbase + rtreg + group * 4);
}

static void d40_set_prio_realtime(struct d40_chan *d40c)
{
	if (d40c->base->rev < 3)
		return;

	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);

	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
}
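
/*
 * Worked example (illustrative, not part of the driver): in
 * __d40_set_prio_rt() a destination event line's bit is shifted into the
 * upper halfword before the register writes, so dst event 3 in event
 * group 1 sets bit (3 + 16) == 19 in the register at prioreg + 1 * 4,
 * while the same src event would set bit 3.
 */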

/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/* If no dma configuration is set use default configuration (memcpy) */
	if (!d40c->configured) {
		err = d40_config_memcpy(d40c);
		if (err) {
			chan_err(d40c, "Failed to configure memcpy channel\n");
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		chan_err(d40c, "Failed to allocate channel\n");
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, chan_is_logical(d40c));

	d40_set_prio_realtime(d40c);

	if (chan_is_logical(d40c)) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}

static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot free unallocated channel\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		chan_err(d40c, "Failed to free channel\n");
	spin_unlock_irqrestore(&d40c->lock, flags);
}

static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct scatterlist dst_sg;
	struct scatterlist src_sg;

	sg_init_table(&dst_sg, 1);
	sg_init_table(&src_sg, 1);

	sg_dma_address(&dst_sg) = dst;
	sg_dma_address(&src_sg) = src;

	sg_dma_len(&dst_sg) = size;
	sg_dma_len(&src_sg) = size;

	return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
}

static struct dma_async_tx_descriptor *
d40_prep_memcpy_sg(struct dma_chan *chan,
		   struct scatterlist *dst_sg, unsigned int dst_nents,
		   struct scatterlist *src_sg, unsigned int src_nents,
		   unsigned long dma_flags)
{
	if (dst_nents != src_nents)
		return NULL;

	return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE,
			   dma_flags);
}

static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
		return NULL;

	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
}

static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Cannot read status of unallocated channel\n");
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}

/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth config_addr_width;
	dma_addr_t config_addr;
	u32 config_maxburst;
	enum stedma40_periph_data_width addr_width;
	int psize;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		config_addr_width = config->src_addr_width;
		config_maxburst = config->src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		config_addr_width = config->dst_addr_width;
		config_maxburst = config->dst_maxburst;

	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return;
	}

	switch (config_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			config->src_addr_width);
		return;
	}

	if (chan_is_logical(d40c)) {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_LOG_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_LOG_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_LOG_4;
		else
			psize = STEDMA40_PSIZE_LOG_1;
	} else {
		if (config_maxburst >= 16)
			psize = STEDMA40_PSIZE_PHY_16;
		else if (config_maxburst >= 8)
			psize = STEDMA40_PSIZE_PHY_8;
		else if (config_maxburst >= 4)
			psize = STEDMA40_PSIZE_PHY_4;
		else if (config_maxburst >= 2)
			psize = STEDMA40_PSIZE_PHY_2;
		else
			psize = STEDMA40_PSIZE_PHY_1;
	}

	/* Set up all the endpoint configs */
	cfg->src_info.data_width = addr_width;
	cfg->src_info.psize = psize;
	cfg->src_info.big_endian = false;
	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
	cfg->dst_info.data_width = addr_width;
	cfg->dst_info.psize = psize;
	cfg->dst_info.big_endian = false;
	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	/* Fill in register values */
	if (chan_is_logical(d40c))
		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
	else
		d40_phy_cfg(cfg, &d40c->src_def_cfg,
			    &d40c->dst_def_cfg, false);

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d, "
		"maxburst %d bytes, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		config_addr_width,
		config_maxburst);
}
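
/*
 * Example client usage (illustrative sketch, not part of this file): the
 * runtime configuration above is normally reached through the generic
 * DMA_SLAVE_CONFIG control. Addresses and widths below are placeholders.
 *
 *	struct dma_slave_config conf = {
 *		.direction	= DMA_FROM_DEVICE,
 *		.src_addr	= my_periph_fifo_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &conf);
 *
 * With maxburst 8 on a logical channel this selects STEDMA40_PSIZE_LOG_8
 * and a 4-byte data width for both endpoints.
 */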

static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		chan_err(d40c, "Channel is not allocated!\n");
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	case DMA_SLAVE_CONFIG:
		d40_set_runtime_config(chan,
				       (struct dma_slave_config *) arg);
		return 0;
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}

/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i = 0;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i - offset];
		d40c->base = base;
		d40c->chan.device = dma;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}

static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_dma_sg = d40_prep_memcpy_sg;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		d40_err(base->dev, "Failed to register slave channels\n");
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_dma_sg = d40_prep_memcpy_sg;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses at even
	 * 32bit boundaries, i.e. 2^2
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		d40_err(base->dev,
			"Failed to register memcpy only channels\n");
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
	dma_cap_set(DMA_SG, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_dma_sg = d40_prep_memcpy_sg;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		d40_err(base->dev,
			"Failed to register logical and physical capable channels\n");
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
2328 /* Initialization functions. */
2330 static int __init
d40_phy_res_init(struct d40_base
*base
)
2333 int num_phy_chans_avail
= 0;
2335 int odd_even_bit
= -2;
2337 val
[0] = readl(base
->virtbase
+ D40_DREG_PRSME
);
2338 val
[1] = readl(base
->virtbase
+ D40_DREG_PRSMO
);
	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}
	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);
	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 depends on HW revision:
		 * DB8500ed has 0x0008,
		 * DB8500v1 has 0x0028,
		 * DB8500v2 has 0x0038
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;
	u32 rev;
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		d40_err(&pdev->dev, "No matching clock found\n");
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			d40_err(&pdev->dev,
				"Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}
	/* Get silicon revision and designer */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
	    D40_HW_DESIGNER) {
		d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
			val & D40_DREG_PERIPHID2_DESIGNER_MASK,
			D40_HW_DESIGNER);
		goto failure;
	}

	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
		D40_DREG_PERIPHID2_REV_POS;

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
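	/*
	 * The low three bits of ICFG encode the channel count in steps of
	 * four, giving between 4 * 0 + 4 = 4 and 4 * 7 + 4 = 32 channels.
	 */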
	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;
	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		d40_err(&pdev->dev, "Out of memory\n");
		goto failure;
	}

	base->rev = rev;
	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];
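	/*
	 * base, the physical channel array and the logical channel array all
	 * live in the single kzalloc() above: phy_chans points just past the
	 * aligned struct d40_base, and log_chans follows the num_phy_chans
	 * physical entries.
	 */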
	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The maximum number of logical channels is the number of
		 * event lines for all src and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
					    sizeof(struct d40_desc *) *
					    D40_LCLA_LINK_PER_EVENT_GRP,
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;
failure:
	if (!IS_ERR(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;
	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt # */
		pcmis = (pcmis << 1) | 1;

		/* Clear interrupt # */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}
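	/*
	 * As with PRSME/PRSMO in d40_phy_res_init(), mode and active state
	 * are 2-bit fields split across an even/odd register pair
	 * (PRMSE/PRMSO, ACTIVE/ACTIVO); reserved channels were shifted in
	 * with the value 3 above instead of being set up for use.
	 */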
	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
static int __init d40_lcla_allocate(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned;
	 * to fulfill this hardware requirement without wasting 256 kb
	 * we allocate pages until we get an aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculating how many pages that are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
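	/*
	 * Worked example: with 8 physical channels the pool is
	 * 8 * SZ_1K = 8 KiB, yet LCLA_ALIGNMENT is 0x40000, so the block
	 * must start on a 2^18 = 256 KiB boundary; hence the retry loop
	 * below rather than one large aligned allocation.
	 */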
	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			d40_err(base->dev, "Failed to allocate %d pages.\n",
				base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			ret = -ENOMEM;
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts with no success in finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}
	pool->dma_addr = dma_map_single(base->dev, pool->base,
					SZ_1K * base->num_phy_chans,
					DMA_TO_DEVICE);
	if (dma_mapping_error(base->dev, pool->dma_addr)) {
		pool->dma_addr = 0;
		ret = -ENOMEM;
		goto failure;
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);

failure:
	kfree(page_list);
	return ret;
}
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;
	u32 val;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);
	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		d40_err(&pdev->dev,
			"Failed to request LCPA region 0x%x-0x%x\n",
			res->start, res->end);
		goto failure;
	}
	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
		goto failure;
	}
	ret = d40_lcla_allocate(base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;
failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
arch_initcall(stedma40_init);
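/*
 * arch_initcall() runs at initcall level 3, before subsys and device
 * initcalls, so the DMA engine is available by the time client drivers
 * probe and request their channels.
 */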