/*
 * Copyright (C) ST-Ericsson SA 2007-2010
 * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <plat/ste_dma40.h>

#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
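/*
 * Illustrative note (not from the original source): channels are packed
 * two per 2-bit field, so channels 6 and 7 both get D40_CHAN_POS() == 6
 * and D40_CHAN_POS_MASK() == 0x3 << 6; the even channel lives in the
 * "even" register and the odd channel in the "odd" one (see the
 * ACTIVE/ACTIVO selection in d40_channel_execute_command() below).
 */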
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Hardware designer of the block */
#define D40_HW_DESIGNER 0x8
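/*
 * Note on the allocation map encoding (summary of how the defines above
 * are used by d40_alloc_mask_set()/d40_alloc_mask_free() below): a value
 * of D40_ALLOC_FREE means the half channel is unused, D40_ALLOC_PHY means
 * it is taken by a physical channel, and anything else is a bitmask of
 * allocated logical event lines, starting from D40_ALLOC_LOG_FREE (no
 * lines taken).
 */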
/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void	*base;
	int	 size;
	/* Space for dst and src, plus an extra for padding */
	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_count: Number of transferred llis.
 * @lli_tx_len: Max number of LLIs per transfer, there can be
 * many transfers for one descriptor.
 * @txd: DMA engine struct. Used among other things for communication
 * during a transfer.
 * @node: List entry.
 * @dir: The transfer direction of this job.
 * @is_in_client_list: true if the client owns this descriptor.
 * @is_hw_linked: true if this job will automatically be continued for
 * the previous one.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir	 lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_count;
	u32				 lli_tx_len;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	enum dma_data_direction		 dir;
	bool				 is_in_client_list;
	bool				 is_hw_linked;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
 * @num_blocks: The number of entries of alloc_map. Equals to the
 * number of physical channels.
 */
struct d40_lcla_pool {
	void		*base;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	u32		*alloc_map;
	int		 num_blocks;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels
 *
 * @lock: A lock protecting this entity.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
 * event line number.
 */
struct d40_phy_res {
	spinlock_t lock;
	int	   num;
	u32	   allocated_src;
	u32	   allocated_dst;
};
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @completed: Starts with 1, after first interrupt it is set to dma engine's
 * current cookie.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	/* ID of the most recent completed transfer */
	int				 completed;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 active;
	struct list_head		 queue;
	struct stedma40_chan_cfg	 dma_cfg;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_lcla_elem		 lcla;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_data_direction		 runtime_direction;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};
/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};
static int d40_pool_lli_alloc(struct d40_desc *d40d,
			      int lli_len, bool is_log)
{
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
					      align);
		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
					      align);
	} else {
		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
					      align);
		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
					      align);
	}

	return 0;
}
static void d40_pool_lli_free(struct d40_desc *d40d)
{
	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *d;
	struct d40_desc *_d;

	if (!list_empty(&d40c->client)) {
		list_for_each_entry_safe(d, _d, &d40c->client, node)
			if (async_tx_test_ack(&d->txd)) {
				d40_pool_lli_free(d);
				d40_desc_remove(d);
				break;
			}
	} else {
		d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
		if (d == NULL)
			goto out;
		memset(d, 0, sizeof(struct d40_desc));
		INIT_LIST_HEAD(&d->node);
	}
out:
	return d;
}
static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	kmem_cache_free(d40c->base->desc_slab, d40d);
}
static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->queue);
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}
static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	list_for_each_entry(d, &d40c->queue, node)
		if (list_is_last(&d->node, &d40c->queue))
			break;
	return d;
}
/* Support functions for logical channels */
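/*
 * Each physical channel has a region of LCLA entries; d40_lcla_id_get()
 * below claims two free blocks from that region (one for src, one for
 * dst) by scanning the per-channel bits in lcla_pool.alloc_map. The ids
 * are cached in d40c->lcla so the scan only happens once per channel.
 */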
static int d40_lcla_id_get(struct d40_chan *d40c)
{
	int src_id = 0;
	int dst_id = 0;
	struct d40_log_lli *lcla_lidx_base =
		d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024;
	int i;
	int lli_per_log = d40c->base->plat_data->llis_per_log;
	unsigned long flags;

	if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
		return 0;

	if (d40c->base->lcla_pool.num_blocks > 32)
		return -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}
	src_id = i;
	if (src_id >= d40c->base->lcla_pool.num_blocks)
		goto err;

	for (; i < d40c->base->lcla_pool.num_blocks; i++) {
		if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &
		      (0x1 << i))) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |=
				(0x1 << i);
			break;
		}
	}

	dst_id = i;
	if (dst_id == src_id)
		goto err;

	d40c->lcla.src_id = src_id;
	d40c->lcla.dst_id = dst_id;
	d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
	d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return 0;
err:
	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);
	return -EINVAL;
}
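/*
 * Commands are written to the shared ACTIVE (even channels) or ACTIVO
 * (odd channels) register, two bits per channel; for a suspend request
 * the status field is then polled up to D40_SUSPEND_MAX_IT times before
 * giving up.
 */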
static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			dev_err(&d40c->chan.dev->device,
				"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
				__func__, d40c->phy_chan->num, d40c->log_num,
				status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}
static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	unsigned long flags;

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.dst_id));
	d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &=
		(~(0x1 << d40c->lcla.src_id));

	d40c->lcla.src_id = -1;
	d40c->lcla.dst_id = -1;

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	d40c->pending_tx = 0;
	d40c->busy = false;
}
static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
{
	u32 val;
	unsigned long flags;

	/* Notice, that disable requires the physical channel to be stopped */
	if (do_enable)
		val = D40_ACTIVATE_EVENTLINE;
	else
		val = D40_DEACTIVATE_EVENTLINE;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
	}
	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);

		writel((val << D40_EVENTLINE_POS(event)) |
		       ~D40_EVENTLINE_MASK(event),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	u32 val;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
		     d40c->phy_chan->num * D40_DREG_PCDELTA +
		     D40_CHAN_REG_SDLNK);
	return val;
}
static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
	       0x3) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDELT);

		writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
		       D40_SREG_ELEM_LOG_LIDX_MASK,
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSELT);
	}
}
static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40c->log_num == D40_PHY_CHAN) {
		d40_phy_lli_write(d40c->base->virtbase,
				  d40c->phy_chan->num,
				  d40d->lli_phy.dst,
				  d40d->lli_phy.src);
	} else {
		struct d40_log_lli *src = d40d->lli_log.src;
		struct d40_log_lli *dst = d40d->lli_log.dst;
		int s;

		src += d40d->lli_count;
		dst += d40d->lli_count;
		s = d40_log_lli_write(d40c->lcpa,
				      d40c->lcla.src, d40c->lcla.dst,
				      dst, src,
				      d40c->base->plat_data->llis_per_log);

		/* If s equals to zero, the job is not linked */
		if (s > 0) {
			(void) dma_map_single(d40c->base->dev, d40c->lcla.src,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
			(void) dma_map_single(d40c->base->dev, d40c->lcla.dst,
					      s * sizeof(struct d40_log_lli),
					      DMA_TO_DEVICE);
		}
	}
	d40d->lli_count += d40d->lli_tx_len;
}
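/*
 * The residue is derived from the channel's element counter (ECNT): the
 * number of elements left to transfer times the element width in bytes
 * (1 << data_width).
 */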
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (d40c->log_num != D40_PHY_CHAN)
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else
		num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
				 d40c->phy_chan->num * D40_DREG_PCDELTA +
				 D40_CHAN_REG_SDELT) &
			   D40_SREG_ELEM_PHY_ECNT_MASK) >>
			D40_SREG_ELEM_PHY_ECNT_POS;
	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (d40c->log_num != D40_PHY_CHAN)
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
				d40c->phy_chan->num * D40_DREG_PCDELTA +
				D40_CHAN_REG_SDLNK) &
			D40_SREG_LNK_PHYS_LNK_MASK;
	return is_link;
}
static int d40_pause(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res == 0) {
		if (d40c->log_num != D40_PHY_CHAN) {
			d40_config_set_event(d40c, false);
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c))
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
		}
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static int d40_resume(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int res = 0;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->base->rev == 0)
		if (d40c->log_num != D40_PHY_CHAN) {
			res = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			goto no_suspend;
		}

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
	}

no_suspend:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}
static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d)
{
	/* TODO: Write */
}
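/*
 * d40_tx_submit_phy() tries to chain the new job onto the job currently
 * executing in hardware: if the link register already points at the last
 * LLI of the previous job (and elements remain), the link registers are
 * rewritten directly; otherwise the last LLIs of the previous job are
 * patched to point at the new job's LLIs. In both cases the descriptor
 * is marked as hw linked so it is not loaded again by d40_queue_start().
 */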
static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
{
	int i;
	u32 val;
	struct d40_desc *d40d_prev = NULL;

	if (!list_empty(&d40c->queue))
		d40d_prev = d40_last_queued(d40c);
	else if (!list_empty(&d40c->active))
		d40d_prev = d40_first_active_get(d40c);

	if (!d40d_prev)
		return;

	/* Here we try to join this job with previous jobs */
	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSLNK);

	/* Figure out which link we're currently transmitting */
	for (i = 0; i < d40d_prev->lli_len; i++)
		if (val == d40d_prev->lli_phy.src[i].reg_lnk)
			break;

	val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
		    d40c->phy_chan->num * D40_DREG_PCDELTA +
		    D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;

	if (i == (d40d_prev->lli_len - 1) && val > 0) {
		/* Change the current one */
		writel(virt_to_phys(d40d->lli_phy.src),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SSLNK);
		writel(virt_to_phys(d40d->lli_phy.dst),
		       d40c->base->virtbase + D40_DREG_PCBASE +
		       d40c->phy_chan->num * D40_DREG_PCDELTA +
		       D40_CHAN_REG_SDLNK);

		d40d->is_hw_linked = true;

	} else if (i < d40d_prev->lli_len) {
		(void) dma_unmap_single(d40c->base->dev,
					virt_to_phys(d40d_prev->lli_phy.src),
					d40d_prev->lli_pool.size,
					DMA_TO_DEVICE);

		/* Keep the settings */
		val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.src);

		val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk &
			~D40_SREG_LNK_PHYS_LNK_MASK;
		d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk =
			val | virt_to_phys(d40d->lli_phy.dst);

		(void) dma_map_single(d40c->base->dev,
				      d40d_prev->lli_phy.src,
				      d40d_prev->lli_pool.size,
				      DMA_TO_DEVICE);
		d40d->is_hw_linked = true;
	}
}
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;

	(void) d40_pause(&d40c->chan);

	spin_lock_irqsave(&d40c->lock, flags);

	d40c->chan.cookie++;

	if (d40c->chan.cookie < 0)
		d40c->chan.cookie = 1;

	d40d->txd.cookie = d40c->chan.cookie;

	if (d40c->log_num == D40_PHY_CHAN)
		d40_tx_submit_phy(d40c, d40d);
	else
		d40_tx_submit_log(d40c, d40d);

	d40_desc_queue(d40c, d40d);

	spin_unlock_irqrestore(&d40c->lock, flags);

	(void) d40_resume(&d40c->chan);

	return tx->cookie;
}
static int d40_start(struct d40_chan *d40c)
{
	if (d40c->base->rev == 0) {
		int err;

		if (d40c->log_num != D40_PHY_CHAN) {
			err = d40_channel_execute_command(d40c,
							  D40_DMA_SUSPEND_REQ);
			if (err)
				return err;
		}
	}

	if (d40c->log_num != D40_PHY_CHAN)
		d40_config_set_event(d40c, true);

	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	int err;
	struct d40_desc *d40d;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		d40c->busy = true;

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/*
		 * If this job is already linked in hw,
		 * do not submit it.
		 */
		if (!d40d->is_hw_linked) {
			/* Initiate DMA job */
			d40_desc_load(d40c, d40d);

			/* Start dma job */
			err = d40_start(d40c);

			if (err)
				return NULL;
		}
	}

	return d40d;
}
/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->lli_count < d40d->lli_len) {

		d40_desc_load(d40c, d40d);
		/* Start dma job */
		(void) d40_start(d40c);
		return;
	}

	if (d40_queue_start(d40c) == NULL)
		d40c->busy = false;

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}
static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		goto err;

	d40c->completed = d40d->txd.cookie;

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs to return to the client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (async_tx_test_ack(&d40d->txd)) {
		d40_pool_lli_free(d40d);
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	} else {
		if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue maneuver if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}
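/*
 * The il[] table below maps one bit per channel across the four 32-bit
 * logical interrupt registers (offsets 0/32/64/96 into lookup_log_chans)
 * plus the physical tc/error registers (offset D40_PHY_CHAN); the handler
 * scans the concatenated status words as one long bitmap and dispatches
 * per set bit.
 */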
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	static const struct d40_interrupt_lookup il[] = {
		{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
		{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
		{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
		{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
		{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
		{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
		{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
		{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
		{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
		{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
	};

	int i;
	u32 regs[ARRAY_SIZE(il)];
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < ARRAY_SIZE(il); i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];
		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			dev_err(base->dev,
				"[%s] IRQ chan: %ld offset %d idx %d\n",
				__func__, chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}
static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
	bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	if (!conf->dir) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {

		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid TX channel address (%d)\n",
			__func__, conf->dst_dev_type);
		res = -EINVAL;
	}

	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
	    d40c->runtime_addr == 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid RX channel address (%d)\n",
			__func__, conf->src_dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
			__func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
		dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
			__func__);
		res = -EINVAL;
	}

	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
		dev_err(&d40c->chan.dev->device,
			"[%s] No event line\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
	    (src_event_group != dst_event_group)) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Invalid event group\n", __func__);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		dev_err(&d40c->chan.dev->device,
			"[%s] periph to periph not supported\n",
			__func__);
		res = -EINVAL;
	}

	return res;
}
static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
			       int log_event_line, bool is_log)
{
	unsigned long flags;
	spin_lock_irqsave(&phy->lock, flags);
	if (!is_log) {
		/* Physical interrupts are masked per physical full channel */
		if (phy->allocated_src == D40_ALLOC_FREE &&
		    phy->allocated_dst == D40_ALLOC_FREE) {
			phy->allocated_dst = D40_ALLOC_PHY;
			phy->allocated_src = D40_ALLOC_PHY;
			goto found;
		} else
			goto not_found;
	}

	/* Logical channel */
	if (is_src) {
		if (phy->allocated_src == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_src == D40_ALLOC_FREE)
			phy->allocated_src = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_src & (1 << log_event_line))) {
			phy->allocated_src |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	} else {
		if (phy->allocated_dst == D40_ALLOC_PHY)
			goto not_found;

		if (phy->allocated_dst == D40_ALLOC_FREE)
			phy->allocated_dst = D40_ALLOC_LOG_FREE;

		if (!(phy->allocated_dst & (1 << log_event_line))) {
			phy->allocated_dst |= 1 << log_event_line;
			goto found;
		} else
			goto not_found;
	}

found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return true;
not_found:
	spin_unlock_irqrestore(&phy->lock, flags);
	return false;
}
static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
				int log_event_line)
{
	unsigned long flags;
	bool is_free = false;

	spin_lock_irqsave(&phy->lock, flags);
	if (!log_event_line) {
		/* Physical interrupts are masked per physical full channel */
		phy->allocated_dst = D40_ALLOC_FREE;
		phy->allocated_src = D40_ALLOC_FREE;
		is_free = true;
		goto out;
	}

	/* Logical channel */
	if (is_src) {
		phy->allocated_src &= ~(1 << log_event_line);
		if (phy->allocated_src == D40_ALLOC_LOG_FREE)
			phy->allocated_src = D40_ALLOC_FREE;
	} else {
		phy->allocated_dst &= ~(1 << log_event_line);
		if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
			phy->allocated_dst = D40_ALLOC_FREE;
	}

	is_free = ((phy->allocated_src | phy->allocated_dst) ==
		   D40_ALLOC_FREE);

out:
	spin_unlock_irqrestore(&phy->lock, flags);

	return is_free;
}
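/*
 * Logical channels are spread across the physical channels of their
 * event group (phy_num = j + event_group * 2, stepping j by 8) rather
 * than packed onto the first free one; src allocations scan the pair
 * upwards and dst allocations downwards to reduce collisions.
 */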
static int d40_allocate_channel(struct d40_chan *d40c)
{
	int dev_type;
	int event_group;
	int event_line;
	struct d40_phy_res *phys;
	int i;
	int j;
	int log_num;
	bool is_src;
	bool is_log = (d40c->dma_cfg.channel_type &
		       STEDMA40_CHANNEL_IN_OPER_MODE)
		== STEDMA40_CHANNEL_IN_LOG_MODE;

	phys = d40c->base->phy_res;

	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		dev_type = d40c->dma_cfg.src_dev_type;
		log_num = 2 * dev_type;
		is_src = true;
	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		/* dst event lines are used for logical memcpy */
		dev_type = d40c->dma_cfg.dst_dev_type;
		log_num = 2 * dev_type + 1;
		is_src = false;
	} else
		return -EINVAL;

	event_group = D40_TYPE_TO_GROUP(dev_type);
	event_line = D40_TYPE_TO_EVENT(dev_type);

	if (!is_log) {
		if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
			/* Find physical half channel */
			for (i = 0; i < d40c->base->num_phy_chans; i++) {

				if (d40_alloc_mask_set(&phys[i], is_src,
						       0, is_log))
					goto found_phy;
			}
		} else
			for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
				int phy_num = j + event_group * 2;
				for (i = phy_num; i < phy_num + 2; i++) {
					if (d40_alloc_mask_set(&phys[i],
							       is_src,
							       0,
							       is_log))
						goto found_phy;
				}
			}
		return -EINVAL;
found_phy:
		d40c->phy_chan = &phys[i];
		d40c->log_num = D40_PHY_CHAN;
		goto out;
	}

	/* Find logical channel */
	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
		int phy_num = j + event_group * 2;
		/*
		 * Spread logical channels across all available physical rather
		 * than pack every logical channel at the first available phy
		 * channels.
		 */
		if (is_src) {
			for (i = phy_num; i < phy_num + 2; i++) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		} else {
			for (i = phy_num + 1; i >= phy_num; i--) {
				if (d40_alloc_mask_set(&phys[i], is_src,
						       event_line, is_log))
					goto found_log;
			}
		}
	}
	return -EINVAL;

found_log:
	d40c->phy_chan = &phys[i];
	d40c->log_num = log_num;
out:

	if (is_log)
		d40c->base->lookup_log_chans[d40c->log_num] = d40c;
	else
		d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;

	return 0;
}
static int d40_config_memcpy(struct d40_chan *d40c)
{
	dma_cap_mask_t cap = d40c->chan.device->cap_mask;

	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
			memcpy[d40c->chan.chan_id];

	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
		   dma_has_cap(DMA_SLAVE, cap)) {
		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
	} else {
		dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static int d40_free_dma(struct d40_chan *d40c)
{
	int res = 0;
	u32 event;
	struct d40_phy_res *phy = d40c->phy_chan;
	bool is_src;
	struct d40_desc *d;
	struct d40_desc *_d;

	/* Terminate all queued and active transfers */
	d40_term_all(d40c);

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			d40_pool_lli_free(d);
			d40_desc_remove(d);
			d40_desc_free(d40c, d);
		}

	if (phy == NULL) {
		dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
			__func__);
		return -EINVAL;
	}

	if (phy->allocated_src == D40_ALLOC_FREE &&
	    phy->allocated_dst == D40_ALLOC_FREE) {
		dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
			__func__);
		return -EINVAL;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		is_src = false;
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		is_src = true;
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		return -EINVAL;
	}

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
	if (res) {
		dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
			__func__);
		return res;
	}

	if (d40c->log_num != D40_PHY_CHAN) {
		/* Release logical channel, deactivate the event line */

		d40_config_set_event(d40c, false);
		d40c->base->lookup_log_chans[d40c->log_num] = NULL;

		/*
		 * Check if there are more logical allocations
		 * on this phy channel.
		 */
		if (!d40_alloc_mask_free(phy, is_src, event)) {
			/* Resume the other logical channels if any */
			if (d40_chan_has_events(d40c)) {
				res = d40_channel_execute_command(d40c,
								  D40_DMA_RUN);
				if (res) {
					dev_err(&d40c->chan.dev->device,
						"[%s] Executing RUN command\n",
						__func__);
					return res;
				}
			}
			return 0;
		}
	} else {
		(void) d40_alloc_mask_free(phy, is_src, 0);
	}

	/* Release physical channel */
	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
	if (res) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to stop channel\n", __func__);
		return res;
	}
	d40c->phy_chan = NULL;
	/* Invalidate channel type */
	d40c->dma_cfg.channel_type = 0;
	d40c->base->lookup_phy_chans[phy->num] = NULL;

	return 0;
}
static bool d40_is_paused(struct d40_chan *d40c)
{
	bool is_paused = false;
	unsigned long flags;
	void __iomem *active_reg;
	u32 status;
	u32 event;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num == D40_PHY_CHAN) {
		if (d40c->phy_chan->num % 2 == 0)
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
		else
			active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);
		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			is_paused = true;

		goto _exit;
	}

	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			       d40c->phy_chan->num * D40_DREG_PCDELTA +
			       D40_CHAN_REG_SDLNK);
	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
		status = readl(d40c->base->virtbase + D40_DREG_PCBASE +
			       d40c->phy_chan->num * D40_DREG_PCDELTA +
			       D40_CHAN_REG_SSLNK);
	} else {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unknown direction\n", __func__);
		goto _exit;
	}

	status = (status & D40_EVENTLINE_MASK(event)) >>
		D40_EVENTLINE_POS(event);

	if (status != D40_DMA_RUN)
		is_paused = true;
_exit:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return is_paused;
}
static u32 stedma40_residue(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	u32 bytes_left;
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);
	bytes_left = d40_residue(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return bytes_left;
}
/* Public DMA functions in addition to the DMA engine framework */

int stedma40_set_psize(struct dma_chan *chan,
		       int src_psize,
		       int dst_psize)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&d40c->lock, flags);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
		d40c->log_def.lcsp1 |= src_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		d40c->log_def.lcsp3 |= dst_psize <<
			D40_MEM_LCSP1_SCFG_PSIZE_POS;
		goto out;
	}

	if (src_psize == STEDMA40_PSIZE_PHY_1)
		d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
	}

	if (dst_psize == STEDMA40_PSIZE_PHY_1)
		d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
	else {
		d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
		d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
				       D40_SREG_CFG_PSIZE_POS);
		d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
	}
out:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return 0;
}
EXPORT_SYMBOL(stedma40_set_psize);
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
						   struct scatterlist *sgl_dst,
						   struct scatterlist *sgl_src,
						   unsigned int sgl_len,
						   unsigned long dma_flags)
{
	int res;
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Unallocated channel.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL)
		goto err;

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = d40d->lli_len;
	d40d->txd.flags = dma_flags;

	if (d40c->log_num != D40_PHY_CHAN) {
		if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
			d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

		/*
		 * Check if there is space available in lcla. If not,
		 * split list into 1-length and run only in lcpa
		 * space.
		 */
		if (d40_lcla_id_get(d40c) != 0)
			d40d->lli_tx_len = 1;

		if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		(void) d40_log_sg_to_lli(d40c->lcla.src_id,
					 sgl_src,
					 sgl_len,
					 d40d->lli_log.src,
					 d40c->log_def.lcsp1,
					 d40c->dma_cfg.src_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);

		(void) d40_log_sg_to_lli(d40c->lcla.dst_id,
					 sgl_dst,
					 sgl_len,
					 d40d->lli_log.dst,
					 d40c->log_def.lcsp3,
					 d40c->dma_cfg.dst_info.data_width,
					 dma_flags & DMA_PREP_INTERRUPT,
					 d40d->lli_tx_len,
					 d40c->base->plat_data->llis_per_log);
	} else {
		if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		res = d40_phy_sg_to_lli(sgl_src,
					sgl_len,
					0,
					d40d->lli_phy.src,
					virt_to_phys(d40d->lli_phy.src),
					d40c->src_def_cfg,
					d40c->dma_cfg.src_info.data_width,
					d40c->dma_cfg.src_info.psize);

		if (res < 0)
			goto err;

		res = d40_phy_sg_to_lli(sgl_dst,
					sgl_len,
					0,
					d40d->lli_phy.dst,
					virt_to_phys(d40d->lli_phy.dst),
					d40c->dst_def_cfg,
					d40c->dma_cfg.dst_info.data_width,
					d40c->dma_cfg.dst_info.psize);

		if (res < 0)
			goto err;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	spin_unlock_irqrestore(&d40c->lock, flags);

	return &d40d->txd;
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
EXPORT_SYMBOL(stedma40_memcpy_sg);
bool stedma40_filter(struct dma_chan *chan, void *data)
{
	struct stedma40_chan_cfg *info = data;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;

	if (data) {
		err = d40_validate_conf(d40c, info);
		if (!err)
			d40c->dma_cfg = *info;
	} else
		err = d40_config_memcpy(d40c);

	return err == 0;
}
EXPORT_SYMBOL(stedma40_filter);
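/*
 * Example client usage of the filter function above (a hedged sketch,
 * not part of the original file):
 *
 *	dma_cap_mask_t mask;
 *	struct stedma40_chan_cfg cfg = { ... channel configuration ... };
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 *
 * Passing NULL instead of &cfg makes stedma40_filter() fall back to the
 * default memcpy configuration via d40_config_memcpy().
 */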
/* DMA ENGINE functions */
static int d40_alloc_chan_resources(struct dma_chan *chan)
{
	int err;
	unsigned long flags;
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	bool is_free_phy;
	spin_lock_irqsave(&d40c->lock, flags);

	d40c->completed = chan->cookie = 1;

	/*
	 * If no dma configuration is set (channel_type == 0)
	 * use default configuration (memcpy)
	 */
	if (d40c->dma_cfg.channel_type == 0) {

		err = d40_config_memcpy(d40c);
		if (err) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Failed to configure memcpy channel\n",
				__func__);
			goto fail;
		}
	}
	is_free_phy = (d40c->phy_chan == NULL);

	err = d40_allocate_channel(d40c);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to allocate channel\n", __func__);
		goto fail;
	}

	/* Fill in basic CFG register values */
	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
		    &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);

	if (d40c->log_num != D40_PHY_CHAN) {
		d40_log_cfg(&d40c->dma_cfg,
			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);

		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
		else
			d40c->lcpa = d40c->base->lcpa_base +
				d40c->dma_cfg.dst_dev_type *
				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
	}

	/*
	 * Only write channel configuration to the DMA if the physical
	 * resource is free. In case of multiple logical channels
	 * on the same physical resource, only the first write is necessary.
	 */
	if (is_free_phy)
		d40_config_write(d40c);
fail:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return err;
}
static void d40_free_chan_resources(struct dma_chan *chan)
{
	struct d40_chan *d40c =
		container_of(chan, struct d40_chan, chan);
	int err;
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot free unallocated channel\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	err = d40_free_dma(d40c);

	if (err)
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to free channel\n", __func__);
	spin_unlock_irqrestore(&d40c->lock, flags);
}
static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
						       dma_addr_t dst,
						       dma_addr_t src,
						       size_t size,
						       unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err = 0;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);

	if (d40d == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Descriptor is NULL\n", __func__);
		goto err;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	if (d40c->log_num != D40_PHY_CHAN) {

		if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}
		d40d->lli_len = 1;
		d40d->lli_tx_len = 1;

		d40_log_fill_lli(d40d->lli_log.src,
				 src,
				 size,
				 0,
				 d40c->log_def.lcsp1,
				 d40c->dma_cfg.src_info.data_width,
				 false, true);

		d40_log_fill_lli(d40d->lli_log.dst,
				 dst,
				 size,
				 0,
				 d40c->log_def.lcsp3,
				 d40c->dma_cfg.dst_info.data_width,
				 true, true);

	} else {

		if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
			dev_err(&d40c->chan.dev->device,
				"[%s] Out of memory\n", __func__);
			goto err;
		}

		err = d40_phy_fill_lli(d40d->lli_phy.src,
				       src,
				       size,
				       d40c->dma_cfg.src_info.psize,
				       0,
				       d40c->src_def_cfg,
				       true,
				       d40c->dma_cfg.src_info.data_width,
				       false);
		if (err)
			goto err_fill_lli;

		err = d40_phy_fill_lli(d40d->lli_phy.dst,
				       dst,
				       size,
				       d40c->dma_cfg.dst_info.psize,
				       0,
				       d40c->dst_def_cfg,
				       true,
				       d40c->dma_cfg.dst_info.data_width,
				       false);

		if (err)
			goto err_fill_lli;

		(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
				      d40d->lli_pool.size, DMA_TO_DEVICE);
	}

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err_fill_lli:
	dev_err(&d40c->chan.dev->device,
		"[%s] Failed filling in PHY LLI\n", __func__);
	d40_pool_lli_free(d40d);
err:
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;
}
static int d40_prep_slave_sg_log(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sg_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t dev_addr = 0;
	int total_size;

	if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sg_len;
	if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
		d40d->lli_tx_len = d40d->lli_len;
	else
		d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;

	if (sg_len > 1)
		/*
		 * Check if there is space available in lcla.
		 * If not, split list into 1-length and run only
		 * in lcpa space.
		 */
		if (d40_lcla_id_get(d40c) != 0)
			d40d->lli_tx_len = 1;

	if (direction == DMA_FROM_DEVICE)
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	else if (direction == DMA_TO_DEVICE)
		if (d40c->runtime_addr)
			dev_addr = d40c->runtime_addr;
		else
			dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	else
		return -EINVAL;

	total_size = d40_log_sg_to_dev(&d40c->lcla,
				       sgl, sg_len,
				       &d40d->lli_log,
				       &d40c->log_def,
				       d40c->dma_cfg.src_info.data_width,
				       d40c->dma_cfg.dst_info.data_width,
				       direction,
				       dma_flags & DMA_PREP_INTERRUPT,
				       dev_addr, d40d->lli_tx_len,
				       d40c->base->plat_data->llis_per_log);

	if (total_size < 0)
		return -EINVAL;

	return 0;
}
static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
				 struct d40_chan *d40c,
				 struct scatterlist *sgl,
				 unsigned int sgl_len,
				 enum dma_data_direction direction,
				 unsigned long dma_flags)
{
	dma_addr_t src_dev_addr;
	dma_addr_t dst_dev_addr;
	int res;

	if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Out of memory\n", __func__);
		return -ENOMEM;
	}

	d40d->lli_len = sgl_len;
	d40d->lli_tx_len = sgl_len;

	if (direction == DMA_FROM_DEVICE) {
		dst_dev_addr = 0;
		if (d40c->runtime_addr)
			src_dev_addr = d40c->runtime_addr;
		else
			src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
	} else if (direction == DMA_TO_DEVICE) {
		src_dev_addr = 0;
		if (d40c->runtime_addr)
			dst_dev_addr = d40c->runtime_addr;
		else
			dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
	} else
		return -EINVAL;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				src_dev_addr,
				d40d->lli_phy.src,
				virt_to_phys(d40d->lli_phy.src),
				d40c->src_def_cfg,
				d40c->dma_cfg.src_info.data_width,
				d40c->dma_cfg.src_info.psize);
	if (res < 0)
		return res;

	res = d40_phy_sg_to_lli(sgl,
				sgl_len,
				dst_dev_addr,
				d40d->lli_phy.dst,
				virt_to_phys(d40d->lli_phy.dst),
				d40c->dst_def_cfg,
				d40c->dma_cfg.dst_info.data_width,
				d40c->dma_cfg.dst_info.psize);
	if (res < 0)
		return res;

	(void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
			      d40d->lli_pool.size, DMA_TO_DEVICE);
	return 0;
}
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_data_direction direction,
							 unsigned long dma_flags)
{
	struct d40_desc *d40d;
	struct d40_chan *d40c = container_of(chan, struct d40_chan,
					     chan);
	unsigned long flags;
	int err;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot prepare unallocated channel\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (d40c->dma_cfg.pre_transfer)
		d40c->dma_cfg.pre_transfer(chan,
					   d40c->dma_cfg.pre_transfer_data,
					   sg_dma_len(sgl));

	spin_lock_irqsave(&d40c->lock, flags);
	d40d = d40_desc_get(d40c);
	spin_unlock_irqrestore(&d40c->lock, flags);

	if (d40d == NULL)
		return NULL;

	if (d40c->log_num != D40_PHY_CHAN)
		err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	else
		err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
					    direction, dma_flags);
	if (err) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Failed to prepare %s slave sg job: %d\n",
			__func__,
			d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
		return NULL;
	}

	d40d->txd.flags = dma_flags;

	dma_async_tx_descriptor_init(&d40d->txd, chan);

	d40d->txd.tx_submit = d40_tx_submit;

	return &d40d->txd;
}
static enum dma_status d40_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Cannot read status of unallocated channel\n",
			__func__);
		return -EINVAL;
	}

	last_complete = d40c->completed;
	last_used = chan->cookie;

	if (d40_is_paused(d40c))
		ret = DMA_PAUSED;
	else
		ret = dma_async_is_complete(cookie, last_complete, last_used);

	dma_set_tx_state(txstate, last_complete, last_used,
			 stedma40_residue(chan));

	return ret;
}
static void d40_issue_pending(struct dma_chan *chan)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	unsigned long flags;

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return;
	}

	spin_lock_irqsave(&d40c->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!d40c->busy)
		(void) d40_queue_start(d40c);

	spin_unlock_irqrestore(&d40c->lock, flags);
}
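/*
 * d40_set_runtime_config() below translates the generic dma_slave_config
 * into the stedma40 configuration: the bus width maps to a
 * stedma40_periph_data_width and the maxburst to the largest fitting
 * logical burst size (STEDMA40_PSIZE_LOG_16 down to _1).
 */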
/* Runtime reconfiguration extension */
static void d40_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
	struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
	enum dma_slave_buswidth config_addr_width;
	dma_addr_t config_addr;
	u32 config_maxburst;
	enum stedma40_periph_data_width addr_width;
	int psize;

	if (config->direction == DMA_FROM_DEVICE) {
		dma_addr_t dev_addr_rx =
			d40c->base->plat_data->dev_rx[cfg->src_dev_type];

		config_addr = config->src_addr;
		if (dev_addr_rx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired RX address %08x "
				"overriding with %08x\n",
				dev_addr_rx, config_addr);
		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
			dev_dbg(d40c->base->dev,
				"channel was not configured for peripheral "
				"to memory transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_PERIPH_TO_MEM;

		config_addr_width = config->src_addr_width;
		config_maxburst = config->src_maxburst;

	} else if (config->direction == DMA_TO_DEVICE) {
		dma_addr_t dev_addr_tx =
			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

		config_addr = config->dst_addr;
		if (dev_addr_tx)
			dev_dbg(d40c->base->dev,
				"channel has a pre-wired TX address %08x "
				"overriding with %08x\n",
				dev_addr_tx, config_addr);
		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
			dev_dbg(d40c->base->dev,
				"channel was not configured for memory "
				"to peripheral transfer (%d) overriding\n",
				cfg->dir);
		cfg->dir = STEDMA40_MEM_TO_PERIPH;

		config_addr_width = config->dst_addr_width;
		config_maxburst = config->dst_maxburst;

	} else {
		dev_err(d40c->base->dev,
			"unrecognized channel direction %d\n",
			config->direction);
		return;
	}

	switch (config_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		addr_width = STEDMA40_BYTE_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		addr_width = STEDMA40_HALFWORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		addr_width = STEDMA40_WORD_WIDTH;
		break;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		addr_width = STEDMA40_DOUBLEWORD_WIDTH;
		break;
	default:
		dev_err(d40c->base->dev,
			"illegal peripheral address width "
			"requested (%d)\n",
			config->src_addr_width);
		return;
	}

	if (config_maxburst >= 16)
		psize = STEDMA40_PSIZE_LOG_16;
	else if (config_maxburst >= 8)
		psize = STEDMA40_PSIZE_LOG_8;
	else if (config_maxburst >= 4)
		psize = STEDMA40_PSIZE_LOG_4;
	else
		psize = STEDMA40_PSIZE_LOG_1;

	/* Set up all the endpoint configs */
	cfg->src_info.data_width = addr_width;
	cfg->src_info.psize = psize;
	cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN;
	cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
	cfg->dst_info.data_width = addr_width;
	cfg->dst_info.psize = psize;
	cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN;
	cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;

	/* These settings will take precedence later */
	d40c->runtime_addr = config_addr;
	d40c->runtime_direction = config->direction;
	dev_dbg(d40c->base->dev,
		"configured channel %s for %s, data width %d, "
		"maxburst %d bytes, LE, no flow control\n",
		dma_chan_name(chan),
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		config_addr_width,
		config_maxburst);
}
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	unsigned long flags;
	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

	if (d40c->phy_chan == NULL) {
		dev_err(&d40c->chan.dev->device,
			"[%s] Channel is not allocated!\n", __func__);
		return -EINVAL;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&d40c->lock, flags);
		d40_term_all(d40c);
		spin_unlock_irqrestore(&d40c->lock, flags);
		return 0;
	case DMA_PAUSE:
		return d40_pause(chan);
	case DMA_RESUME:
		return d40_resume(chan);
	case DMA_SLAVE_CONFIG:
		d40_set_runtime_config(chan,
				       (struct dma_slave_config *) arg);
		return 0;
	default:
		break;
	}

	/* Other commands are unimplemented */
	return -ENXIO;
}
/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
				 struct d40_chan *chans, int offset,
				 int num_chans)
{
	int i;
	struct d40_chan *d40c;

	INIT_LIST_HEAD(&dma->channels);

	for (i = offset; i < offset + num_chans; i++) {
		d40c = &chans[i];
		d40c->base = base;
		d40c->chan.device = dma;

		/* Invalidate lcla element */
		d40c->lcla.src_id = -1;
		d40c->lcla.dst_id = -1;

		spin_lock_init(&d40c->lock);

		d40c->log_num = D40_PHY_CHAN;

		INIT_LIST_HEAD(&d40c->active);
		INIT_LIST_HEAD(&d40c->queue);
		INIT_LIST_HEAD(&d40c->client);

		tasklet_init(&d40c->tasklet, dma_tasklet,
			     (unsigned long) d40c);

		list_add_tail(&d40c->chan.device_node,
			      &dma->channels);
	}
}
static int __init d40_dmaengine_init(struct d40_base *base,
				     int num_reserved_chans)
{
	int err;

	d40_chan_init(base, &base->dma_slave, base->log_chans,
		      0, base->num_log_chans);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_slave.device_tx_status = d40_tx_status;
	base->dma_slave.device_issue_pending = d40_issue_pending;
	base->dma_slave.device_control = d40_control;
	base->dma_slave.dev = base->dev;

	err = dma_async_device_register(&base->dma_slave);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register slave channels\n",
			__func__);
		goto failure1;
	}

	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
		      base->num_log_chans, base->plat_data->memcpy_len);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_memcpy.device_tx_status = d40_tx_status;
	base->dma_memcpy.device_issue_pending = d40_issue_pending;
	base->dma_memcpy.device_control = d40_control;
	base->dma_memcpy.dev = base->dev;
	/*
	 * This controller can only access addresses on even 32-bit
	 * (2^2 byte) boundaries.
	 */
	base->dma_memcpy.copy_align = 2;

	err = dma_async_device_register(&base->dma_memcpy);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register memcpy only channels\n",
			__func__);
		goto failure2;
	}

	d40_chan_init(base, &base->dma_both, base->phy_chans,
		      0, num_reserved_chans);

	dma_cap_zero(base->dma_both.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);

	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
	base->dma_both.device_tx_status = d40_tx_status;
	base->dma_both.device_issue_pending = d40_issue_pending;
	base->dma_both.device_control = d40_control;
	base->dma_both.dev = base->dev;
	base->dma_both.copy_align = 2;
	err = dma_async_device_register(&base->dma_both);

	if (err) {
		dev_err(base->dev,
			"[%s] Failed to register logical and physical capable channels\n",
			__func__);
		goto failure3;
	}
	return 0;
failure3:
	dma_async_device_unregister(&base->dma_memcpy);
failure2:
	dma_async_device_unregister(&base->dma_slave);
failure1:
	return err;
}
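/*
 * For reference, a minimal client-side sketch (not part of this driver)
 * of how one of the channels registered above is obtained through the
 * standard capability-based allocator; memcpy channels need no filter,
 * while slave channels would pass this driver's exported stedma40_filter()
 * together with a struct stedma40_chan_cfg:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 */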
static int __init d40_phy_res_init(struct d40_base *base)
{
	int i;
	int num_phy_chans_avail = 0;
	u32 val[2];
	int odd_even_bit = -2;

	val[0] = readl(base->virtbase + D40_DREG_PRSME);
	val[1] = readl(base->virtbase + D40_DREG_PRSMO);

	for (i = 0; i < base->num_phy_chans; i++) {
		base->phy_res[i].num = i;
		odd_even_bit += 2 * ((i % 2) == 0);
		if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
			/* Mark security only channels as occupied */
			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
		} else {
			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
			num_phy_chans_avail++;
		}
		spin_lock_init(&base->phy_res[i].lock);
	}

	/* Mark disabled channels as occupied */
	for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
		int chan = base->plat_data->disabled_channels[i];

		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
		num_phy_chans_avail--;
	}

	dev_info(base->dev, "%d of %d physical DMA channels available\n",
		 num_phy_chans_avail, base->num_phy_chans);

	/* Verify settings extended vs standard */
	val[0] = readl(base->virtbase + D40_DREG_PRTYP);

	for (i = 0; i < base->num_phy_chans; i++) {

		if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
		    (val[0] & 0x3) != 1)
			dev_info(base->dev,
				 "[%s] INFO: channel %d is misconfigured (%d)\n",
				 __func__, i, val[0] & 0x3);

		val[0] = val[0] >> 2;
	}

	return num_phy_chans_avail;
}
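/*
 * The PRSME/PRSMO decode above packs two mode bits per physical channel,
 * with even-numbered channels in PRSME and odd-numbered ones in PRSMO, so
 * odd_even_bit advances by two only on every even channel. Worked example:
 * for channel 5, val[1] (PRSMO) is selected and odd_even_bit is 4, so bits
 * 5:4 of PRSMO hold the mode; the value 1 means "secure mode only" and the
 * channel is marked as occupied.
 */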
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
	static const struct d40_reg_val dma_id_regs[] = {
		/* Peripheral Id */
		{ .reg = D40_DREG_PERIPHID0, .val = 0x0040},
		{ .reg = D40_DREG_PERIPHID1, .val = 0x0000},
		/*
		 * D40_DREG_PERIPHID2 depends on HW revision:
		 * MOP500/HREF ED has 0x0008,
		 * HREF V1 has 0x0028
		 * and is therefore checked separately below.
		 */
		{ .reg = D40_DREG_PERIPHID3, .val = 0x0000},
		/* PCell Id */
		{ .reg = D40_DREG_CELLID0, .val = 0x000d},
		{ .reg = D40_DREG_CELLID1, .val = 0x00f0},
		{ .reg = D40_DREG_CELLID2, .val = 0x0005},
		{ .reg = D40_DREG_CELLID3, .val = 0x00b1}
	};
	struct stedma40_platform_data *plat_data;
	struct clk *clk = NULL;
	void __iomem *virtbase = NULL;
	struct resource *res = NULL;
	struct d40_base *base = NULL;
	int num_log_chans = 0;
	int num_phy_chans;
	int i;
	u32 val;
	u32 rev;

	clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}

	clk_enable(clk);

	/* Get IO for DMAC base address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
	if (!res)
		goto failure;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O base") == NULL)
		goto failure;

	virtbase = ioremap(res->start, resource_size(res));
	if (!virtbase)
		goto failure;

	/* HW version check */
	for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
		if (dma_id_regs[i].val !=
		    readl(virtbase + dma_id_regs[i].reg)) {
			dev_err(&pdev->dev,
				"[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
				__func__,
				dma_id_regs[i].val,
				dma_id_regs[i].reg,
				readl(virtbase + dma_id_regs[i].reg));
			goto failure;
		}
	}

	/* Get silicon revision and designer */
	val = readl(virtbase + D40_DREG_PERIPHID2);

	if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
	    D40_HW_DESIGNER) {
		dev_err(&pdev->dev,
			"[%s] Unknown designer! Got %x wanted %x\n",
			__func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
			D40_HW_DESIGNER);
		goto failure;
	}

	rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
		D40_DREG_PERIPHID2_REV_POS;

	/* The number of physical channels on this HW */
	num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
		 rev, res->start);

	plat_data = pdev->dev.platform_data;

	/* Count the number of logical channels in use */
	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_rx[i] != 0)
			num_log_chans++;

	for (i = 0; i < plat_data->dev_len; i++)
		if (plat_data->dev_tx[i] != 0)
			num_log_chans++;

	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
		       sizeof(struct d40_chan), GFP_KERNEL);

	if (base == NULL) {
		dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
		goto failure;
	}

	base->clk = clk;
	base->num_phy_chans = num_phy_chans;
	base->num_log_chans = num_log_chans;
	base->phy_start = res->start;
	base->phy_size = resource_size(res);
	base->virtbase = virtbase;
	base->plat_data = plat_data;
	base->dev = &pdev->dev;
	base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
	base->log_chans = &base->phy_chans[num_phy_chans];

	base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
				GFP_KERNEL);
	if (!base->phy_res)
		goto failure;

	base->lookup_phy_chans = kzalloc(num_phy_chans *
					 sizeof(struct d40_chan *),
					 GFP_KERNEL);
	if (!base->lookup_phy_chans)
		goto failure;

	if (num_log_chans + plat_data->memcpy_len) {
		/*
		 * The max number of logical channels is the number of
		 * event lines for all src devices and dst devices.
		 */
		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
						 sizeof(struct d40_chan *),
						 GFP_KERNEL);
		if (!base->lookup_log_chans)
			goto failure;
	}

	base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
					    GFP_KERNEL);
	if (!base->lcla_pool.alloc_map)
		goto failure;

	base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (base->desc_slab == NULL)
		goto failure;

	return base;

failure:
	if (!IS_ERR_OR_NULL(clk)) {
		clk_disable(clk);
		clk_put(clk);
	}
	if (virtbase)
		iounmap(virtbase);
	if (res)
		release_mem_region(res->start,
				   resource_size(res));

	if (base) {
		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	return NULL;
}
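/*
 * Worked example of the channel-count formula above: ICFG carries a 3-bit
 * field, so num_phy_chans = 4 * field + 4 ranges from 4 (field 0) to 32
 * (field 0x7); a field value of 1 gives 4 * 1 + 4 = 8 physical channels.
 */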
static void __init d40_hw_init(struct d40_base *base)
{
	static const struct d40_reg_val dma_init_reg[] = {
		/* Clock every part of the DMA block from start */
		{ .reg = D40_DREG_GCC, .val = 0x0000ff01},

		/* Interrupts on all logical channels */
		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
		{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
	};
	int i;
	u32 prmseo[2] = {0, 0};
	u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
	u32 pcmis = 0;
	u32 pcicr = 0;

	for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
		writel(dma_init_reg[i].val,
		       base->virtbase + dma_init_reg[i].reg);

	/* Configure all our dma channels to default settings */
	for (i = 0; i < base->num_phy_chans; i++) {

		activeo[i % 2] = activeo[i % 2] << 2;

		if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
		    == D40_ALLOC_PHY) {
			activeo[i % 2] |= 3;
			continue;
		}

		/* Enable interrupt for this channel */
		pcmis = (pcmis << 1) | 1;

		/* Clear any pending interrupt on this channel */
		pcicr = (pcicr << 1) | 1;

		/* Set channel to physical mode */
		prmseo[i % 2] = prmseo[i % 2] << 2;
		prmseo[i % 2] |= 1;
	}

	writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
	writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
	writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
	writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

	/* Write which interrupt to enable */
	writel(pcmis, base->virtbase + D40_DREG_PCMIS);

	/* Write which interrupt to clear */
	writel(pcicr, base->virtbase + D40_DREG_PCICR);
}
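/*
 * Like PRSME/PRSMO in d40_phy_res_init() above, each of these registers
 * comes as an even/odd pair holding two bits per channel, split by channel
 * parity. The loop walks the channels highest-first while left-shifting,
 * so the lowest channel of each parity ends up in the least significant
 * bit pair; since num_phy_chans = 4 * n + 4 is always even, prmseo[1] and
 * activeo[1] accumulate the even channels (written to PRMSE/ACTIVE) while
 * prmseo[0] and activeo[0] accumulate the odd ones (PRMSO/ACTIVO).
 */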
static int __init d40_lcla_allocate(struct d40_base *base)
{
	unsigned long *page_list;
	int i, j;
	int ret = 0;

	/*
	 * This is somewhat ugly. We need 8192 bytes that are aligned on an
	 * 18-bit (256 KiB) boundary. To fulfil this hardware requirement
	 * without wasting 256 KiB, we allocate pages until we get an
	 * aligned one.
	 */
	page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
			    GFP_KERNEL);

	if (!page_list) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Calculate how many pages are required */
	base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

	for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
		page_list[i] = __get_free_pages(GFP_KERNEL,
						base->lcla_pool.pages);
		if (!page_list[i]) {

			dev_err(base->dev,
				"[%s] Failed to allocate %d pages.\n",
				__func__, base->lcla_pool.pages);

			for (j = 0; j < i; j++)
				free_pages(page_list[j], base->lcla_pool.pages);
			goto failure;
		}

		if ((virt_to_phys((void *)page_list[i]) &
		     (LCLA_ALIGNMENT - 1)) == 0)
			break;
	}

	for (j = 0; j < i; j++)
		free_pages(page_list[j], base->lcla_pool.pages);

	if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
		base->lcla_pool.base = (void *)page_list[i];
	} else {
		/*
		 * After many attempts with no success in finding the correct
		 * alignment, try with allocating a big buffer.
		 */
		dev_warn(base->dev,
			 "[%s] Failed to get %d pages @ 18 bit align.\n",
			 __func__, base->lcla_pool.pages);
		base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
							 base->num_phy_chans +
							 LCLA_ALIGNMENT,
							 GFP_KERNEL);
		if (!base->lcla_pool.base_unaligned) {
			ret = -ENOMEM;
			goto failure;
		}

		base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
						 LCLA_ALIGNMENT);
	}

	writel(virt_to_phys(base->lcla_pool.base),
	       base->virtbase + D40_DREG_LCLA);
failure:
	kfree(page_list);
	return ret;
}
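/*
 * Worked example of the strategy above, assuming 8 physical channels and
 * 4 KiB pages: the pool needs 8 * SZ_1K = 8 KiB, so lcla_pool.pages = 2.
 * Note that the second argument of __get_free_pages() and free_pages() is
 * an allocation order, so each attempt actually returns 2^2 = 4 pages
 * (16 KiB), naturally aligned to 16 KiB by the buddy allocator. Since
 * LCLA_ALIGNMENT is 0x40000 (256 KiB = 2^18), roughly one attempt in 16
 * also lands on a 256 KiB boundary; rejected candidates are kept on
 * page_list until a fit is found and are then freed.
 */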
static int __init d40_probe(struct platform_device *pdev)
{
	int err;
	int ret = -ENOENT;
	u32 val;
	struct d40_base *base;
	struct resource *res = NULL;
	int num_reserved_chans;

	base = d40_hw_detect_init(pdev);

	if (!base)
		goto failure;

	num_reserved_chans = d40_phy_res_init(base);

	platform_set_drvdata(pdev, base);

	spin_lock_init(&base->interrupt_lock);
	spin_lock_init(&base->execmd_lock);

	/* Get IO for logical channel parameter address */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res) {
		ret = -ENOENT;
		dev_err(&pdev->dev,
			"[%s] No \"lcpa\" memory resource\n",
			__func__);
		goto failure;
	}
	base->lcpa_size = resource_size(res);
	base->phy_lcpa = res->start;

	if (request_mem_region(res->start, resource_size(res),
			       D40_NAME " I/O lcpa") == NULL) {
		ret = -EBUSY;
		dev_err(&pdev->dev,
			"[%s] Failed to request LCPA region 0x%x-0x%x\n",
			__func__, res->start, res->end);
		goto failure;
	}

	/* We make use of ESRAM memory for this. */
	val = readl(base->virtbase + D40_DREG_LCPA);
	if (res->start != val && val != 0) {
		dev_warn(&pdev->dev,
			 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
			 __func__, val, res->start);
	} else
		writel(res->start, base->virtbase + D40_DREG_LCPA);

	base->lcpa_base = ioremap(res->start, resource_size(res));
	if (!base->lcpa_base) {
		ret = -ENOMEM;
		dev_err(&pdev->dev,
			"[%s] Failed to ioremap LCPA region\n",
			__func__);
		goto failure;
	}

	ret = d40_lcla_allocate(base);
	if (ret) {
		dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n",
			__func__);
		goto failure;
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->lcla_pool.num_blocks = base->num_phy_chans;

	base->irq = platform_get_irq(pdev, 0);

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);

	if (ret) {
		dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
		goto failure;
	}

	err = d40_dmaengine_init(base, num_reserved_chans);
	if (err)
		goto failure;

	d40_hw_init(base);

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);
		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable(base->clk);
			clk_put(base->clk);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
	return ret;
}
static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
	},
};

int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}

arch_initcall(stedma40_init);