/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"
#define D40_NAME "dma40"

#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Attempts before giving up trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0
/*
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP		= 0,
	D40_DMA_RUN		= 1,
	D40_DMA_SUSPEND_REQ	= 2,
	D40_DMA_SUSPENDED	= 3
};
/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE	= 0,
	D40_ACTIVATE_EVENTLINE		= 1,
	D40_SUSPEND_REQ_EVENTLINE	= 2,
	D40_ROUND_EVENTLINE		= 3
};
/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	/* ... */
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
static u32 d40_backup_regs_v3[] = {
	/* ... */
};

#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)

static u32 d40_backup_regs_chan[] = {
	/* ... */
};
/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void		*base;
	int		 size;
	dma_addr_t	 dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8		 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};
/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used among other things for communication
 * with the DMA engine.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	struct d40_phy_lli_bidir	 lli_phy;
	struct d40_log_lli_bidir	 lli_log;

	struct d40_lli_pool		 lli_pool;
	int				 lli_len;
	int				 lli_current;
	int				 lcla_alloc;

	struct dma_async_tx_descriptor	 txd;
	struct list_head		 node;

	bool				 is_in_client_list;
	bool				 cyclic;
};
/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void		*base;
	dma_addr_t	 dma_addr;
	void		*base_unaligned;
	int		 pages;
	spinlock_t	 lock;
	struct d40_desc	**alloc_map;
};
/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but for dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event lines.
 */
/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t			 lock;
	int				 log_num;
	int				 pending_tx;
	bool				 busy;
	struct d40_phy_res		*phy_chan;
	struct dma_chan			 chan;
	struct tasklet_struct		 tasklet;
	struct list_head		 client;
	struct list_head		 pending_queue;
	struct list_head		 active;
	struct list_head		 queue;
	struct list_head		 prepare_queue;
	struct stedma40_chan_cfg	 dma_cfg;
	bool				 configured;
	struct d40_base			*base;
	/* Default register configurations */
	u32				 src_def_cfg;
	u32				 dst_def_cfg;
	struct d40_def_lcsp		 log_def;
	struct d40_log_lli_full		*lcpa;
	/* Runtime reconfiguration */
	dma_addr_t			 runtime_addr;
	enum dma_transfer_direction	 runtime_direction;
};
/**
 * struct d40_base - The big global struct, one for each probe'd instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized
 */
struct d40_base {
	spinlock_t			  interrupt_lock;
	spinlock_t			  execmd_lock;
	struct device			 *dev;
	void __iomem			 *virtbase;
	u8				  rev:4;
	struct clk			 *clk;
	phys_addr_t			  phy_start;
	resource_size_t			  phy_size;
	int				  irq;
	int				  num_phy_chans;
	int				  num_log_chans;
	struct dma_device		  dma_both;
	struct dma_device		  dma_slave;
	struct dma_device		  dma_memcpy;
	struct d40_chan			 *phy_chans;
	struct d40_chan			 *log_chans;
	struct d40_chan			**lookup_log_chans;
	struct d40_chan			**lookup_phy_chans;
	struct stedma40_platform_data	 *plat_data;
	struct regulator		 *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res		 *phy_res;
	struct d40_lcla_pool		  lcla_pool;
	void				 *lcpa_base;
	dma_addr_t			  phy_lcpa;
	resource_size_t			  lcpa_size;
	struct kmem_cache		 *desc_slab;
	u32				  reg_val_backup[BACKUP_REGS_SZ];
	u32				  reg_val_backup_v3[BACKUP_REGS_SZ_V3];
	u32				 *reg_val_backup_chan;
	u16				  gcc_pwr_off_mask;
	bool				  initialized;
};
/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};
/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)
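
/*
 * Allocate the LLI area for a descriptor: the small pre_alloc_lli buffer is
 * used for the common one-src/one-dst case, otherwise a properly aligned
 * buffer is kmalloc'ed (and DMA-mapped for physical channels).
 */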
static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}
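
/*
 * Reserve one free LCLA link slot (covering both src and dst) for this
 * descriptor in the physical channel's part of the LCLA allocation map.
 */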
static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;
	int p;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;

	/*
	 * Allocate both src and dst at the same time; the index therefore
	 * starts at 1, since 0 can't be used (zero is used as end marker).
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (!d40c->base->lcla_pool.alloc_map[p + i]) {
			d40c->base->lcla_pool.alloc_map[p + i] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
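
/*
 * Give back every LCLA slot owned by this descriptor. Physical channels do
 * not use LCLA, so this is a no-op for them.
 */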
static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
			D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
			d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num *
				D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}
static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}
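
/*
 * Get a descriptor: reuse an acked one from the client list if possible,
 * otherwise allocate a fresh one from the descriptor slab.
 */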
static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	d40_pool_lli_free(d40c, d40d);
	d40_lcla_free_all(d40c, d40d);
	kmem_cache_free(d40c->base->desc_slab, d40d);
}

static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->active);
}
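
/*
 * Load the first src/dst LLIs of a descriptor directly into the physical
 * channel's SSxxx/SDxxx registers; later links are fetched by the hardware
 * through the link pointers.
 */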
static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
	struct d40_phy_lli *lli_src = desc->lli_phy.src;
	void __iomem *base = chan_base(chan);

	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);

	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
}
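
/*
 * Load a logical-channel descriptor: the first link is written to LCPA and
 * any remaining links are chained through LCLA entries, with linkback to the
 * first LCLA entry for cyclic transfers.
 */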
614 static void d40_log_lli_to_lcxa(struct d40_chan
*chan
, struct d40_desc
*desc
)
616 struct d40_lcla_pool
*pool
= &chan
->base
->lcla_pool
;
617 struct d40_log_lli_bidir
*lli
= &desc
->lli_log
;
618 int lli_current
= desc
->lli_current
;
619 int lli_len
= desc
->lli_len
;
620 bool cyclic
= desc
->cyclic
;
621 int curr_lcla
= -EINVAL
;
623 bool use_esram_lcla
= chan
->base
->plat_data
->use_esram_lcla
;
	 * We may have partially running cyclic transfers, in case we didn't get
	 * enough LCLA entries.
630 linkback
= cyclic
&& lli_current
== 0;
633 * For linkback, we need one LCLA even with only one link, because we
634 * can't link back to the one in LCPA space
636 if (linkback
|| (lli_len
- lli_current
> 1)) {
637 curr_lcla
= d40_lcla_alloc_one(chan
, desc
);
638 first_lcla
= curr_lcla
;
642 * For linkback, we normally load the LCPA in the loop since we need to
643 * link it to the second LCLA and not the first. However, if we
644 * couldn't even get a first LCLA, then we have to run in LCPA and
647 if (!linkback
|| curr_lcla
== -EINVAL
) {
648 unsigned int flags
= 0;
650 if (curr_lcla
== -EINVAL
)
651 flags
|= LLI_TERM_INT
;
653 d40_log_lli_lcpa_write(chan
->lcpa
,
654 &lli
->dst
[lli_current
],
655 &lli
->src
[lli_current
],
664 for (; lli_current
< lli_len
; lli_current
++) {
665 unsigned int lcla_offset
= chan
->phy_chan
->num
* 1024 +
667 struct d40_log_lli
*lcla
= pool
->base
+ lcla_offset
;
668 unsigned int flags
= 0;
671 if (lli_current
+ 1 < lli_len
)
672 next_lcla
= d40_lcla_alloc_one(chan
, desc
);
674 next_lcla
= linkback
? first_lcla
: -EINVAL
;
676 if (cyclic
|| next_lcla
== -EINVAL
)
677 flags
|= LLI_TERM_INT
;
679 if (linkback
&& curr_lcla
== first_lcla
) {
680 /* First link goes in both LCPA and LCLA */
681 d40_log_lli_lcpa_write(chan
->lcpa
,
682 &lli
->dst
[lli_current
],
683 &lli
->src
[lli_current
],
688 * One unused LCLA in the cyclic case if the very first
691 d40_log_lli_lcla_write(lcla
,
692 &lli
->dst
[lli_current
],
693 &lli
->src
[lli_current
],
697 * Cache maintenance is not needed if lcla is
700 if (!use_esram_lcla
) {
701 dma_sync_single_range_for_device(chan
->base
->dev
,
702 pool
->dma_addr
, lcla_offset
,
703 2 * sizeof(struct d40_log_lli
),
706 curr_lcla
= next_lcla
;
708 if (curr_lcla
== -EINVAL
|| curr_lcla
== first_lcla
) {
715 desc
->lli_current
= lli_current
;
718 static void d40_desc_load(struct d40_chan
*d40c
, struct d40_desc
*d40d
)
720 if (chan_is_physical(d40c
)) {
721 d40_phy_lli_load(d40c
, d40d
);
722 d40d
->lli_current
= d40d
->lli_len
;
724 d40_log_lli_to_lcxa(d40c
, d40d
);
727 static struct d40_desc
*d40_first_active_get(struct d40_chan
*d40c
)
731 if (list_empty(&d40c
->active
))
734 d
= list_first_entry(&d40c
->active
,
740 /* remove desc from current queue and add it to the pending_queue */
741 static void d40_desc_queue(struct d40_chan
*d40c
, struct d40_desc
*desc
)
743 d40_desc_remove(desc
);
744 desc
->is_in_client_list
= false;
745 list_add_tail(&desc
->node
, &d40c
->pending_queue
);
748 static struct d40_desc
*d40_first_pending(struct d40_chan
*d40c
)
752 if (list_empty(&d40c
->pending_queue
))
755 d
= list_first_entry(&d40c
->pending_queue
,
761 static struct d40_desc
*d40_first_queued(struct d40_chan
*d40c
)
765 if (list_empty(&d40c
->queue
))
768 d
= list_first_entry(&d40c
->queue
,
774 static int d40_psize_2_burst_size(bool is_log
, int psize
)
777 if (psize
== STEDMA40_PSIZE_LOG_1
)
780 if (psize
== STEDMA40_PSIZE_PHY_1
)
788 * The dma only supports transmitting packages up to
789 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
790 * dma elements required to send the entire sg list
792 static int d40_size_2_dmalen(int size
, u32 data_width1
, u32 data_width2
)
795 u32 max_w
= max(data_width1
, data_width2
);
796 u32 min_w
= min(data_width1
, data_width2
);
797 u32 seg_max
= ALIGN(STEDMA40_MAX_SEG_SIZE
<< min_w
, 1 << max_w
);
799 if (seg_max
> STEDMA40_MAX_SEG_SIZE
)
800 seg_max
-= (1 << max_w
);
802 if (!IS_ALIGNED(size
, 1 << max_w
))
808 dmalen
= size
/ seg_max
;
809 if (dmalen
* seg_max
< size
)
815 static int d40_sg_2_dmalen(struct scatterlist
*sgl
, int sg_len
,
816 u32 data_width1
, u32 data_width2
)
818 struct scatterlist
*sg
;
823 for_each_sg(sgl
, sg
, sg_len
, i
) {
824 ret
= d40_size_2_dmalen(sg_dma_len(sg
),
825 data_width1
, data_width2
);
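
/*
 * Save the registers listed in @regaddr into @backup, or restore them from
 * @backup, depending on @save.
 */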
835 static void dma40_backup(void __iomem
*baseaddr
, u32
*backup
,
836 u32
*regaddr
, int num
, bool save
)
840 for (i
= 0; i
< num
; i
++) {
841 void __iomem
*addr
= baseaddr
+ regaddr
[i
];
844 backup
[i
] = readl_relaxed(addr
);
846 writel_relaxed(backup
[i
], addr
);
850 static void d40_save_restore_registers(struct d40_base
*base
, bool save
)
854 /* Save/Restore channel specific registers */
855 for (i
= 0; i
< base
->num_phy_chans
; i
++) {
859 if (base
->phy_res
[i
].reserved
)
862 addr
= base
->virtbase
+ D40_DREG_PCBASE
+ i
* D40_DREG_PCDELTA
;
863 idx
= i
* ARRAY_SIZE(d40_backup_regs_chan
);
865 dma40_backup(addr
, &base
->reg_val_backup_chan
[idx
],
866 d40_backup_regs_chan
,
867 ARRAY_SIZE(d40_backup_regs_chan
),
871 /* Save/Restore global registers */
872 dma40_backup(base
->virtbase
, base
->reg_val_backup
,
873 d40_backup_regs
, ARRAY_SIZE(d40_backup_regs
),
876 /* Save/Restore registers only existing on dma40 v3 and later */
878 dma40_backup(base
->virtbase
, base
->reg_val_backup_v3
,
880 ARRAY_SIZE(d40_backup_regs_v3
),
884 static void d40_save_restore_registers(struct d40_base
*base
, bool save
)
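
/*
 * Issue a command (RUN/STOP/SUSPEND_REQ) to a physical channel through the
 * ACTIVE/ACTIVO register and, for suspend requests, poll until the channel
 * reports SUSPENDED or STOP (bounded by D40_SUSPEND_MAX_IT).
 */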
889 static int __d40_execute_command_phy(struct d40_chan
*d40c
,
890 enum d40_command command
)
894 void __iomem
*active_reg
;
899 if (command
== D40_DMA_STOP
) {
900 ret
= __d40_execute_command_phy(d40c
, D40_DMA_SUSPEND_REQ
);
905 spin_lock_irqsave(&d40c
->base
->execmd_lock
, flags
);
907 if (d40c
->phy_chan
->num
% 2 == 0)
908 active_reg
= d40c
->base
->virtbase
+ D40_DREG_ACTIVE
;
910 active_reg
= d40c
->base
->virtbase
+ D40_DREG_ACTIVO
;
912 if (command
== D40_DMA_SUSPEND_REQ
) {
913 status
= (readl(active_reg
) &
914 D40_CHAN_POS_MASK(d40c
->phy_chan
->num
)) >>
915 D40_CHAN_POS(d40c
->phy_chan
->num
);
917 if (status
== D40_DMA_SUSPENDED
|| status
== D40_DMA_STOP
)
921 wmask
= 0xffffffff & ~(D40_CHAN_POS_MASK(d40c
->phy_chan
->num
));
922 writel(wmask
| (command
<< D40_CHAN_POS(d40c
->phy_chan
->num
)),
925 if (command
== D40_DMA_SUSPEND_REQ
) {
927 for (i
= 0 ; i
< D40_SUSPEND_MAX_IT
; i
++) {
928 status
= (readl(active_reg
) &
929 D40_CHAN_POS_MASK(d40c
->phy_chan
->num
)) >>
930 D40_CHAN_POS(d40c
->phy_chan
->num
);
934 * Reduce the number of bus accesses while
935 * waiting for the DMA to suspend.
939 if (status
== D40_DMA_STOP
||
940 status
== D40_DMA_SUSPENDED
)
944 if (i
== D40_SUSPEND_MAX_IT
) {
946 "unable to suspend the chl %d (log: %d) status %x\n",
947 d40c
->phy_chan
->num
, d40c
->log_num
,
955 spin_unlock_irqrestore(&d40c
->base
->execmd_lock
, flags
);
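
/*
 * Drop every descriptor this channel still owns: active, queued, pending,
 * client-owned and prepared ones.
 */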
959 static void d40_term_all(struct d40_chan
*d40c
)
961 struct d40_desc
*d40d
;
964 /* Release active descriptors */
965 while ((d40d
= d40_first_active_get(d40c
))) {
966 d40_desc_remove(d40d
);
967 d40_desc_free(d40c
, d40d
);
970 /* Release queued descriptors waiting for transfer */
971 while ((d40d
= d40_first_queued(d40c
))) {
972 d40_desc_remove(d40d
);
973 d40_desc_free(d40c
, d40d
);
976 /* Release pending descriptors */
977 while ((d40d
= d40_first_pending(d40c
))) {
978 d40_desc_remove(d40d
);
979 d40_desc_free(d40c
, d40d
);
982 /* Release client owned descriptors */
983 if (!list_empty(&d40c
->client
))
984 list_for_each_entry_safe(d40d
, _d
, &d40c
->client
, node
) {
985 d40_desc_remove(d40d
);
986 d40_desc_free(d40c
, d40d
);
989 /* Release descriptors in prepare queue */
990 if (!list_empty(&d40c
->prepare_queue
))
991 list_for_each_entry_safe(d40d
, _d
,
992 &d40c
->prepare_queue
, node
) {
993 d40_desc_remove(d40d
);
994 d40_desc_free(d40c
, d40d
);
997 d40c
->pending_tx
= 0;
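
/*
 * Activate, deactivate or suspend a single event line in the channel's
 * SSLNK/SDLNK register, including the polling/retry workarounds the
 * hardware needs.
 */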
1000 static void __d40_config_set_event(struct d40_chan
*d40c
,
1001 enum d40_events event_type
, u32 event
,
1004 void __iomem
*addr
= chan_base(d40c
) + reg
;
1008 switch (event_type
) {
1010 case D40_DEACTIVATE_EVENTLINE
:
1012 writel((D40_DEACTIVATE_EVENTLINE
<< D40_EVENTLINE_POS(event
))
1013 | ~D40_EVENTLINE_MASK(event
), addr
);
1016 case D40_SUSPEND_REQ_EVENTLINE
:
1017 status
= (readl(addr
) & D40_EVENTLINE_MASK(event
)) >>
1018 D40_EVENTLINE_POS(event
);
1020 if (status
== D40_DEACTIVATE_EVENTLINE
||
1021 status
== D40_SUSPEND_REQ_EVENTLINE
)
1024 writel((D40_SUSPEND_REQ_EVENTLINE
<< D40_EVENTLINE_POS(event
))
1025 | ~D40_EVENTLINE_MASK(event
), addr
);
1027 for (tries
= 0 ; tries
< D40_SUSPEND_MAX_IT
; tries
++) {
1029 status
= (readl(addr
) & D40_EVENTLINE_MASK(event
)) >>
1030 D40_EVENTLINE_POS(event
);
1034 * Reduce the number of bus accesses while
1035 * waiting for the DMA to suspend.
1039 if (status
== D40_DEACTIVATE_EVENTLINE
)
1043 if (tries
== D40_SUSPEND_MAX_IT
) {
1045 "unable to stop the event_line chl %d (log: %d)"
1046 "status %x\n", d40c
->phy_chan
->num
,
1047 d40c
->log_num
, status
);
1051 case D40_ACTIVATE_EVENTLINE
:
1053 * The hardware sometimes doesn't register the enable when src and dst
1054 * event lines are active on the same logical channel. Retry to ensure
1055 * it does. Usually only one retry is sufficient.
1059 writel((D40_ACTIVATE_EVENTLINE
<<
1060 D40_EVENTLINE_POS(event
)) |
1061 ~D40_EVENTLINE_MASK(event
), addr
);
1063 if (readl(addr
) & D40_EVENTLINE_MASK(event
))
1068 dev_dbg(chan2dev(d40c
),
1069 "[%s] workaround enable S%cLNK (%d tries)\n",
1070 __func__
, reg
== D40_CHAN_REG_SSLNK
? 'S' : 'D',
1076 case D40_ROUND_EVENTLINE
:
1083 static void d40_config_set_event(struct d40_chan
*d40c
,
1084 enum d40_events event_type
)
1086 /* Enable event line connected to device (or memcpy) */
1087 if ((d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_MEM
) ||
1088 (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_PERIPH
)) {
1089 u32 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.src_dev_type
);
1091 __d40_config_set_event(d40c
, event_type
, event
,
1092 D40_CHAN_REG_SSLNK
);
1095 if (d40c
->dma_cfg
.dir
!= STEDMA40_PERIPH_TO_MEM
) {
1096 u32 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.dst_dev_type
);
1098 __d40_config_set_event(d40c
, event_type
, event
,
1099 D40_CHAN_REG_SDLNK
);
1103 static u32
d40_chan_has_events(struct d40_chan
*d40c
)
1105 void __iomem
*chanbase
= chan_base(d40c
);
1108 val
= readl(chanbase
+ D40_CHAN_REG_SSLNK
);
1109 val
|= readl(chanbase
+ D40_CHAN_REG_SDLNK
);
1115 __d40_execute_command_log(struct d40_chan
*d40c
, enum d40_command command
)
1117 unsigned long flags
;
1120 void __iomem
*active_reg
;
1122 if (d40c
->phy_chan
->num
% 2 == 0)
1123 active_reg
= d40c
->base
->virtbase
+ D40_DREG_ACTIVE
;
1125 active_reg
= d40c
->base
->virtbase
+ D40_DREG_ACTIVO
;
1128 spin_lock_irqsave(&d40c
->phy_chan
->lock
, flags
);
1132 case D40_DMA_SUSPEND_REQ
:
1134 active_status
= (readl(active_reg
) &
1135 D40_CHAN_POS_MASK(d40c
->phy_chan
->num
)) >>
1136 D40_CHAN_POS(d40c
->phy_chan
->num
);
1138 if (active_status
== D40_DMA_RUN
)
1139 d40_config_set_event(d40c
, D40_SUSPEND_REQ_EVENTLINE
);
1141 d40_config_set_event(d40c
, D40_DEACTIVATE_EVENTLINE
);
1143 if (!d40_chan_has_events(d40c
) && (command
== D40_DMA_STOP
))
1144 ret
= __d40_execute_command_phy(d40c
, command
);
1150 d40_config_set_event(d40c
, D40_ACTIVATE_EVENTLINE
);
1151 ret
= __d40_execute_command_phy(d40c
, command
);
1154 case D40_DMA_SUSPENDED
:
1159 spin_unlock_irqrestore(&d40c
->phy_chan
->lock
, flags
);
1163 static int d40_channel_execute_command(struct d40_chan
*d40c
,
1164 enum d40_command command
)
1166 if (chan_is_logical(d40c
))
1167 return __d40_execute_command_log(d40c
, command
);
1169 return __d40_execute_command_phy(d40c
, command
);
1172 static u32
d40_get_prmo(struct d40_chan
*d40c
)
1174 static const unsigned int phy_map
[] = {
1175 [STEDMA40_PCHAN_BASIC_MODE
]
1176 = D40_DREG_PRMO_PCHAN_BASIC
,
1177 [STEDMA40_PCHAN_MODULO_MODE
]
1178 = D40_DREG_PRMO_PCHAN_MODULO
,
1179 [STEDMA40_PCHAN_DOUBLE_DST_MODE
]
1180 = D40_DREG_PRMO_PCHAN_DOUBLE_DST
,
1182 static const unsigned int log_map
[] = {
1183 [STEDMA40_LCHAN_SRC_PHY_DST_LOG
]
1184 = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG
,
1185 [STEDMA40_LCHAN_SRC_LOG_DST_PHY
]
1186 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY
,
1187 [STEDMA40_LCHAN_SRC_LOG_DST_LOG
]
1188 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG
,
1191 if (chan_is_physical(d40c
))
1192 return phy_map
[d40c
->dma_cfg
.mode_opt
];
1194 return log_map
[d40c
->dma_cfg
.mode_opt
];
1197 static void d40_config_write(struct d40_chan
*d40c
)
1202 /* Odd addresses are even addresses + 4 */
1203 addr_base
= (d40c
->phy_chan
->num
% 2) * 4;
1204 /* Setup channel mode to logical or physical */
1205 var
= ((u32
)(chan_is_logical(d40c
)) + 1) <<
1206 D40_CHAN_POS(d40c
->phy_chan
->num
);
1207 writel(var
, d40c
->base
->virtbase
+ D40_DREG_PRMSE
+ addr_base
);
1209 /* Setup operational mode option register */
1210 var
= d40_get_prmo(d40c
) << D40_CHAN_POS(d40c
->phy_chan
->num
);
1212 writel(var
, d40c
->base
->virtbase
+ D40_DREG_PRMOE
+ addr_base
);
1214 if (chan_is_logical(d40c
)) {
1215 int lidx
= (d40c
->phy_chan
->num
<< D40_SREG_ELEM_LOG_LIDX_POS
)
1216 & D40_SREG_ELEM_LOG_LIDX_MASK
;
1217 void __iomem
*chanbase
= chan_base(d40c
);
1219 /* Set default config for CFG reg */
1220 writel(d40c
->src_def_cfg
, chanbase
+ D40_CHAN_REG_SSCFG
);
1221 writel(d40c
->dst_def_cfg
, chanbase
+ D40_CHAN_REG_SDCFG
);
1223 /* Set LIDX for lcla */
1224 writel(lidx
, chanbase
+ D40_CHAN_REG_SSELT
);
1225 writel(lidx
, chanbase
+ D40_CHAN_REG_SDELT
);
1227 /* Clear LNK which will be used by d40_chan_has_events() */
1228 writel(0, chanbase
+ D40_CHAN_REG_SSLNK
);
1229 writel(0, chanbase
+ D40_CHAN_REG_SDLNK
);
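
/*
 * Remaining bytes of the current transfer, computed from the element count
 * in LCPA (logical channels) or the SDELT register (physical channels).
 */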
1233 static u32
d40_residue(struct d40_chan
*d40c
)
1237 if (chan_is_logical(d40c
))
1238 num_elt
= (readl(&d40c
->lcpa
->lcsp2
) & D40_MEM_LCSP2_ECNT_MASK
)
1239 >> D40_MEM_LCSP2_ECNT_POS
;
1241 u32 val
= readl(chan_base(d40c
) + D40_CHAN_REG_SDELT
);
1242 num_elt
= (val
& D40_SREG_ELEM_PHY_ECNT_MASK
)
1243 >> D40_SREG_ELEM_PHY_ECNT_POS
;
1246 return num_elt
* (1 << d40c
->dma_cfg
.dst_info
.data_width
);
1249 static bool d40_tx_is_linked(struct d40_chan
*d40c
)
1253 if (chan_is_logical(d40c
))
1254 is_link
= readl(&d40c
->lcpa
->lcsp3
) & D40_MEM_LCSP3_DLOS_MASK
;
1256 is_link
= readl(chan_base(d40c
) + D40_CHAN_REG_SDLNK
)
1257 & D40_SREG_LNK_PHYS_LNK_MASK
;
1262 static int d40_pause(struct d40_chan
*d40c
)
1265 unsigned long flags
;
1270 pm_runtime_get_sync(d40c
->base
->dev
);
1271 spin_lock_irqsave(&d40c
->lock
, flags
);
1273 res
= d40_channel_execute_command(d40c
, D40_DMA_SUSPEND_REQ
);
1275 pm_runtime_mark_last_busy(d40c
->base
->dev
);
1276 pm_runtime_put_autosuspend(d40c
->base
->dev
);
1277 spin_unlock_irqrestore(&d40c
->lock
, flags
);
1281 static int d40_resume(struct d40_chan
*d40c
)
1284 unsigned long flags
;
1289 spin_lock_irqsave(&d40c
->lock
, flags
);
1290 pm_runtime_get_sync(d40c
->base
->dev
);
1292 /* If bytes left to transfer or linked tx resume job */
1293 if (d40_residue(d40c
) || d40_tx_is_linked(d40c
))
1294 res
= d40_channel_execute_command(d40c
, D40_DMA_RUN
);
1296 pm_runtime_mark_last_busy(d40c
->base
->dev
);
1297 pm_runtime_put_autosuspend(d40c
->base
->dev
);
1298 spin_unlock_irqrestore(&d40c
->lock
, flags
);
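
/* dmaengine tx_submit hook: assign a cookie and queue the descriptor. */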
1302 static dma_cookie_t
d40_tx_submit(struct dma_async_tx_descriptor
*tx
)
1304 struct d40_chan
*d40c
= container_of(tx
->chan
,
1307 struct d40_desc
*d40d
= container_of(tx
, struct d40_desc
, txd
);
1308 unsigned long flags
;
1309 dma_cookie_t cookie
;
1311 spin_lock_irqsave(&d40c
->lock
, flags
);
1312 cookie
= dma_cookie_assign(tx
);
1313 d40_desc_queue(d40c
, d40d
);
1314 spin_unlock_irqrestore(&d40c
->lock
, flags
);
1319 static int d40_start(struct d40_chan
*d40c
)
1321 return d40_channel_execute_command(d40c
, D40_DMA_RUN
);
1324 static struct d40_desc
*d40_queue_start(struct d40_chan
*d40c
)
1326 struct d40_desc
*d40d
;
1329 /* Start queued jobs, if any */
1330 d40d
= d40_first_queued(d40c
);
1335 pm_runtime_get_sync(d40c
->base
->dev
);
1338 /* Remove from queue */
1339 d40_desc_remove(d40d
);
1341 /* Add to active queue */
1342 d40_desc_submit(d40c
, d40d
);
1344 /* Initiate DMA job */
1345 d40_desc_load(d40c
, d40d
);
1348 err
= d40_start(d40c
);
1357 /* called from interrupt context */
1358 static void dma_tc_handle(struct d40_chan
*d40c
)
1360 struct d40_desc
*d40d
;
1362 /* Get first active entry from list */
1363 d40d
= d40_first_active_get(d40c
);
		 * If this was a partially loaded list, we need to reload
		 * it, but only when the list is completed.  We need to check
		 * for done because the interrupt will hit for every link, and
		 * not just the last one.
1375 if (d40d
->lli_current
< d40d
->lli_len
1376 && !d40_tx_is_linked(d40c
)
1377 && !d40_residue(d40c
)) {
1378 d40_lcla_free_all(d40c
, d40d
);
1379 d40_desc_load(d40c
, d40d
);
1380 (void) d40_start(d40c
);
1382 if (d40d
->lli_current
== d40d
->lli_len
)
1383 d40d
->lli_current
= 0;
1386 d40_lcla_free_all(d40c
, d40d
);
1388 if (d40d
->lli_current
< d40d
->lli_len
) {
1389 d40_desc_load(d40c
, d40d
);
1391 (void) d40_start(d40c
);
1395 if (d40_queue_start(d40c
) == NULL
)
1397 pm_runtime_mark_last_busy(d40c
->base
->dev
);
1398 pm_runtime_put_autosuspend(d40c
->base
->dev
);
1402 tasklet_schedule(&d40c
->tasklet
);
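
/*
 * Bottom half run after a transfer-complete interrupt: complete the cookie,
 * recycle acked descriptors or hand them to the client list, and invoke the
 * client callback.
 */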
1406 static void dma_tasklet(unsigned long data
)
1408 struct d40_chan
*d40c
= (struct d40_chan
*) data
;
1409 struct d40_desc
*d40d
;
1410 unsigned long flags
;
1411 dma_async_tx_callback callback
;
1412 void *callback_param
;
1414 spin_lock_irqsave(&d40c
->lock
, flags
);
1416 /* Get first active entry from list */
1417 d40d
= d40_first_active_get(d40c
);
1422 dma_cookie_complete(&d40d
->txd
);
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the client.
1428 if (d40c
->pending_tx
== 0) {
1429 spin_unlock_irqrestore(&d40c
->lock
, flags
);
1433 /* Callback to client */
1434 callback
= d40d
->txd
.callback
;
1435 callback_param
= d40d
->txd
.callback_param
;
1437 if (!d40d
->cyclic
) {
1438 if (async_tx_test_ack(&d40d
->txd
)) {
1439 d40_desc_remove(d40d
);
1440 d40_desc_free(d40c
, d40d
);
1442 if (!d40d
->is_in_client_list
) {
1443 d40_desc_remove(d40d
);
1444 d40_lcla_free_all(d40c
, d40d
);
1445 list_add_tail(&d40d
->node
, &d40c
->client
);
1446 d40d
->is_in_client_list
= true;
1453 if (d40c
->pending_tx
)
1454 tasklet_schedule(&d40c
->tasklet
);
1456 spin_unlock_irqrestore(&d40c
->lock
, flags
);
1458 if (callback
&& (d40d
->txd
.flags
& DMA_PREP_INTERRUPT
))
1459 callback(callback_param
);
	/* Rescue manoeuvre if receiving double interrupts */
1465 if (d40c
->pending_tx
> 0)
1467 spin_unlock_irqrestore(&d40c
->lock
, flags
);
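
/*
 * Interrupt handler: scan the logical and physical interrupt status
 * registers, acknowledge each set bit and dispatch to dma_tc_handle() or
 * log an error.
 */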
1470 static irqreturn_t
d40_handle_interrupt(int irq
, void *data
)
1472 static const struct d40_interrupt_lookup il
[] = {
1473 {D40_DREG_LCTIS0
, D40_DREG_LCICR0
, false, 0},
1474 {D40_DREG_LCTIS1
, D40_DREG_LCICR1
, false, 32},
1475 {D40_DREG_LCTIS2
, D40_DREG_LCICR2
, false, 64},
1476 {D40_DREG_LCTIS3
, D40_DREG_LCICR3
, false, 96},
1477 {D40_DREG_LCEIS0
, D40_DREG_LCICR0
, true, 0},
1478 {D40_DREG_LCEIS1
, D40_DREG_LCICR1
, true, 32},
1479 {D40_DREG_LCEIS2
, D40_DREG_LCICR2
, true, 64},
1480 {D40_DREG_LCEIS3
, D40_DREG_LCICR3
, true, 96},
1481 {D40_DREG_PCTIS
, D40_DREG_PCICR
, false, D40_PHY_CHAN
},
1482 {D40_DREG_PCEIS
, D40_DREG_PCICR
, true, D40_PHY_CHAN
},
1486 u32 regs
[ARRAY_SIZE(il
)];
1490 struct d40_chan
*d40c
;
1491 unsigned long flags
;
1492 struct d40_base
*base
= data
;
1494 spin_lock_irqsave(&base
->interrupt_lock
, flags
);
1496 /* Read interrupt status of both logical and physical channels */
1497 for (i
= 0; i
< ARRAY_SIZE(il
); i
++)
1498 regs
[i
] = readl(base
->virtbase
+ il
[i
].src
);
1502 chan
= find_next_bit((unsigned long *)regs
,
1503 BITS_PER_LONG
* ARRAY_SIZE(il
), chan
+ 1);
1505 /* No more set bits found? */
1506 if (chan
== BITS_PER_LONG
* ARRAY_SIZE(il
))
1509 row
= chan
/ BITS_PER_LONG
;
1510 idx
= chan
& (BITS_PER_LONG
- 1);
1513 writel(1 << idx
, base
->virtbase
+ il
[row
].clr
);
1515 if (il
[row
].offset
== D40_PHY_CHAN
)
1516 d40c
= base
->lookup_phy_chans
[idx
];
1518 d40c
= base
->lookup_log_chans
[il
[row
].offset
+ idx
];
1519 spin_lock(&d40c
->lock
);
1521 if (!il
[row
].is_error
)
1522 dma_tc_handle(d40c
);
1524 d40_err(base
->dev
, "IRQ chan: %ld offset %d idx %d\n",
1525 chan
, il
[row
].offset
, idx
);
1527 spin_unlock(&d40c
->lock
);
1530 spin_unlock_irqrestore(&base
->interrupt_lock
, flags
);
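
/* Sanity check a client channel configuration before accepting it. */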
1535 static int d40_validate_conf(struct d40_chan
*d40c
,
1536 struct stedma40_chan_cfg
*conf
)
1539 u32 dst_event_group
= D40_TYPE_TO_GROUP(conf
->dst_dev_type
);
1540 u32 src_event_group
= D40_TYPE_TO_GROUP(conf
->src_dev_type
);
1541 bool is_log
= conf
->mode
== STEDMA40_MODE_LOGICAL
;
1544 chan_err(d40c
, "Invalid direction.\n");
1548 if (conf
->dst_dev_type
!= STEDMA40_DEV_DST_MEMORY
&&
1549 d40c
->base
->plat_data
->dev_tx
[conf
->dst_dev_type
] == 0 &&
1550 d40c
->runtime_addr
== 0) {
1552 chan_err(d40c
, "Invalid TX channel address (%d)\n",
1553 conf
->dst_dev_type
);
1557 if (conf
->src_dev_type
!= STEDMA40_DEV_SRC_MEMORY
&&
1558 d40c
->base
->plat_data
->dev_rx
[conf
->src_dev_type
] == 0 &&
1559 d40c
->runtime_addr
== 0) {
1560 chan_err(d40c
, "Invalid RX channel address (%d)\n",
1561 conf
->src_dev_type
);
1565 if (conf
->dir
== STEDMA40_MEM_TO_PERIPH
&&
1566 dst_event_group
== STEDMA40_DEV_DST_MEMORY
) {
1567 chan_err(d40c
, "Invalid dst\n");
1571 if (conf
->dir
== STEDMA40_PERIPH_TO_MEM
&&
1572 src_event_group
== STEDMA40_DEV_SRC_MEMORY
) {
1573 chan_err(d40c
, "Invalid src\n");
1577 if (src_event_group
== STEDMA40_DEV_SRC_MEMORY
&&
1578 dst_event_group
== STEDMA40_DEV_DST_MEMORY
&& is_log
) {
1579 chan_err(d40c
, "No event line\n");
1583 if (conf
->dir
== STEDMA40_PERIPH_TO_PERIPH
&&
1584 (src_event_group
!= dst_event_group
)) {
1585 chan_err(d40c
, "Invalid event group\n");
1589 if (conf
->dir
== STEDMA40_PERIPH_TO_PERIPH
) {
1591 * DMAC HW supports it. Will be added to this driver,
1592 * in case any dma client requires it.
1594 chan_err(d40c
, "periph to periph not supported\n");
1598 if (d40_psize_2_burst_size(is_log
, conf
->src_info
.psize
) *
1599 (1 << conf
->src_info
.data_width
) !=
1600 d40_psize_2_burst_size(is_log
, conf
->dst_info
.psize
) *
1601 (1 << conf
->dst_info
.data_width
)) {
1603 * The DMAC hardware only supports
1604 * src (burst x width) == dst (burst x width)
1607 chan_err(d40c
, "src (burst x width) != dst (burst x width)\n");
1614 static bool d40_alloc_mask_set(struct d40_phy_res
*phy
,
1615 bool is_src
, int log_event_line
, bool is_log
,
1618 unsigned long flags
;
1619 spin_lock_irqsave(&phy
->lock
, flags
);
1621 *first_user
= ((phy
->allocated_src
| phy
->allocated_dst
)
1625 /* Physical interrupts are masked per physical full channel */
1626 if (phy
->allocated_src
== D40_ALLOC_FREE
&&
1627 phy
->allocated_dst
== D40_ALLOC_FREE
) {
1628 phy
->allocated_dst
= D40_ALLOC_PHY
;
1629 phy
->allocated_src
= D40_ALLOC_PHY
;
1635 /* Logical channel */
1637 if (phy
->allocated_src
== D40_ALLOC_PHY
)
1640 if (phy
->allocated_src
== D40_ALLOC_FREE
)
1641 phy
->allocated_src
= D40_ALLOC_LOG_FREE
;
1643 if (!(phy
->allocated_src
& (1 << log_event_line
))) {
1644 phy
->allocated_src
|= 1 << log_event_line
;
1649 if (phy
->allocated_dst
== D40_ALLOC_PHY
)
1652 if (phy
->allocated_dst
== D40_ALLOC_FREE
)
1653 phy
->allocated_dst
= D40_ALLOC_LOG_FREE
;
1655 if (!(phy
->allocated_dst
& (1 << log_event_line
))) {
1656 phy
->allocated_dst
|= 1 << log_event_line
;
1663 spin_unlock_irqrestore(&phy
->lock
, flags
);
1666 spin_unlock_irqrestore(&phy
->lock
, flags
);
1670 static bool d40_alloc_mask_free(struct d40_phy_res
*phy
, bool is_src
,
1673 unsigned long flags
;
1674 bool is_free
= false;
1676 spin_lock_irqsave(&phy
->lock
, flags
);
1677 if (!log_event_line
) {
1678 phy
->allocated_dst
= D40_ALLOC_FREE
;
1679 phy
->allocated_src
= D40_ALLOC_FREE
;
1684 /* Logical channel */
1686 phy
->allocated_src
&= ~(1 << log_event_line
);
1687 if (phy
->allocated_src
== D40_ALLOC_LOG_FREE
)
1688 phy
->allocated_src
= D40_ALLOC_FREE
;
1690 phy
->allocated_dst
&= ~(1 << log_event_line
);
1691 if (phy
->allocated_dst
== D40_ALLOC_LOG_FREE
)
1692 phy
->allocated_dst
= D40_ALLOC_FREE
;
1695 is_free
= ((phy
->allocated_src
| phy
->allocated_dst
) ==
1699 spin_unlock_irqrestore(&phy
->lock
, flags
);
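
/*
 * Find and claim a physical channel (or an event line on one, for logical
 * channels), honouring fixed-channel requests and spreading logical channels
 * across the event group's physical channels.
 */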
1704 static int d40_allocate_channel(struct d40_chan
*d40c
, bool *first_phy_user
)
1709 struct d40_phy_res
*phys
;
1714 bool is_log
= d40c
->dma_cfg
.mode
== STEDMA40_MODE_LOGICAL
;
1716 phys
= d40c
->base
->phy_res
;
1718 if (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_MEM
) {
1719 dev_type
= d40c
->dma_cfg
.src_dev_type
;
1720 log_num
= 2 * dev_type
;
1722 } else if (d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_PERIPH
||
1723 d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_MEM
) {
1724 /* dst event lines are used for logical memcpy */
1725 dev_type
= d40c
->dma_cfg
.dst_dev_type
;
1726 log_num
= 2 * dev_type
+ 1;
1731 event_group
= D40_TYPE_TO_GROUP(dev_type
);
1732 event_line
= D40_TYPE_TO_EVENT(dev_type
);
1735 if (d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_MEM
) {
1736 /* Find physical half channel */
1737 for (i
= 0; i
< d40c
->base
->num_phy_chans
; i
++) {
1739 if (d40_alloc_mask_set(&phys
[i
], is_src
,
1745 for (j
= 0; j
< d40c
->base
->num_phy_chans
; j
+= 8) {
1746 int phy_num
= j
+ event_group
* 2;
1747 for (i
= phy_num
; i
< phy_num
+ 2; i
++) {
1748 if (d40_alloc_mask_set(&phys
[i
],
1758 d40c
->phy_chan
= &phys
[i
];
1759 d40c
->log_num
= D40_PHY_CHAN
;
1765 /* Find logical channel */
1766 for (j
= 0; j
< d40c
->base
->num_phy_chans
; j
+= 8) {
1767 int phy_num
= j
+ event_group
* 2;
1769 if (d40c
->dma_cfg
.use_fixed_channel
) {
1770 i
= d40c
->dma_cfg
.phy_channel
;
1772 if ((i
!= phy_num
) && (i
!= phy_num
+ 1)) {
1773 dev_err(chan2dev(d40c
),
1774 "invalid fixed phy channel %d\n", i
);
1778 if (d40_alloc_mask_set(&phys
[i
], is_src
, event_line
,
1779 is_log
, first_phy_user
))
1782 dev_err(chan2dev(d40c
),
1783 "could not allocate fixed phy channel %d\n", i
);
		 * Spread logical channels across all available physical channels
		 * rather than packing every logical channel at the first available
		 * phy channel.
1793 for (i
= phy_num
; i
< phy_num
+ 2; i
++) {
1794 if (d40_alloc_mask_set(&phys
[i
], is_src
,
1800 for (i
= phy_num
+ 1; i
>= phy_num
; i
--) {
1801 if (d40_alloc_mask_set(&phys
[i
], is_src
,
1811 d40c
->phy_chan
= &phys
[i
];
1812 d40c
->log_num
= log_num
;
1816 d40c
->base
->lookup_log_chans
[d40c
->log_num
] = d40c
;
1818 d40c
->base
->lookup_phy_chans
[d40c
->phy_chan
->num
] = d40c
;
1824 static int d40_config_memcpy(struct d40_chan
*d40c
)
1826 dma_cap_mask_t cap
= d40c
->chan
.device
->cap_mask
;
1828 if (dma_has_cap(DMA_MEMCPY
, cap
) && !dma_has_cap(DMA_SLAVE
, cap
)) {
1829 d40c
->dma_cfg
= *d40c
->base
->plat_data
->memcpy_conf_log
;
1830 d40c
->dma_cfg
.src_dev_type
= STEDMA40_DEV_SRC_MEMORY
;
1831 d40c
->dma_cfg
.dst_dev_type
= d40c
->base
->plat_data
->
1832 memcpy
[d40c
->chan
.chan_id
];
1834 } else if (dma_has_cap(DMA_MEMCPY
, cap
) &&
1835 dma_has_cap(DMA_SLAVE
, cap
)) {
1836 d40c
->dma_cfg
= *d40c
->base
->plat_data
->memcpy_conf_phy
;
1838 chan_err(d40c
, "No memcpy\n");
1845 static int d40_free_dma(struct d40_chan
*d40c
)
1850 struct d40_phy_res
*phy
= d40c
->phy_chan
;
1853 /* Terminate all queued and active transfers */
1857 chan_err(d40c
, "phy == null\n");
1861 if (phy
->allocated_src
== D40_ALLOC_FREE
&&
1862 phy
->allocated_dst
== D40_ALLOC_FREE
) {
1863 chan_err(d40c
, "channel already free\n");
1867 if (d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_PERIPH
||
1868 d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_MEM
) {
1869 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.dst_dev_type
);
1871 } else if (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_MEM
) {
1872 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.src_dev_type
);
1875 chan_err(d40c
, "Unknown direction\n");
1879 pm_runtime_get_sync(d40c
->base
->dev
);
1880 res
= d40_channel_execute_command(d40c
, D40_DMA_STOP
);
1882 chan_err(d40c
, "stop failed\n");
1886 d40_alloc_mask_free(phy
, is_src
, chan_is_logical(d40c
) ? event
: 0);
1888 if (chan_is_logical(d40c
))
1889 d40c
->base
->lookup_log_chans
[d40c
->log_num
] = NULL
;
1891 d40c
->base
->lookup_phy_chans
[phy
->num
] = NULL
;
1894 pm_runtime_mark_last_busy(d40c
->base
->dev
);
1895 pm_runtime_put_autosuspend(d40c
->base
->dev
);
1899 d40c
->phy_chan
= NULL
;
1900 d40c
->configured
= false;
1903 pm_runtime_mark_last_busy(d40c
->base
->dev
);
1904 pm_runtime_put_autosuspend(d40c
->base
->dev
);
1908 static bool d40_is_paused(struct d40_chan
*d40c
)
1910 void __iomem
*chanbase
= chan_base(d40c
);
1911 bool is_paused
= false;
1912 unsigned long flags
;
1913 void __iomem
*active_reg
;
1917 spin_lock_irqsave(&d40c
->lock
, flags
);
1919 if (chan_is_physical(d40c
)) {
1920 if (d40c
->phy_chan
->num
% 2 == 0)
1921 active_reg
= d40c
->base
->virtbase
+ D40_DREG_ACTIVE
;
1923 active_reg
= d40c
->base
->virtbase
+ D40_DREG_ACTIVO
;
1925 status
= (readl(active_reg
) &
1926 D40_CHAN_POS_MASK(d40c
->phy_chan
->num
)) >>
1927 D40_CHAN_POS(d40c
->phy_chan
->num
);
1928 if (status
== D40_DMA_SUSPENDED
|| status
== D40_DMA_STOP
)
1934 if (d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_PERIPH
||
1935 d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_MEM
) {
1936 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.dst_dev_type
);
1937 status
= readl(chanbase
+ D40_CHAN_REG_SDLNK
);
1938 } else if (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_MEM
) {
1939 event
= D40_TYPE_TO_EVENT(d40c
->dma_cfg
.src_dev_type
);
1940 status
= readl(chanbase
+ D40_CHAN_REG_SSLNK
);
1942 chan_err(d40c
, "Unknown direction\n");
1946 status
= (status
& D40_EVENTLINE_MASK(event
)) >>
1947 D40_EVENTLINE_POS(event
);
1949 if (status
!= D40_DMA_RUN
)
1952 spin_unlock_irqrestore(&d40c
->lock
, flags
);
1958 static u32
stedma40_residue(struct dma_chan
*chan
)
1960 struct d40_chan
*d40c
=
1961 container_of(chan
, struct d40_chan
, chan
);
1963 unsigned long flags
;
1965 spin_lock_irqsave(&d40c
->lock
, flags
);
1966 bytes_left
= d40_residue(d40c
);
1967 spin_unlock_irqrestore(&d40c
->lock
, flags
);
1973 d40_prep_sg_log(struct d40_chan
*chan
, struct d40_desc
*desc
,
1974 struct scatterlist
*sg_src
, struct scatterlist
*sg_dst
,
1975 unsigned int sg_len
, dma_addr_t src_dev_addr
,
1976 dma_addr_t dst_dev_addr
)
1978 struct stedma40_chan_cfg
*cfg
= &chan
->dma_cfg
;
1979 struct stedma40_half_channel_info
*src_info
= &cfg
->src_info
;
1980 struct stedma40_half_channel_info
*dst_info
= &cfg
->dst_info
;
1983 ret
= d40_log_sg_to_lli(sg_src
, sg_len
,
1986 chan
->log_def
.lcsp1
,
1987 src_info
->data_width
,
1988 dst_info
->data_width
);
1990 ret
= d40_log_sg_to_lli(sg_dst
, sg_len
,
1993 chan
->log_def
.lcsp3
,
1994 dst_info
->data_width
,
1995 src_info
->data_width
);
1997 return ret
< 0 ? ret
: 0;
2001 d40_prep_sg_phy(struct d40_chan
*chan
, struct d40_desc
*desc
,
2002 struct scatterlist
*sg_src
, struct scatterlist
*sg_dst
,
2003 unsigned int sg_len
, dma_addr_t src_dev_addr
,
2004 dma_addr_t dst_dev_addr
)
2006 struct stedma40_chan_cfg
*cfg
= &chan
->dma_cfg
;
2007 struct stedma40_half_channel_info
*src_info
= &cfg
->src_info
;
2008 struct stedma40_half_channel_info
*dst_info
= &cfg
->dst_info
;
2009 unsigned long flags
= 0;
2013 flags
|= LLI_CYCLIC
| LLI_TERM_INT
;
2015 ret
= d40_phy_sg_to_lli(sg_src
, sg_len
, src_dev_addr
,
2017 virt_to_phys(desc
->lli_phy
.src
),
2019 src_info
, dst_info
, flags
);
2021 ret
= d40_phy_sg_to_lli(sg_dst
, sg_len
, dst_dev_addr
,
2023 virt_to_phys(desc
->lli_phy
.dst
),
2025 dst_info
, src_info
, flags
);
2027 dma_sync_single_for_device(chan
->base
->dev
, desc
->lli_pool
.dma_addr
,
2028 desc
->lli_pool
.size
, DMA_TO_DEVICE
);
2030 return ret
< 0 ? ret
: 0;
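
/*
 * Common descriptor setup for all prep_* calls: get a descriptor, compute the
 * number of LLIs, allocate the LLI pool and initialise the dmaengine txd.
 */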
2034 static struct d40_desc
*
2035 d40_prep_desc(struct d40_chan
*chan
, struct scatterlist
*sg
,
2036 unsigned int sg_len
, unsigned long dma_flags
)
2038 struct stedma40_chan_cfg
*cfg
= &chan
->dma_cfg
;
2039 struct d40_desc
*desc
;
2042 desc
= d40_desc_get(chan
);
2046 desc
->lli_len
= d40_sg_2_dmalen(sg
, sg_len
, cfg
->src_info
.data_width
,
2047 cfg
->dst_info
.data_width
);
2048 if (desc
->lli_len
< 0) {
2049 chan_err(chan
, "Unaligned size\n");
2053 ret
= d40_pool_lli_alloc(chan
, desc
, desc
->lli_len
);
2055 chan_err(chan
, "Could not allocate lli\n");
2060 desc
->lli_current
= 0;
2061 desc
->txd
.flags
= dma_flags
;
2062 desc
->txd
.tx_submit
= d40_tx_submit
;
2064 dma_async_tx_descriptor_init(&desc
->txd
, &chan
->chan
);
2069 d40_desc_free(chan
, desc
);
2074 d40_get_dev_addr(struct d40_chan
*chan
, enum dma_transfer_direction direction
)
2076 struct stedma40_platform_data
*plat
= chan
->base
->plat_data
;
2077 struct stedma40_chan_cfg
*cfg
= &chan
->dma_cfg
;
2078 dma_addr_t addr
= 0;
2080 if (chan
->runtime_addr
)
2081 return chan
->runtime_addr
;
2083 if (direction
== DMA_DEV_TO_MEM
)
2084 addr
= plat
->dev_rx
[cfg
->src_dev_type
];
2085 else if (direction
== DMA_MEM_TO_DEV
)
2086 addr
= plat
->dev_tx
[cfg
->dst_dev_type
];
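
/*
 * Central prep routine used by memcpy, slave_sg and cyclic: builds the LLIs
 * from the scatterlists for either a logical or a physical channel.
 */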
2091 static struct dma_async_tx_descriptor
*
2092 d40_prep_sg(struct dma_chan
*dchan
, struct scatterlist
*sg_src
,
2093 struct scatterlist
*sg_dst
, unsigned int sg_len
,
2094 enum dma_transfer_direction direction
, unsigned long dma_flags
)
2096 struct d40_chan
*chan
= container_of(dchan
, struct d40_chan
, chan
);
2097 dma_addr_t src_dev_addr
= 0;
2098 dma_addr_t dst_dev_addr
= 0;
2099 struct d40_desc
*desc
;
2100 unsigned long flags
;
2103 if (!chan
->phy_chan
) {
2104 chan_err(chan
, "Cannot prepare unallocated channel\n");
2109 spin_lock_irqsave(&chan
->lock
, flags
);
2111 desc
= d40_prep_desc(chan
, sg_src
, sg_len
, dma_flags
);
2115 if (sg_next(&sg_src
[sg_len
- 1]) == sg_src
)
2116 desc
->cyclic
= true;
2118 if (direction
!= DMA_TRANS_NONE
) {
2119 dma_addr_t dev_addr
= d40_get_dev_addr(chan
, direction
);
2121 if (direction
== DMA_DEV_TO_MEM
)
2122 src_dev_addr
= dev_addr
;
2123 else if (direction
== DMA_MEM_TO_DEV
)
2124 dst_dev_addr
= dev_addr
;
2127 if (chan_is_logical(chan
))
2128 ret
= d40_prep_sg_log(chan
, desc
, sg_src
, sg_dst
,
2129 sg_len
, src_dev_addr
, dst_dev_addr
);
2131 ret
= d40_prep_sg_phy(chan
, desc
, sg_src
, sg_dst
,
2132 sg_len
, src_dev_addr
, dst_dev_addr
);
2135 chan_err(chan
, "Failed to prepare %s sg job: %d\n",
2136 chan_is_logical(chan
) ? "log" : "phy", ret
);
	 * add descriptor to the prepare queue in order to be able
	 * to free it later in terminate_all
2144 list_add_tail(&desc
->node
, &chan
->prepare_queue
);
2146 spin_unlock_irqrestore(&chan
->lock
, flags
);
2152 d40_desc_free(chan
, desc
);
2153 spin_unlock_irqrestore(&chan
->lock
, flags
);
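
/*
 * Filter function for dma_request_channel(). A client would typically use it
 * roughly like this (sketch; the exact stedma40_chan_cfg fields depend on the
 * peripheral):
 *
 *	dma_cap_mask_t mask;
 *	struct stedma40_chan_cfg cfg = { .dir = STEDMA40_PERIPH_TO_MEM, ... };
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &cfg);
 */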
2157 bool stedma40_filter(struct dma_chan
*chan
, void *data
)
2159 struct stedma40_chan_cfg
*info
= data
;
2160 struct d40_chan
*d40c
=
2161 container_of(chan
, struct d40_chan
, chan
);
2165 err
= d40_validate_conf(d40c
, info
);
2167 d40c
->dma_cfg
= *info
;
2169 err
= d40_config_memcpy(d40c
);
2172 d40c
->configured
= true;
2176 EXPORT_SYMBOL(stedma40_filter
);
2178 static void __d40_set_prio_rt(struct d40_chan
*d40c
, int dev_type
, bool src
)
2180 bool realtime
= d40c
->dma_cfg
.realtime
;
2181 bool highprio
= d40c
->dma_cfg
.high_priority
;
2182 u32 prioreg
= highprio
? D40_DREG_PSEG1
: D40_DREG_PCEG1
;
2183 u32 rtreg
= realtime
? D40_DREG_RSEG1
: D40_DREG_RCEG1
;
2184 u32 event
= D40_TYPE_TO_EVENT(dev_type
);
2185 u32 group
= D40_TYPE_TO_GROUP(dev_type
);
2186 u32 bit
= 1 << event
;
2188 /* Destination event lines are stored in the upper halfword */
2192 writel(bit
, d40c
->base
->virtbase
+ prioreg
+ group
* 4);
2193 writel(bit
, d40c
->base
->virtbase
+ rtreg
+ group
* 4);
2196 static void d40_set_prio_realtime(struct d40_chan
*d40c
)
2198 if (d40c
->base
->rev
< 3)
2201 if ((d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_MEM
) ||
2202 (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_PERIPH
))
2203 __d40_set_prio_rt(d40c
, d40c
->dma_cfg
.src_dev_type
, true);
2205 if ((d40c
->dma_cfg
.dir
== STEDMA40_MEM_TO_PERIPH
) ||
2206 (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_PERIPH
))
2207 __d40_set_prio_rt(d40c
, d40c
->dma_cfg
.dst_dev_type
, false);
2210 /* DMA ENGINE functions */
2211 static int d40_alloc_chan_resources(struct dma_chan
*chan
)
2214 unsigned long flags
;
2215 struct d40_chan
*d40c
=
2216 container_of(chan
, struct d40_chan
, chan
);
2218 spin_lock_irqsave(&d40c
->lock
, flags
);
2220 dma_cookie_init(chan
);
2222 /* If no dma configuration is set use default configuration (memcpy) */
2223 if (!d40c
->configured
) {
2224 err
= d40_config_memcpy(d40c
);
2226 chan_err(d40c
, "Failed to configure memcpy channel\n");
2231 err
= d40_allocate_channel(d40c
, &is_free_phy
);
2233 chan_err(d40c
, "Failed to allocate channel\n");
2234 d40c
->configured
= false;
2238 pm_runtime_get_sync(d40c
->base
->dev
);
2239 /* Fill in basic CFG register values */
2240 d40_phy_cfg(&d40c
->dma_cfg
, &d40c
->src_def_cfg
,
2241 &d40c
->dst_def_cfg
, chan_is_logical(d40c
));
2243 d40_set_prio_realtime(d40c
);
2245 if (chan_is_logical(d40c
)) {
2246 d40_log_cfg(&d40c
->dma_cfg
,
2247 &d40c
->log_def
.lcsp1
, &d40c
->log_def
.lcsp3
);
2249 if (d40c
->dma_cfg
.dir
== STEDMA40_PERIPH_TO_MEM
)
2250 d40c
->lcpa
= d40c
->base
->lcpa_base
+
2251 d40c
->dma_cfg
.src_dev_type
* D40_LCPA_CHAN_SIZE
;
2253 d40c
->lcpa
= d40c
->base
->lcpa_base
+
2254 d40c
->dma_cfg
.dst_dev_type
*
2255 D40_LCPA_CHAN_SIZE
+ D40_LCPA_CHAN_DST_DELTA
;
2258 dev_dbg(chan2dev(d40c
), "allocated %s channel (phy %d%s)\n",
2259 chan_is_logical(d40c
) ? "logical" : "physical",
2260 d40c
->phy_chan
->num
,
2261 d40c
->dma_cfg
.use_fixed_channel
? ", fixed" : "");
2265 * Only write channel configuration to the DMA if the physical
2266 * resource is free. In case of multiple logical channels
2267 * on the same physical resource, only the first write is necessary.
2270 d40_config_write(d40c
);
2272 pm_runtime_mark_last_busy(d40c
->base
->dev
);
2273 pm_runtime_put_autosuspend(d40c
->base
->dev
);
2274 spin_unlock_irqrestore(&d40c
->lock
, flags
);
2278 static void d40_free_chan_resources(struct dma_chan
*chan
)
2280 struct d40_chan
*d40c
=
2281 container_of(chan
, struct d40_chan
, chan
);
2283 unsigned long flags
;
2285 if (d40c
->phy_chan
== NULL
) {
2286 chan_err(d40c
, "Cannot free unallocated channel\n");
2291 spin_lock_irqsave(&d40c
->lock
, flags
);
2293 err
= d40_free_dma(d40c
);
2296 chan_err(d40c
, "Failed to free channel\n");
2297 spin_unlock_irqrestore(&d40c
->lock
, flags
);
2300 static struct dma_async_tx_descriptor
*d40_prep_memcpy(struct dma_chan
*chan
,
2304 unsigned long dma_flags
)
2306 struct scatterlist dst_sg
;
2307 struct scatterlist src_sg
;
2309 sg_init_table(&dst_sg
, 1);
2310 sg_init_table(&src_sg
, 1);
2312 sg_dma_address(&dst_sg
) = dst
;
2313 sg_dma_address(&src_sg
) = src
;
2315 sg_dma_len(&dst_sg
) = size
;
2316 sg_dma_len(&src_sg
) = size
;
2318 return d40_prep_sg(chan
, &src_sg
, &dst_sg
, 1, DMA_NONE
, dma_flags
);
2321 static struct dma_async_tx_descriptor
*
2322 d40_prep_memcpy_sg(struct dma_chan
*chan
,
2323 struct scatterlist
*dst_sg
, unsigned int dst_nents
,
2324 struct scatterlist
*src_sg
, unsigned int src_nents
,
2325 unsigned long dma_flags
)
2327 if (dst_nents
!= src_nents
)
2330 return d40_prep_sg(chan
, src_sg
, dst_sg
, src_nents
, DMA_NONE
, dma_flags
);
2333 static struct dma_async_tx_descriptor
*d40_prep_slave_sg(struct dma_chan
*chan
,
2334 struct scatterlist
*sgl
,
2335 unsigned int sg_len
,
2336 enum dma_transfer_direction direction
,
2337 unsigned long dma_flags
,
2340 if (direction
!= DMA_DEV_TO_MEM
&& direction
!= DMA_MEM_TO_DEV
)
2343 return d40_prep_sg(chan
, sgl
, sgl
, sg_len
, direction
, dma_flags
);
2346 static struct dma_async_tx_descriptor
*
2347 dma40_prep_dma_cyclic(struct dma_chan
*chan
, dma_addr_t dma_addr
,
2348 size_t buf_len
, size_t period_len
,
2349 enum dma_transfer_direction direction
, unsigned long flags
,
2352 unsigned int periods
= buf_len
/ period_len
;
2353 struct dma_async_tx_descriptor
*txd
;
2354 struct scatterlist
*sg
;
2357 sg
= kcalloc(periods
+ 1, sizeof(struct scatterlist
), GFP_NOWAIT
);
2358 for (i
= 0; i
< periods
; i
++) {
2359 sg_dma_address(&sg
[i
]) = dma_addr
;
2360 sg_dma_len(&sg
[i
]) = period_len
;
2361 dma_addr
+= period_len
;
2364 sg
[periods
].offset
= 0;
2365 sg_dma_len(&sg
[periods
]) = 0;
2366 sg
[periods
].page_link
=
2367 ((unsigned long)sg
| 0x01) & ~0x02;
2369 txd
= d40_prep_sg(chan
, sg
, sg
, periods
, direction
,
2370 DMA_PREP_INTERRUPT
);
static enum dma_status d40_tx_status(struct dma_chan *chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        enum dma_status ret;

        if (d40c->phy_chan == NULL) {
                chan_err(d40c, "Cannot read status of unallocated channel\n");
                return -EINVAL;
        }

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret != DMA_SUCCESS)
                dma_set_residue(txstate, stedma40_residue(chan));

        if (d40_is_paused(d40c))
                ret = DMA_PAUSED;

        return ret;
}

static void d40_issue_pending(struct dma_chan *chan)
{
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        unsigned long flags;

        if (d40c->phy_chan == NULL) {
                chan_err(d40c, "Channel is not allocated!\n");
                return;
        }

        spin_lock_irqsave(&d40c->lock, flags);

        list_splice_tail_init(&d40c->pending_queue, &d40c->queue);

        /* Busy means that queued jobs are already being processed */
        if (!d40c->busy)
                (void) d40_queue_start(d40c);

        spin_unlock_irqrestore(&d40c->lock, flags);
}

static void d40_terminate_all(struct dma_chan *chan)
{
        unsigned long flags;
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        int ret;

        spin_lock_irqsave(&d40c->lock, flags);

        pm_runtime_get_sync(d40c->base->dev);
        ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
        if (ret)
                chan_err(d40c, "Failed to stop channel\n");

        d40_term_all(d40c);
        pm_runtime_mark_last_busy(d40c->base->dev);
        pm_runtime_put_autosuspend(d40c->base->dev);
        if (d40c->busy) {
                pm_runtime_mark_last_busy(d40c->base->dev);
                pm_runtime_put_autosuspend(d40c->base->dev);
        }
        d40c->busy = false;

        spin_unlock_irqrestore(&d40c->lock, flags);
}

static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
                            struct stedma40_half_channel_info *info,
                            enum dma_slave_buswidth width,
                            u32 maxburst)
{
        enum stedma40_periph_data_width addr_width;
        int psize;

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                addr_width = STEDMA40_BYTE_WIDTH;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                addr_width = STEDMA40_HALFWORD_WIDTH;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                addr_width = STEDMA40_WORD_WIDTH;
                break;
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                addr_width = STEDMA40_DOUBLEWORD_WIDTH;
                break;
        default:
                dev_err(d40c->base->dev,
                        "illegal peripheral address width "
                        "requested (%d)\n",
                        width);
                return -EINVAL;
        }

        if (chan_is_logical(d40c)) {
                if (maxburst >= 16)
                        psize = STEDMA40_PSIZE_LOG_16;
                else if (maxburst >= 8)
                        psize = STEDMA40_PSIZE_LOG_8;
                else if (maxburst >= 4)
                        psize = STEDMA40_PSIZE_LOG_4;
                else
                        psize = STEDMA40_PSIZE_LOG_1;
        } else {
                if (maxburst >= 16)
                        psize = STEDMA40_PSIZE_PHY_16;
                else if (maxburst >= 8)
                        psize = STEDMA40_PSIZE_PHY_8;
                else if (maxburst >= 4)
                        psize = STEDMA40_PSIZE_PHY_4;
                else
                        psize = STEDMA40_PSIZE_PHY_1;
        }

        info->data_width = addr_width;
        info->psize = psize;
        info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;

        return 0;
}

/* Runtime reconfiguration extension */
static int d40_set_runtime_config(struct dma_chan *chan,
                                  struct dma_slave_config *config)
{
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
        struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
        enum dma_slave_buswidth src_addr_width, dst_addr_width;
        dma_addr_t config_addr;
        u32 src_maxburst, dst_maxburst;
        int ret;

        src_addr_width = config->src_addr_width;
        src_maxburst = config->src_maxburst;
        dst_addr_width = config->dst_addr_width;
        dst_maxburst = config->dst_maxburst;

        if (config->direction == DMA_DEV_TO_MEM) {
                dma_addr_t dev_addr_rx =
                        d40c->base->plat_data->dev_rx[cfg->src_dev_type];

                config_addr = config->src_addr;
                if (dev_addr_rx)
                        dev_dbg(d40c->base->dev,
                                "channel has a pre-wired RX address %08x "
                                "overriding with %08x\n",
                                dev_addr_rx, config_addr);
                if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
                        dev_dbg(d40c->base->dev,
                                "channel was not configured for peripheral "
                                "to memory transfer (%d) overriding\n",
                                cfg->dir);
                cfg->dir = STEDMA40_PERIPH_TO_MEM;

                /* Configure the memory side */
                if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
                        dst_addr_width = src_addr_width;
                if (dst_maxburst == 0)
                        dst_maxburst = src_maxburst;

        } else if (config->direction == DMA_MEM_TO_DEV) {
                dma_addr_t dev_addr_tx =
                        d40c->base->plat_data->dev_tx[cfg->dst_dev_type];

                config_addr = config->dst_addr;
                if (dev_addr_tx)
                        dev_dbg(d40c->base->dev,
                                "channel has a pre-wired TX address %08x "
                                "overriding with %08x\n",
                                dev_addr_tx, config_addr);
                if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
                        dev_dbg(d40c->base->dev,
                                "channel was not configured for memory "
                                "to peripheral transfer (%d) overriding\n",
                                cfg->dir);
                cfg->dir = STEDMA40_MEM_TO_PERIPH;

                /* Configure the memory side */
                if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
                        src_addr_width = dst_addr_width;
                if (src_maxburst == 0)
                        src_maxburst = dst_maxburst;
        } else {
                dev_err(d40c->base->dev,
                        "unrecognized channel direction %d\n",
                        config->direction);
                return -EINVAL;
        }

        if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
                dev_err(d40c->base->dev,
                        "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
                        src_maxburst, src_addr_width,
                        dst_maxburst, dst_addr_width);
                return -EINVAL;
        }

        ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
                                          src_addr_width,
                                          src_maxburst);
        if (ret)
                return ret;

        ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
                                          dst_addr_width,
                                          dst_maxburst);
        if (ret)
                return ret;

        /* Fill in register values */
        if (chan_is_logical(d40c))
                d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
        else
                d40_phy_cfg(cfg, &d40c->src_def_cfg,
                            &d40c->dst_def_cfg, false);

        /* These settings will take precedence later */
        d40c->runtime_addr = config_addr;
        d40c->runtime_direction = config->direction;
        dev_dbg(d40c->base->dev,
                "configured channel %s for %s, data width %d/%d, "
                "maxburst %d/%d elements, LE, no flow control\n",
                dma_chan_name(chan),
                (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
                src_addr_width, dst_addr_width,
                src_maxburst, dst_maxburst);

        return 0;
}

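/*
 * Illustration only, not part of the driver: how a client driver would
 * normally reach d40_set_runtime_config() above, namely through the generic
 * dmaengine_slave_config() helper (which issues DMA_SLAVE_CONFIG).  The
 * function name, FIFO address, bus width and burst size are hypothetical
 * example values, not taken from any real platform data.
 */
#if 0
static int d40_example_slave_config(struct dma_chan *chan, dma_addr_t fifo)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,
        };

        /* DMA_SLAVE_CONFIG is routed to d40_set_runtime_config() */
        return dmaengine_slave_config(chan, &cfg);
}
#endif
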
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                       unsigned long arg)
{
        struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);

        if (d40c->phy_chan == NULL) {
                chan_err(d40c, "Channel is not allocated!\n");
                return -EINVAL;
        }

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                d40_terminate_all(chan);
                return 0;
        case DMA_PAUSE:
                return d40_pause(d40c);
        case DMA_RESUME:
                return d40_resume(d40c);
        case DMA_SLAVE_CONFIG:
                return d40_set_runtime_config(chan,
                        (struct dma_slave_config *) arg);
        default:
                break;
        }

        /* Other commands are unimplemented */
        return -ENXIO;
}

/* Initialization functions */

static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
                                 struct d40_chan *chans, int offset,
                                 int num_chans)
{
        int i = 0;
        struct d40_chan *d40c;

        INIT_LIST_HEAD(&dma->channels);

        for (i = offset; i < offset + num_chans; i++) {
                d40c = &chans[i];
                d40c->base = base;
                d40c->chan.device = dma;

                spin_lock_init(&d40c->lock);

                d40c->log_num = D40_PHY_CHAN;

                INIT_LIST_HEAD(&d40c->active);
                INIT_LIST_HEAD(&d40c->queue);
                INIT_LIST_HEAD(&d40c->pending_queue);
                INIT_LIST_HEAD(&d40c->client);
                INIT_LIST_HEAD(&d40c->prepare_queue);

                tasklet_init(&d40c->tasklet, dma_tasklet,
                             (unsigned long) d40c);

                list_add_tail(&d40c->chan.device_node,
                              &dma->channels);
        }
}

static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
{
        if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
                dev->device_prep_slave_sg = d40_prep_slave_sg;

        if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
                dev->device_prep_dma_memcpy = d40_prep_memcpy;

                /*
                 * This controller can only access addresses at even
                 * 32bit boundaries, i.e. 2^2.
                 */
                dev->copy_align = 2;
        }

        if (dma_has_cap(DMA_SG, dev->cap_mask))
                dev->device_prep_dma_sg = d40_prep_memcpy_sg;

        if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
                dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;

        dev->device_alloc_chan_resources = d40_alloc_chan_resources;
        dev->device_free_chan_resources = d40_free_chan_resources;
        dev->device_issue_pending = d40_issue_pending;
        dev->device_tx_status = d40_tx_status;
        dev->device_control = d40_control;
        dev->dev = base->dev;
}

static int __init d40_dmaengine_init(struct d40_base *base,
                                     int num_reserved_chans)
{
        int err;

        d40_chan_init(base, &base->dma_slave, base->log_chans,
                      0, base->num_log_chans);

        dma_cap_zero(base->dma_slave.cap_mask);
        dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

        d40_ops_init(base, &base->dma_slave);

        err = dma_async_device_register(&base->dma_slave);

        if (err) {
                d40_err(base->dev, "Failed to register slave channels\n");
                goto failure1;
        }

        d40_chan_init(base, &base->dma_memcpy, base->log_chans,
                      base->num_log_chans, base->plat_data->memcpy_len);

        dma_cap_zero(base->dma_memcpy.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
        dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);

        d40_ops_init(base, &base->dma_memcpy);

        err = dma_async_device_register(&base->dma_memcpy);

        if (err) {
                d40_err(base->dev,
                        "Failed to register memcpy only channels\n");
                goto failure2;
        }

        d40_chan_init(base, &base->dma_both, base->phy_chans,
                      0, num_reserved_chans);

        dma_cap_zero(base->dma_both.cap_mask);
        dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
        dma_cap_set(DMA_SG, base->dma_both.cap_mask);
        dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);

        d40_ops_init(base, &base->dma_both);
        err = dma_async_device_register(&base->dma_both);

        if (err) {
                d40_err(base->dev,
                        "Failed to register logical and physical capable channels\n");
                goto failure3;
        }
        return 0;
failure3:
        dma_async_device_unregister(&base->dma_memcpy);
failure2:
        dma_async_device_unregister(&base->dma_slave);
failure1:
        return err;
}

/* Suspend resume functionality */
#ifdef CONFIG_PM
static int dma40_pm_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct d40_base *base = platform_get_drvdata(pdev);
        int ret = 0;

        if (!pm_runtime_suspended(dev))
                return -EBUSY;

        if (base->lcpa_regulator)
                ret = regulator_disable(base->lcpa_regulator);
        return ret;
}

static int dma40_runtime_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct d40_base *base = platform_get_drvdata(pdev);

        d40_save_restore_registers(base, true);

        /* Don't disable/enable clocks for v1 due to HW bugs */
        if (base->rev != 1)
                writel_relaxed(base->gcc_pwr_off_mask,
                               base->virtbase + D40_DREG_GCC);

        return 0;
}

static int dma40_runtime_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct d40_base *base = platform_get_drvdata(pdev);

        if (base->initialized)
                d40_save_restore_registers(base, false);

        writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
                       base->virtbase + D40_DREG_GCC);
        return 0;
}

static int dma40_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct d40_base *base = platform_get_drvdata(pdev);
        int ret = 0;

        if (base->lcpa_regulator)
                ret = regulator_enable(base->lcpa_regulator);

        return ret;
}

static const struct dev_pm_ops dma40_pm_ops = {
        .suspend          = dma40_pm_suspend,
        .runtime_suspend  = dma40_runtime_suspend,
        .runtime_resume   = dma40_runtime_resume,
        .resume           = dma40_resume,
};
#define DMA40_PM_OPS    (&dma40_pm_ops)
#else
#define DMA40_PM_OPS    NULL
#endif

/* Initialization functions. */

static int __init d40_phy_res_init(struct d40_base *base)
{
        int i;
        int num_phy_chans_avail = 0;
        u32 val[2];
        int odd_even_bit = -2;
        int gcc = D40_DREG_GCC_ENA;

        val[0] = readl(base->virtbase + D40_DREG_PRSME);
        val[1] = readl(base->virtbase + D40_DREG_PRSMO);

        for (i = 0; i < base->num_phy_chans; i++) {
                base->phy_res[i].num = i;
                odd_even_bit += 2 * ((i % 2) == 0);
                if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
                        /* Mark security only channels as occupied */
                        base->phy_res[i].allocated_src = D40_ALLOC_PHY;
                        base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
                        base->phy_res[i].reserved = true;
                        gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
                                                       D40_DREG_GCC_SRC);
                        gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
                                                       D40_DREG_GCC_DST);
                } else {
                        base->phy_res[i].allocated_src = D40_ALLOC_FREE;
                        base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
                        base->phy_res[i].reserved = false;
                        num_phy_chans_avail++;
                }
                spin_lock_init(&base->phy_res[i].lock);
        }

        /* Mark disabled channels as occupied */
        for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
                int chan = base->plat_data->disabled_channels[i];

                base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
                base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
                base->phy_res[chan].reserved = true;
                gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
                                               D40_DREG_GCC_SRC);
                gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
                                               D40_DREG_GCC_DST);
                num_phy_chans_avail--;
        }

        dev_info(base->dev, "%d of %d physical DMA channels available\n",
                 num_phy_chans_avail, base->num_phy_chans);

        /* Verify settings extended vs standard */
        val[0] = readl(base->virtbase + D40_DREG_PRTYP);

        for (i = 0; i < base->num_phy_chans; i++) {

                if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
                    (val[0] & 0x3) != 1)
                        dev_info(base->dev,
                                 "[%s] INFO: channel %d is misconfigured (%d)\n",
                                 __func__, i, val[0] & 0x3);

                val[0] = val[0] >> 2;
        }

        /*
         * To keep things simple, enable all clocks initially.
         * The clocks will get managed later, after channel allocation.
         * The clocks for the event lines on which reserved channels exist
         * are not managed here.
         */
        writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
        base->gcc_pwr_off_mask = gcc;

        return num_phy_chans_avail;
}

static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
        struct stedma40_platform_data *plat_data;
        struct clk *clk = NULL;
        void __iomem *virtbase = NULL;
        struct resource *res = NULL;
        struct d40_base *base = NULL;
        int num_log_chans = 0;
        int num_phy_chans;
        int clk_ret = -EINVAL;
        int i;
        u32 pid;
        u32 cid;
        u8 rev;

        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
                d40_err(&pdev->dev, "No matching clock found\n");
                goto failure;
        }

        clk_ret = clk_prepare_enable(clk);
        if (clk_ret) {
                d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
                goto failure;
        }

        /* Get IO for DMAC base address */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
        if (!res)
                goto failure;

        if (request_mem_region(res->start, resource_size(res),
                               D40_NAME " I/O base") == NULL)
                goto failure;

        virtbase = ioremap(res->start, resource_size(res));
        if (!virtbase)
                goto failure;

        /* This is just a regular AMBA PrimeCell ID actually */
        for (pid = 0, i = 0; i < 4; i++)
                pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
                        & 255) << (i * 8);
        for (cid = 0, i = 0; i < 4; i++)
                cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
                        & 255) << (i * 8);

        if (cid != AMBA_CID) {
                d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
                goto failure;
        }
        if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
                d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
                        AMBA_MANF_BITS(pid),
                        AMBA_VENDOR_ST);
                goto failure;
        }

        /*
         * HW revision:
         * DB8500ed has revision 0
         * DB8500v1 has revision 2
         * DB8500v2 has revision 3
         */
        rev = AMBA_REV_BITS(pid);

        /* The number of physical channels on this HW */
        num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;

        dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
                 rev, res->start);

        if (rev < 2) {
                d40_err(&pdev->dev, "hardware revision: %d is not supported",
                        rev);
                goto failure;
        }

        plat_data = pdev->dev.platform_data;

        /* Count the number of logical channels in use */
        for (i = 0; i < plat_data->dev_len; i++)
                if (plat_data->dev_rx[i] != 0)
                        num_log_chans++;

        for (i = 0; i < plat_data->dev_len; i++)
                if (plat_data->dev_tx[i] != 0)
                        num_log_chans++;

        base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
                       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
                       sizeof(struct d40_chan), GFP_KERNEL);

        if (base == NULL) {
                d40_err(&pdev->dev, "Out of memory\n");
                goto failure;
        }

        base->rev = rev;
        base->clk = clk;
        base->num_phy_chans = num_phy_chans;
        base->num_log_chans = num_log_chans;
        base->phy_start = res->start;
        base->phy_size = resource_size(res);
        base->virtbase = virtbase;
        base->plat_data = plat_data;
        base->dev = &pdev->dev;
        base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
        base->log_chans = &base->phy_chans[num_phy_chans];

        base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
                                GFP_KERNEL);
        if (!base->phy_res)
                goto failure;

        base->lookup_phy_chans = kzalloc(num_phy_chans *
                                         sizeof(struct d40_chan *),
                                         GFP_KERNEL);
        if (!base->lookup_phy_chans)
                goto failure;

        if (num_log_chans + plat_data->memcpy_len) {
                /*
                 * The max number of logical channels is the number of event
                 * lines for all src and dst devices.
                 */
                base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
                                                 sizeof(struct d40_chan *),
                                                 GFP_KERNEL);
                if (!base->lookup_log_chans)
                        goto failure;
        }

        base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
                                            sizeof(d40_backup_regs_chan),
                                            GFP_KERNEL);
        if (!base->reg_val_backup_chan)
                goto failure;

        base->lcla_pool.alloc_map =
                kzalloc(num_phy_chans * sizeof(struct d40_desc *)
                        * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
        if (!base->lcla_pool.alloc_map)
                goto failure;

        base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL);
        if (base->desc_slab == NULL)
                goto failure;

        return base;

failure:
        if (!clk_ret)
                clk_disable_unprepare(clk);
        if (!IS_ERR(clk))
                clk_put(clk);
        if (virtbase)
                iounmap(virtbase);
        if (res)
                release_mem_region(res->start,
                                   resource_size(res));

        if (base) {
                kfree(base->lcla_pool.alloc_map);
                kfree(base->reg_val_backup_chan);
                kfree(base->lookup_log_chans);
                kfree(base->lookup_phy_chans);
                kfree(base->phy_res);
                kfree(base);
        }

        return NULL;
}

static void __init d40_hw_init(struct d40_base *base)
{

        static struct d40_reg_val dma_init_reg[] = {
                /* Clock every part of the DMA block from start */
                { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

                /* Interrupts on all logical channels */
                { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
                { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
        };
        int i;
        u32 prmseo[2] = {0, 0};
        u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
        u32 pcmis = 0;
        u32 pcicr = 0;

        for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
                writel(dma_init_reg[i].val,
                       base->virtbase + dma_init_reg[i].reg);

        /* Configure all our dma channels to default settings */
        for (i = 0; i < base->num_phy_chans; i++) {

                activeo[i % 2] = activeo[i % 2] << 2;

                if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
                    == D40_ALLOC_PHY) {
                        activeo[i % 2] |= 3;
                        continue;
                }

                /* Enable interrupt # */
                pcmis = (pcmis << 1) | 1;

                /* Clear interrupt # */
                pcicr = (pcicr << 1) | 1;

                /* Set channel to physical mode */
                prmseo[i % 2] = prmseo[i % 2] << 2;
                prmseo[i % 2] |= 1;

        }

        writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
        writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
        writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
        writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);

        /* Write which interrupt to enable */
        writel(pcmis, base->virtbase + D40_DREG_PCMIS);

        /* Write which interrupt to clear */
        writel(pcicr, base->virtbase + D40_DREG_PCICR);
}

static int __init d40_lcla_allocate(struct d40_base *base)
{
        struct d40_lcla_pool *pool = &base->lcla_pool;
        unsigned long *page_list;
        int i, j;
        int ret = 0;

        /*
         * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned.
         * To fulfill this hardware requirement without wasting 256 kb
         * we allocate pages until we get an aligned one.
         */
        page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
                            GFP_KERNEL);

        if (!page_list) {
                ret = -ENOMEM;
                goto failure;
        }

        /* Calculating how many pages that are required */
        base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;

        for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
                page_list[i] = __get_free_pages(GFP_KERNEL,
                                                base->lcla_pool.pages);
                if (!page_list[i]) {

                        d40_err(base->dev, "Failed to allocate %d pages.\n",
                                base->lcla_pool.pages);

                        for (j = 0; j < i; j++)
                                free_pages(page_list[j], base->lcla_pool.pages);
                        goto failure;
                }

                if ((virt_to_phys((void *)page_list[i]) &
                     (LCLA_ALIGNMENT - 1)) == 0)
                        break;
        }

        for (j = 0; j < i; j++)
                free_pages(page_list[j], base->lcla_pool.pages);

        if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
                base->lcla_pool.base = (void *)page_list[i];
        } else {
                /*
                 * After many attempts and no success with finding the correct
                 * alignment, try with allocating a big buffer.
                 */
                dev_warn(base->dev,
                         "[%s] Failed to get %d pages @ 18 bit align.\n",
                         __func__, base->lcla_pool.pages);
                base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
                                                         base->num_phy_chans +
                                                         LCLA_ALIGNMENT,
                                                         GFP_KERNEL);
                if (!base->lcla_pool.base_unaligned) {
                        ret = -ENOMEM;
                        goto failure;
                }

                base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
                                                 LCLA_ALIGNMENT);
        }

        pool->dma_addr = dma_map_single(base->dev, pool->base,
                                        SZ_1K * base->num_phy_chans,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(base->dev, pool->dma_addr)) {
                pool->dma_addr = 0;
                ret = -ENOMEM;
                goto failure;
        }

        writel(virt_to_phys(base->lcla_pool.base),
               base->virtbase + D40_DREG_LCLA);
failure:
        kfree(page_list);
        return ret;
}

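/*
 * Illustration only, not used by the driver: the alignment test that the
 * page-allocation loop above relies on, written out as a standalone helper.
 * LCLA_ALIGNMENT is 0x40000, so the LCLA buffer must start on a 256 KiB
 * (18-bit) boundary, which is why whole page blocks are allocated and
 * retried until one happens to land on such a boundary.  The helper name
 * is hypothetical.
 */
#if 0
static bool d40_lcla_is_aligned(unsigned long phys)
{
        /* True when the physical address sits on an LCLA_ALIGNMENT boundary */
        return (phys & (LCLA_ALIGNMENT - 1)) == 0;
}
#endif
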
static int __init d40_probe(struct platform_device *pdev)
{
        int err;
        int ret = -ENOENT;
        struct d40_base *base;
        struct resource *res = NULL;
        int num_reserved_chans;
        u32 val;

        base = d40_hw_detect_init(pdev);
        if (!base)
                goto failure;

        num_reserved_chans = d40_phy_res_init(base);

        platform_set_drvdata(pdev, base);

        spin_lock_init(&base->interrupt_lock);
        spin_lock_init(&base->execmd_lock);

        /* Get IO for logical channel parameter address */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
        if (!res) {
                ret = -ENOENT;
                d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
                goto failure;
        }
        base->lcpa_size = resource_size(res);
        base->phy_lcpa = res->start;

        if (request_mem_region(res->start, resource_size(res),
                               D40_NAME " I/O lcpa") == NULL) {
                ret = -EBUSY;
                d40_err(&pdev->dev,
                        "Failed to request LCPA region 0x%x-0x%x\n",
                        res->start, res->end);
                goto failure;
        }

        /* We make use of ESRAM memory for this. */
        val = readl(base->virtbase + D40_DREG_LCPA);
        if (res->start != val && val != 0) {
                dev_warn(&pdev->dev,
                         "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
                         __func__, val, res->start);
        } else
                writel(res->start, base->virtbase + D40_DREG_LCPA);

        base->lcpa_base = ioremap(res->start, resource_size(res));
        if (!base->lcpa_base) {
                ret = -ENOMEM;
                d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
                goto failure;
        }
        /* If lcla has to be located in ESRAM we don't need to allocate */
        if (base->plat_data->use_esram_lcla) {
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   "lcla_esram");
                if (!res) {
                        ret = -ENOENT;
                        d40_err(&pdev->dev,
                                "No \"lcla_esram\" memory resource\n");
                        goto failure;
                }
                base->lcla_pool.base = ioremap(res->start,
                                               resource_size(res));
                if (!base->lcla_pool.base) {
                        ret = -ENOMEM;
                        d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
                        goto failure;
                }
                writel(res->start, base->virtbase + D40_DREG_LCLA);

        } else {
                ret = d40_lcla_allocate(base);
                if (ret) {
                        d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
                        goto failure;
                }
        }

        spin_lock_init(&base->lcla_pool.lock);

        base->irq = platform_get_irq(pdev, 0);

        ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
        if (ret) {
                d40_err(&pdev->dev, "No IRQ defined\n");
                goto failure;
        }

        pm_runtime_irq_safe(base->dev);
        pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
        pm_runtime_use_autosuspend(base->dev);
        pm_runtime_enable(base->dev);
        pm_runtime_resume(base->dev);

        if (base->plat_data->use_esram_lcla) {

                base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
                if (IS_ERR(base->lcpa_regulator)) {
                        d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
                        base->lcpa_regulator = NULL;
                        goto failure;
                }

                ret = regulator_enable(base->lcpa_regulator);
                if (ret) {
                        d40_err(&pdev->dev,
                                "Failed to enable lcpa_regulator\n");
                        regulator_put(base->lcpa_regulator);
                        base->lcpa_regulator = NULL;
                        goto failure;
                }
        }

        base->initialized = true;
        err = d40_dmaengine_init(base, num_reserved_chans);
        if (err)
                goto failure;

        d40_hw_init(base);

        dev_info(base->dev, "initialized\n");
        return 0;

failure:
        if (base) {
                if (base->desc_slab)
                        kmem_cache_destroy(base->desc_slab);
                if (base->virtbase)
                        iounmap(base->virtbase);

                if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
                        iounmap(base->lcla_pool.base);
                        base->lcla_pool.base = NULL;
                }

                if (base->lcla_pool.dma_addr)
                        dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
                                         SZ_1K * base->num_phy_chans,
                                         DMA_TO_DEVICE);

                if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
                        free_pages((unsigned long)base->lcla_pool.base,
                                   base->lcla_pool.pages);

                kfree(base->lcla_pool.base_unaligned);

                if (base->phy_lcpa)
                        release_mem_region(base->phy_lcpa,
                                           base->lcpa_size);
                if (base->phy_start)
                        release_mem_region(base->phy_start,
                                           base->phy_size);
                if (base->clk) {
                        clk_disable(base->clk);
                        clk_put(base->clk);
                }

                if (base->lcpa_regulator) {
                        regulator_disable(base->lcpa_regulator);
                        regulator_put(base->lcpa_regulator);
                }

                kfree(base->lcla_pool.alloc_map);
                kfree(base->lookup_log_chans);
                kfree(base->lookup_phy_chans);
                kfree(base->phy_res);
                kfree(base);
        }

        d40_err(&pdev->dev, "probe failed\n");
        return ret;
}

static struct platform_driver d40_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name  = D40_NAME,
                .pm    = DMA40_PM_OPS,
        },
};

static int __init stedma40_init(void)
{
        return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);