/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>

#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
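
/*
 * Lifecycle of a descriptor, as implemented below: it starts DESC_IDLE on
 * the channel's ld_free list, becomes DESC_PREPARED in sh_dmae_add_desc(),
 * DESC_SUBMITTED in sh_dmae_tx_submit(), DESC_COMPLETED in
 * dmae_do_tasklet(), DESC_WAITING once its callback has run, and finally
 * returns to DESC_IDLE (and to ld_free) in __ld_cleanup() after the client
 * acks it.
 */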

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}
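
/*
 * Note on the "reg / sizeof(u32)" arithmetic in the helpers above: base and
 * chan_reg are u32 __iomem pointers, while the register macros (SAR, DAR,
 * TCR, CHCR, DMAOR) are byte offsets, so the offset is scaled to u32 units
 * before the pointer addition.
 */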

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}
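
/*
 * Illustrative example (the masks and the ts_shift[] table come from
 * platform data, these values are hypothetical): with ts_low_mask = 0x18
 * and ts_low_shift = 3, a CHCR with bits 4:3 = 2 yields cnt = 2, and
 * ts_shift[2] = 2 would select 2^2 = 4-byte transfer units.
 */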

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}
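
/*
 * The CHCR bits used above: DE enables the channel, IE enables the
 * transfer-end interrupt, and TE is the transfer-end status flag, which is
 * cleared when (re)starting and tested in dmae_is_busy() and in the IRQ
 * handler.
 */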

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If the DMA engine is busy, CHCR must not be written */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
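
/*
 * The read-modify-write in dmae_set_dmars() works because one 16-bit DMARS
 * register serves two channels, holding one 8-bit MID/RID value per channel:
 * with dmars_bit typically 0 or 8, (0xff00 >> shift) masks off this
 * channel's byte while preserving the neighbouring channel's setting.
 */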

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}
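
/*
 * Note on the list head used in sh_dmae_tx_submit() above: passing
 * desc->node.prev as the "head" to list_for_each_entry_safe() makes the
 * iteration begin at desc itself, so the loop visits desc and the chunks
 * chained after it on ld_free.
 */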

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	pm_runtime_put(sh_chan->dev);
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if *first is NULL, set to the current descriptor, and its
 *		cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the allocated descriptor or %NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_BIDIRECTIONAL and the SG list contains only one element and points at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);
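
	/*
	 * Example: with SH_DMA_TCR_MAX + 1 = 16MB (cf. the note at the top
	 * of this file), one 40MB SG entry contributes
	 * (40MB + 16MB - 1) / 16MB = 3 chunks, so "chunks" ends up counting
	 * the hardware descriptors needed for the whole list.
	 */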

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}
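
/*
 * Client-side usage sketch (illustrative, not part of this driver): a
 * dmaengine user could drive the memcpy path roughly like
 *
 *	dma_cap_mask_t mask;
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, 0);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * where dest and src are DMA addresses already mapped by the caller.
 */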

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	dmae_halt(sh_chan);

	spin_lock_bh(&sh_chan->desc_lock);
	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}
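
/*
 * Note on DMA_TERMINATE_ALL above: the channel is halted first, then the
 * progress of the descriptor at the head of ld_queue is recorded in
 * ->partial (TCR counts down as the transfer proceeds), and finally all
 * queued descriptors are forcibly cleaned up.
 */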

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of a DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;

	if (all)
		/* Terminating - forgive uncompleted cookies */
		sh_chan->completed_cookie = sh_chan->common.cookie;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);

	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan) {
			struct sh_desc *desc;
			/* Stop the channel */
			dmae_halt(sh_chan);
			/* Complete all queued descriptors */
			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
				struct dma_async_tx_descriptor *tx = &desc->async_tx;
				desc->mark = DESC_IDLE;
				if (tx->callback)
					tx->callback(tx->callback_param);
			}
			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		}
	}

	sh_dmae_rst(shdev);

	return IRQ_HANDLED;
}
#endif

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}
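
/*
 * Completion matching in dmae_do_tasklet() above: the controller increments
 * SAR/DAR as it runs, so a submitted descriptor whose start address plus
 * length equals the current DAR (for device-to-memory transfers) or SAR has
 * finished.
 */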

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
			(unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor manage list */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
			&shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional, if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
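	/*
	 * For example (illustrative, no specific board implied): a DMAC whose
	 * channels all share one multiplexed IRQ would pass a single IRQ
	 * resource with start == end, while a DMAC with per-channel IRQs
	 * would pass one IRQ resource (or range) per channel after the error
	 * IRQ.
	 */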
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 2^2 = 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num)
		goto eirqres;

	/* Create DMA Channel */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	pm_runtime_put(&pdev->dev);

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	pm_runtime_put(&pdev->dev);
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");