/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "shdma.h"
/* DMA descriptor control */
enum sh_dmae_desc_status {
        DESC_IDLE,              /* on the free list, ready to be reused */
        DESC_PREPARED,          /* prepared by a prep_* method, owned by the user */
        DESC_SUBMITTED,         /* submitted, on the transfer queue */
        DESC_COMPLETED,         /* completed, have to call callback */
        DESC_WAITING,           /* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2
/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
        __raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
        return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
        return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
        __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}
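
/*
 * Editorial note on the accessors above (assuming the u32 __iomem * register
 * bases used throughout this driver): base and chan_reg are u32 pointers,
 * while SAR, DAR, TCR, CHCR and DMAOR are byte offsets, so each offset is
 * scaled down by sizeof(u32) before the pointer addition. E.g., with an
 * illustrative channel base of 0xfe008020 and CHCR == 0x0c, the access lands
 * on byte address 0xfe008020 + 0x0c == 0xfe00802c.
 */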
/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
        unsigned short dmaor;
        unsigned long flags;

        spin_lock_irqsave(&sh_dmae_lock, flags);

        dmaor = dmaor_read(shdev);
        dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

        spin_unlock_irqrestore(&sh_dmae_lock, flags);
}
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
        unsigned short dmaor;
        unsigned long flags;

        spin_lock_irqsave(&sh_dmae_lock, flags);

        dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

        dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

        dmaor = dmaor_read(shdev);

        spin_unlock_irqrestore(&sh_dmae_lock, flags);

        if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
                dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
                return -EIO;
        }
        return 0;
}
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
                return true; /* working */

        return false; /* waiting */
}
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
        struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
                                                struct sh_dmae_device, common);
        struct sh_dmae_pdata *pdata = shdev->pdata;
        int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
                ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

        if (cnt >= pdata->ts_shift_num)
                cnt = 0;

        return pdata->ts_shift[cnt];
}
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
        struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
                                                struct sh_dmae_device, common);
        struct sh_dmae_pdata *pdata = shdev->pdata;
        int i;

        for (i = 0; i < pdata->ts_shift_num; i++)
                if (pdata->ts_shift[i] == l2size)
                        break;

        if (i == pdata->ts_shift_num)
                i = 0;

        return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
                ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
        sh_dmae_writel(sh_chan, hw->sar, SAR);
        sh_dmae_writel(sh_chan, hw->dar, DAR);
        sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        chcr |= CHCR_DE | CHCR_IE;
        sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
        sh_dmae_writel(sh_chan, chcr, CHCR);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
{
        /*
         * Default configuration for dual address memory-memory transfer.
         * 0x400 represents auto-request.
         */
        u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
                                                   LOG2_DEFAULT_XFER_SIZE);
        sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
        sh_dmae_writel(sh_chan, chcr, CHCR);
}
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
        /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
        sh_dmae_writel(sh_chan, val, CHCR);

        return 0;
}
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
        struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
                                                struct sh_dmae_device, common);
        struct sh_dmae_pdata *pdata = shdev->pdata;
        const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
        u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
        int shift = chan_pdata->dmars_bit;

        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
                     addr);

        return 0;
}
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
        struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
        dma_async_tx_callback callback = tx->callback;
        dma_cookie_t cookie;

        spin_lock_bh(&sh_chan->desc_lock);

        cookie = sh_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;

        sh_chan->common.cookie = cookie;
        tx->cookie = cookie;

        /* Mark all chunks of this descriptor as submitted, move to the queue */
        list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
                /*
                 * All chunks are on the global ld_free, so, we have to find
                 * the end of the chain ourselves
                 */
                if (chunk != desc && (chunk->mark == DESC_IDLE ||
                                      chunk->async_tx.cookie > 0 ||
                                      chunk->async_tx.cookie == -EBUSY ||
                                      &chunk->node == &sh_chan->ld_free))
                        break;
                chunk->mark = DESC_SUBMITTED;
                /* Callback goes to the last chunk */
                chunk->async_tx.callback = NULL;
                chunk->cookie = cookie;
                list_move_tail(&chunk->node, &sh_chan->ld_queue);
                last = chunk;
        }

        last->async_tx.callback = callback;
        last->async_tx.callback_param = tx->callback_param;

        dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
                tx->cookie, &last->async_tx, sh_chan->id,
                desc->hw.sar, desc->hw.tcr, desc->hw.dar);

        spin_unlock_bh(&sh_chan->desc_lock);

        return cookie;
}
/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
        struct sh_desc *desc;

        list_for_each_entry(desc, &sh_chan->ld_free, node)
                if (desc->mark != DESC_PREPARED) {
                        BUG_ON(desc->mark != DESC_IDLE);
                        list_del(&desc->node);
                        return desc;
                }

        return NULL;
}
static const struct sh_dmae_slave_config *sh_dmae_find_slave(
        struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
        struct dma_device *dma_dev = sh_chan->common.device;
        struct sh_dmae_device *shdev = container_of(dma_dev,
                                        struct sh_dmae_device, common);
        struct sh_dmae_pdata *pdata = shdev->pdata;
        int i;

        if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
                return NULL;

        for (i = 0; i < pdata->slave_num; i++)
                if (pdata->slave[i].slave_id == param->slave_id)
                        return pdata->slave + i;

        return NULL;
}
static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc;
        struct sh_dmae_slave *param = chan->private;
        int ret;

        pm_runtime_get_sync(sh_chan->dev);

        /*
         * This relies on the guarantee from dmaengine that alloc_chan_resources
         * never runs concurrently with itself or free_chan_resources.
         */
        if (param) {
                const struct sh_dmae_slave_config *cfg;

                cfg = sh_dmae_find_slave(sh_chan, param);
                if (!cfg) {
                        ret = -EINVAL;
                        goto efindslave;
                }

                if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
                        ret = -EBUSY;
                        goto etestused;
                }

                param->config = cfg;

                dmae_set_dmars(sh_chan, cfg->mid_rid);
                dmae_set_chcr(sh_chan, cfg->chcr);
        } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
                dmae_init(sh_chan);
        }

        spin_lock_bh(&sh_chan->desc_lock);
        while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
                spin_unlock_bh(&sh_chan->desc_lock);
                desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
                if (!desc) {
                        spin_lock_bh(&sh_chan->desc_lock);
                        break;
                }
                dma_async_tx_descriptor_init(&desc->async_tx,
                                        &sh_chan->common);
                desc->async_tx.tx_submit = sh_dmae_tx_submit;
                desc->mark = DESC_IDLE;

                spin_lock_bh(&sh_chan->desc_lock);
                list_add(&desc->node, &sh_chan->ld_free);
                sh_chan->descs_allocated++;
        }
        spin_unlock_bh(&sh_chan->desc_lock);

        if (!sh_chan->descs_allocated) {
                ret = -ENOMEM;
                goto edescalloc;
        }

        return sh_chan->descs_allocated;

edescalloc:
        if (param)
                clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
        chan->private = NULL;
        pm_runtime_put(sh_chan->dev);
        return ret;
}
/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc, *_desc;
        LIST_HEAD(list);
        int descs = sh_chan->descs_allocated;

        /* Protect against ISR */
        spin_lock_irq(&sh_chan->desc_lock);
        dmae_halt(sh_chan);
        spin_unlock_irq(&sh_chan->desc_lock);

        /* Now no new interrupts will occur */

        /* Prepared and not submitted descriptors can still be on the queue */
        if (!list_empty(&sh_chan->ld_queue))
                sh_dmae_chan_ld_cleanup(sh_chan, true);

        if (chan->private) {
                /* The caller is holding dma_list_mutex */
                struct sh_dmae_slave *param = chan->private;
                clear_bit(param->slave_id, sh_dmae_slave_used);
                chan->private = NULL;
        }

        spin_lock_bh(&sh_chan->desc_lock);

        list_splice_init(&sh_chan->ld_free, &list);
        sh_chan->descs_allocated = 0;

        spin_unlock_bh(&sh_chan->desc_lock);

        if (descs > 0)
                pm_runtime_put(sh_chan->dev);

        list_for_each_entry_safe(desc, _desc, &list, node)
                kfree(desc);
}
/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 * Returns the prepared descriptor or %NULL on error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
        unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
        struct sh_desc **first, enum dma_data_direction direction)
{
        struct sh_desc *new;
        size_t copy_size;

        if (!*len)
                return NULL;

        /* Allocate the link descriptor from the free list */
        new = sh_dmae_get_desc(sh_chan);
        if (!new) {
                dev_err(sh_chan->dev, "No free link descriptor available\n");
                return NULL;
        }

        copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

        new->hw.sar = *src;
        new->hw.dar = *dest;
        new->hw.tcr = copy_size;

        if (!*first) {
                /* First desc */
                new->async_tx.cookie = -EBUSY;
                *first = new;
        } else {
                /* Other desc - invisible to the user */
                new->async_tx.cookie = -EINVAL;
        }

        dev_dbg(sh_chan->dev,
                "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
                copy_size, *len, *src, *dest, &new->async_tx,
                new->async_tx.cookie, sh_chan->xmit_shift);

        new->mark = DESC_PREPARED;
        new->async_tx.flags = flags;
        new->direction = direction;

        *len -= copy_size;
        if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
                *src += copy_size;
        if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
                *dest += copy_size;

        return new;
}
/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
        struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
        enum dma_data_direction direction, unsigned long flags)
{
        struct scatterlist *sg;
        struct sh_desc *first = NULL, *new = NULL /* compiler... */;
        LIST_HEAD(tx_list);
        int chunks = 0;
        int i;

        if (!sg_len)
                return NULL;

        for_each_sg(sgl, sg, sg_len, i)
                chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
                        (SH_DMA_TCR_MAX + 1);

        /* Have to lock the whole loop to protect against concurrent release */
        spin_lock_bh(&sh_chan->desc_lock);

        /*
         * Chaining:
         * first descriptor is what user is dealing with in all API calls, its
         *      cookie is at first set to -EBUSY, at tx-submit to a positive
         *      number
         * if more than one chunk is needed further chunks have cookie = -EINVAL
         * the last chunk, if not equal to the first, has cookie = -ENOSPC
         * all chunks are linked onto the tx_list head with their .node heads
         *      only during this function, then they are immediately spliced
         *      back onto the free list in form of a chain
         */
        for_each_sg(sgl, sg, sg_len, i) {
                dma_addr_t sg_addr = sg_dma_address(sg);
                size_t len = sg_dma_len(sg);

                if (!len)
                        goto err_get_desc;

                do {
                        dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
                                i, sg, len, (unsigned long long)sg_addr);

                        if (direction == DMA_FROM_DEVICE)
                                new = sh_dmae_add_desc(sh_chan, flags,
                                                &sg_addr, addr, &len, &first,
                                                direction);
                        else
                                new = sh_dmae_add_desc(sh_chan, flags,
                                                addr, &sg_addr, &len, &first,
                                                direction);
                        if (!new)
                                goto err_get_desc;

                        new->chunks = chunks--;
                        list_add_tail(&new->node, &tx_list);
                } while (len);
        }

        if (new != first)
                new->async_tx.cookie = -ENOSPC;

        /* Put them back on the free list, so, they don't get lost */
        list_splice_tail(&tx_list, &sh_chan->ld_free);

        spin_unlock_bh(&sh_chan->desc_lock);

        return &first->async_tx;

err_get_desc:
        list_for_each_entry(new, &tx_list, node)
                new->mark = DESC_IDLE;
        list_splice(&tx_list, &sh_chan->ld_free);

        spin_unlock_bh(&sh_chan->desc_lock);

        return NULL;
}
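
/*
 * Chunking arithmetic, worked through (editorial note): one descriptor moves
 * at most SH_DMA_TCR_MAX + 1 bytes (16MB, see the header comment), and
 * chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1) is just
 * ceil(len / 16MB). An illustrative 40MB SG element therefore costs three
 * descriptors: 16MB + 16MB + 8MB, with only the first chunk visible to the
 * user through its cookie.
 */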
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
        size_t len, unsigned long flags)
{
        struct sh_dmae_chan *sh_chan;
        struct scatterlist sg;

        if (!chan || !len)
                return NULL;

        sh_chan = to_sh_chan(chan);

        sg_init_table(&sg, 1);
        sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
                    offset_in_page(dma_src));
        sg_dma_address(&sg) = dma_src;
        sg_dma_len(&sg) = len;

        return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
                               flags);
}
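
/*
 * Client-side sketch (editorial, illustrative only; dst and src are assumed
 * to be already DMA-mapped bus addresses): MEMCPY users reach the function
 * above through the generic dmaengine API rather than calling it directly:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */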
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_data_direction direction, unsigned long flags)
{
        struct sh_dmae_slave *param;
        struct sh_dmae_chan *sh_chan;
        dma_addr_t slave_addr;

        if (!chan)
                return NULL;

        sh_chan = to_sh_chan(chan);
        param = chan->private;

        /* Someone calling slave DMA on a public channel? */
        if (!param || !sg_len) {
                dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
                         __func__, param, sg_len, param ? param->slave_id : -1);
                return NULL;
        }

        slave_addr = param->config->addr;

        /*
         * if (param != NULL), this is a successfully requested slave channel,
         * therefore param->config != NULL too.
         */
        return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
                               direction, flags);
}
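
/*
 * Slave-side sketch (editorial, illustrative only): a peripheral driver
 * binds one of these channels by passing a struct sh_dmae_slave through
 * dma_request_channel() with a filter that stores it in chan->private:
 *
 *	static bool sh_chan_filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;	(arg is the struct sh_dmae_slave pointer)
 *		return true;
 *	}
 *
 *	param.slave_id = SHDMA_SLAVE_SCIF0_TX;	(hypothetical slave ID)
 *	chan = dma_request_channel(mask, sh_chan_filter, &param);
 *
 * sh_dmae_alloc_chan_resources() then validates slave_id against the
 * platform data via sh_dmae_find_slave().
 */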
static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                           unsigned long arg)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

        /* Only supports DMA_TERMINATE_ALL */
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        if (!chan)
                return -EINVAL;

        spin_lock_bh(&sh_chan->desc_lock);
        dmae_halt(sh_chan);

        if (!list_empty(&sh_chan->ld_queue)) {
                /* Record partial transfer */
                struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
                                                  struct sh_desc, node);
                desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
                        sh_chan->xmit_shift;
        }
        spin_unlock_bh(&sh_chan->desc_lock);

        sh_dmae_chan_ld_cleanup(sh_chan, true);

        return 0;
}
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
        struct sh_desc *desc, *_desc;
        /* Is the "exposed" head of a chain acked? */
        bool head_acked = false;
        dma_cookie_t cookie = 0;
        dma_async_tx_callback callback = NULL;
        void *param = NULL;

        spin_lock_bh(&sh_chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
                struct dma_async_tx_descriptor *tx = &desc->async_tx;

                BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
                BUG_ON(desc->mark != DESC_SUBMITTED &&
                       desc->mark != DESC_COMPLETED &&
                       desc->mark != DESC_WAITING);

                /*
                 * queue is ordered, and we use this loop to (1) clean up all
                 * completed descriptors, and to (2) update descriptor flags of
                 * any chunks in a (partially) completed chain
                 */
                if (!all && desc->mark == DESC_SUBMITTED &&
                    desc->cookie != cookie)
                        break;

                if (tx->cookie > 0)
                        cookie = tx->cookie;

                if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
                        if (sh_chan->completed_cookie != desc->cookie - 1)
                                dev_dbg(sh_chan->dev,
                                        "Completing cookie %d, expected %d\n",
                                        desc->cookie,
                                        sh_chan->completed_cookie + 1);
                        sh_chan->completed_cookie = desc->cookie;
                }

                /* Call callback on the last chunk */
                if (desc->mark == DESC_COMPLETED && tx->callback) {
                        desc->mark = DESC_WAITING;
                        callback = tx->callback;
                        param = tx->callback_param;
                        dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
                                tx->cookie, tx, sh_chan->id);
                        BUG_ON(desc->chunks != 1);
                        break;
                }

                if (tx->cookie > 0 || tx->cookie == -EBUSY) {
                        if (desc->mark == DESC_COMPLETED) {
                                BUG_ON(tx->cookie < 0);
                                desc->mark = DESC_WAITING;
                        }
                        head_acked = async_tx_test_ack(tx);
                } else {
                        switch (desc->mark) {
                        case DESC_COMPLETED:
                                desc->mark = DESC_WAITING;
                                /* Fall through */
                        case DESC_WAITING:
                                if (head_acked)
                                        async_tx_ack(&desc->async_tx);
                        }
                }

                dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
                        tx, tx->cookie);

                if (((desc->mark == DESC_COMPLETED ||
                      desc->mark == DESC_WAITING) &&
                     async_tx_test_ack(&desc->async_tx)) || all) {
                        /* Remove from ld_queue list */
                        desc->mark = DESC_IDLE;
                        list_move(&desc->node, &sh_chan->ld_free);
                }
        }

        if (all && !callback)
                /*
                 * Terminating and the loop completed normally: forgive
                 * uncompleted cookies
                 */
                sh_chan->completed_cookie = sh_chan->common.cookie;

        spin_unlock_bh(&sh_chan->desc_lock);

        if (callback)
                callback(param);

        return callback;
}
/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
        while (__ld_cleanup(sh_chan, all))
                ;
}
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
        struct sh_desc *desc;

        spin_lock_bh(&sh_chan->desc_lock);
        /* DMA work check */
        if (dmae_is_busy(sh_chan)) {
                spin_unlock_bh(&sh_chan->desc_lock);
                return;
        }

        /* Find the first not transferred descriptor */
        list_for_each_entry(desc, &sh_chan->ld_queue, node)
                if (desc->mark == DESC_SUBMITTED) {
                        dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
                                desc->async_tx.cookie, sh_chan->id,
                                desc->hw.tcr, desc->hw.sar, desc->hw.dar);
                        /* Get the ld start address from ld_queue */
                        dmae_set_reg(sh_chan, &desc->hw);
                        dmae_start(sh_chan);
                        break;
                }

        spin_unlock_bh(&sh_chan->desc_lock);
}
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        sh_chan_xfer_ld_queue(sh_chan);
}
static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        struct dma_tx_state *txstate)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status status;

        sh_dmae_chan_ld_cleanup(sh_chan, false);

        /* First read completed cookie to avoid a skew */
        last_complete = sh_chan->completed_cookie;
        rmb();
        last_used = chan->cookie;
        BUG_ON(last_complete < 0);
        dma_set_tx_state(txstate, last_complete, last_used, 0);

        spin_lock_bh(&sh_chan->desc_lock);

        status = dma_async_is_complete(cookie, last_complete, last_used);

        /*
         * If we don't find cookie on the queue, it has been aborted and we have
         * to report error
         */
        if (status != DMA_SUCCESS) {
                struct sh_desc *desc;
                status = DMA_ERROR;
                list_for_each_entry(desc, &sh_chan->ld_queue, node)
                        if (desc->cookie == cookie) {
                                status = DMA_IN_PROGRESS;
                                break;
                        }
        }

        spin_unlock_bh(&sh_chan->desc_lock);

        return status;
}
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
        irqreturn_t ret = IRQ_NONE;
        struct sh_dmae_chan *sh_chan = data;
        u32 chcr;

        spin_lock(&sh_chan->desc_lock);

        chcr = sh_dmae_readl(sh_chan, CHCR);

        if (chcr & CHCR_TE) {
                /* DMA stop */
                dmae_halt(sh_chan);

                ret = IRQ_HANDLED;
                tasklet_schedule(&sh_chan->tasklet);
        }

        spin_unlock(&sh_chan->desc_lock);

        return ret;
}
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
        unsigned int handled = 0;
        int i;

        /* halt the dma controller */
        sh_dmae_ctl_stop(shdev);

        /* We cannot detect, which channel caused the error, have to reset all */
        for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
                struct sh_dmae_chan *sh_chan = shdev->chan[i];
                struct sh_desc *desc;
                LIST_HEAD(dl);

                if (!sh_chan)
                        continue;

                spin_lock(&sh_chan->desc_lock);

                /* Stop the channel */
                dmae_halt(sh_chan);

                list_splice_init(&sh_chan->ld_queue, &dl);

                spin_unlock(&sh_chan->desc_lock);

                /* Complete all  */
                list_for_each_entry(desc, &dl, node) {
                        struct dma_async_tx_descriptor *tx = &desc->async_tx;
                        desc->mark = DESC_IDLE;
                        if (tx->callback)
                                tx->callback(tx->callback_param);
                }

                spin_lock(&sh_chan->desc_lock);
                list_splice(&dl, &sh_chan->ld_free);
                spin_unlock(&sh_chan->desc_lock);

                handled++;
        }

        sh_dmae_rst(shdev);

        return !!handled;
}
static irqreturn_t sh_dmae_err(int irq, void *data)
{
        struct sh_dmae_device *shdev = data;

        if (!(dmaor_read(shdev) & DMAOR_AE))
                return IRQ_NONE;

        sh_dmae_reset(data);
        return IRQ_HANDLED;
}
)
919 struct sh_dmae_chan
*sh_chan
= (struct sh_dmae_chan
*)data
;
920 struct sh_desc
*desc
;
921 u32 sar_buf
= sh_dmae_readl(sh_chan
, SAR
);
922 u32 dar_buf
= sh_dmae_readl(sh_chan
, DAR
);
924 spin_lock(&sh_chan
->desc_lock
);
925 list_for_each_entry(desc
, &sh_chan
->ld_queue
, node
) {
926 if (desc
->mark
== DESC_SUBMITTED
&&
927 ((desc
->direction
== DMA_FROM_DEVICE
&&
928 (desc
->hw
.dar
+ desc
->hw
.tcr
) == dar_buf
) ||
929 (desc
->hw
.sar
+ desc
->hw
.tcr
) == sar_buf
)) {
930 dev_dbg(sh_chan
->dev
, "done #%d@%p dst %u\n",
931 desc
->async_tx
.cookie
, &desc
->async_tx
,
933 desc
->mark
= DESC_COMPLETED
;
937 spin_unlock(&sh_chan
->desc_lock
);
940 sh_chan_xfer_ld_queue(sh_chan
);
941 sh_dmae_chan_ld_cleanup(sh_chan
, false);
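
/*
 * The completion heuristic above, worked through (editorial note): SAR and
 * DAR are read back once and compared against each queued chunk's programmed
 * end address. E.g., a chunk with an illustrative hw.sar of 0x40000000 and
 * hw.tcr of 0x100 is considered done once the channel's SAR readback has
 * advanced to 0x40000100; for DMA_FROM_DEVICE chunks DAR is checked instead,
 * since SAR stays fixed on the peripheral FIFO.
 */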
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
        /* Fast path out if NMIF is not asserted for this controller */
        if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
                return false;

        return sh_dmae_reset(shdev);
}
static int sh_dmae_nmi_handler(struct notifier_block *self,
                               unsigned long cmd, void *data)
{
        struct sh_dmae_device *shdev;
        int ret = NOTIFY_DONE;
        bool triggered;

        /*
         * Only concern ourselves with NMI events.
         *
         * Normally we would check the die chain value, but as this needs
         * to be architecture independent, check for NMI context instead.
         */
        if (!in_nmi())
                return NOTIFY_DONE;

        rcu_read_lock();
        list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
                /*
                 * Only stop if one of the controllers has NMIF asserted,
                 * we do not want to interfere with regular address error
                 * handling or NMI events that don't concern the DMACs.
                 */
                triggered = sh_dmae_nmi_notify(shdev);
                if (triggered == true)
                        ret = NOTIFY_OK;
        }
        rcu_read_unlock();

        return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
        .notifier_call  = sh_dmae_nmi_handler,

        /* Run before NMI debug handler and KGDB */
        .priority       = 1,
};
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
                                        int irq, unsigned long flags)
{
        int err;
        const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
        struct platform_device *pdev = to_platform_device(shdev->common.dev);
        struct sh_dmae_chan *new_sh_chan;

        /* alloc channel */
        new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
        if (!new_sh_chan) {
                dev_err(shdev->common.dev,
                        "No free memory for allocating dma channels!\n");
                return -ENOMEM;
        }

        /* copy struct dma_device */
        new_sh_chan->common.device = &shdev->common;

        new_sh_chan->dev = shdev->common.dev;
        new_sh_chan->id = id;
        new_sh_chan->irq = irq;
        new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

        /* Init DMA tasklet */
        tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
                     (unsigned long)new_sh_chan);

        spin_lock_init(&new_sh_chan->desc_lock);

        /* Init descriptor manage list */
        INIT_LIST_HEAD(&new_sh_chan->ld_queue);
        INIT_LIST_HEAD(&new_sh_chan->ld_free);

        /* Add the channel to DMA device channel list */
        list_add_tail(&new_sh_chan->common.device_node,
                        &shdev->common.channels);
        shdev->common.chancnt++;

        if (pdev->id >= 0)
                snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
                         "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
        else
                snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
                         "sh-dma%d", new_sh_chan->id);

        /* set up channel irq */
        err = request_irq(irq, &sh_dmae_interrupt, flags,
                          new_sh_chan->dev_id, new_sh_chan);
        if (err) {
                dev_err(shdev->common.dev, "DMA channel %d request_irq error "
                        "with return %d\n", id, err);
                goto err_no_irq;
        }

        shdev->chan[id] = new_sh_chan;
        return 0;

err_no_irq:
        /* remove from dmaengine device node */
        list_del(&new_sh_chan->common.device_node);
        kfree(new_sh_chan);
        return err;
}
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
        int i;

        for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
                if (shdev->chan[i]) {
                        struct sh_dmae_chan *sh_chan = shdev->chan[i];

                        free_irq(sh_chan->irq, sh_chan);

                        list_del(&sh_chan->common.device_node);

                        kfree(sh_chan);
                        shdev->chan[i] = NULL;
                }
        }
        shdev->common.chancnt = 0;
}
static int __init sh_dmae_probe(struct platform_device *pdev)
{
        struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
        unsigned long irqflags = IRQF_DISABLED,
                chan_flag[SH_DMAC_MAX_CHANNELS] = {};
        int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
        int err, i, irq_cnt = 0, irqres = 0;
        struct sh_dmae_device *shdev;
        struct resource *chan, *dmars, *errirq_res, *chanirq_res;

        /* get platform data */
        if (!pdata || !pdata->channel_num)
                return -ENODEV;

        chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* DMARS area is optional, if absent, this controller cannot do slave DMA */
        dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        /*
         * IRQ resources:
         * 1. there always must be at least one IRQ IO-resource. On SH4 it is
         *    the error IRQ, in which case it is the only IRQ in this resource:
         *    start == end. If it is the only IRQ resource, all channels also
         *    use the same IRQ.
         * 2. DMA channel IRQ resources can be specified one per resource or in
         *    ranges (start != end)
         * 3. iff all events (channels and, optionally, error) on this
         *    controller use the same IRQ, only one IRQ resource can be
         *    specified, otherwise there must be one IRQ per channel, even if
         *    some of them are equal
         * 4. if all IRQs on this controller are equal or if some specific IRQs
         *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
         *    requested with the IRQF_SHARED flag
         */
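        /*
         * Example layout under rules 1 and 2 above (editorial, all numbers
         * illustrative): a controller with one error IRQ and six channel
         * IRQs in a contiguous range could be described as
         *
         *	static struct resource dmac_resources[] = {
         *		{ .start = 0xfe008020, .end = 0xfe00808f,
         *		  .flags = IORESOURCE_MEM },	(channel registers)
         *		{ .start = 0xfe009000, .end = 0xfe00900b,
         *		  .flags = IORESOURCE_MEM },	(optional DMARS)
         *		{ .start = 34, .end = 34,
         *		  .flags = IORESOURCE_IRQ },	(error IRQ, start == end)
         *		{ .start = 48, .end = 53,
         *		  .flags = IORESOURCE_IRQ },	(channel IRQ range)
         *	};
         */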
        errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!chan || !errirq_res)
                return -ENODEV;

        if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
                dev_err(&pdev->dev, "DMAC register region already claimed\n");
                return -EBUSY;
        }

        if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
                dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
                err = -EBUSY;
                goto ermrdmars;
        }

        err = -ENOMEM;
        shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
        if (!shdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
                goto ealloc;
        }

        shdev->chan_reg = ioremap(chan->start, resource_size(chan));
        if (!shdev->chan_reg)
                goto emapchan;
        if (dmars) {
                shdev->dmars = ioremap(dmars->start, resource_size(dmars));
                if (!shdev->dmars)
                        goto emapdmars;
        }

        /* platform data */
        shdev->pdata = pdata;

        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);

        spin_lock_irq(&sh_dmae_lock);
        list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
        spin_unlock_irq(&sh_dmae_lock);

        /* reset dma controller - only needed as a test */
        err = sh_dmae_rst(shdev);
        if (err)
                goto rst_err;

        INIT_LIST_HEAD(&shdev->common.channels);

        dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
        if (dmars)
                dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

        shdev->common.device_alloc_chan_resources
                = sh_dmae_alloc_chan_resources;
        shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
        shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
        shdev->common.device_tx_status = sh_dmae_tx_status;
        shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

        /* Compulsory for DMA_SLAVE fields */
        shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
        shdev->common.device_control = sh_dmae_control;

        shdev->common.dev = &pdev->dev;
        /* Default transfer size of 1 << LOG2_DEFAULT_XFER_SIZE = 4 bytes requires matching alignment */
        shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

        if (!chanirq_res)
                chanirq_res = errirq_res;
        else
                irqres++;

        if (chanirq_res == errirq_res ||
            (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
                irqflags = IRQF_SHARED;

        errirq = errirq_res->start;

        err = request_irq(errirq, sh_dmae_err, irqflags,
                          "DMAC Address Error", shdev);
        if (err) {
                dev_err(&pdev->dev,
                        "DMA failed requesting irq #%d, error %d\n",
                        errirq, err);
                goto eirq_err;
        }

#else
        chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

        if (chanirq_res->start == chanirq_res->end &&
            !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
                /* Special case - all multiplexed */
                for (; irq_cnt < pdata->channel_num; irq_cnt++) {
                        chan_irq[irq_cnt] = chanirq_res->start;
                        chan_flag[irq_cnt] = IRQF_SHARED;
                }
        } else {
                do {
                        for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
                                if ((errirq_res->flags & IORESOURCE_BITS) ==
                                    IORESOURCE_IRQ_SHAREABLE)
                                        chan_flag[irq_cnt] = IRQF_SHARED;
                                else
                                        chan_flag[irq_cnt] = IRQF_DISABLED;
                                dev_dbg(&pdev->dev,
                                        "Found IRQ %d for channel %d\n",
                                        i, irq_cnt);
                                chan_irq[irq_cnt++] = i;
                        }
                        chanirq_res = platform_get_resource(pdev,
                                                IORESOURCE_IRQ, ++irqres);
                } while (irq_cnt < pdata->channel_num && chanirq_res);
        }

        if (irq_cnt < pdata->channel_num)
                goto eirqres;

        /* Create DMA Channel */
        for (i = 0; i < pdata->channel_num; i++) {
                err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
                if (err)
                        goto chan_probe_err;
        }

        pm_runtime_put(&pdev->dev);

        platform_set_drvdata(pdev, shdev);
        dma_async_device_register(&shdev->common);

        return err;

chan_probe_err:
        sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);

        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        if (dmars)
                iounmap(shdev->dmars);
emapdmars:
        iounmap(shdev->chan_reg);
emapchan:
        kfree(shdev);
ealloc:
        if (dmars)
                release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
        release_mem_region(chan->start, resource_size(chan));

        return err;
}
static int __exit sh_dmae_remove(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        struct resource *res;
        int errirq = platform_get_irq(pdev, 0);

        dma_async_device_unregister(&shdev->common);

        if (errirq > 0)
                free_irq(errirq, shdev);

        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);

        /* channel data remove */
        sh_dmae_chan_remove(shdev);

        pm_runtime_disable(&pdev->dev);

        if (shdev->dmars)
                iounmap(shdev->dmars);
        iounmap(shdev->chan_reg);

        kfree(shdev);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res)
                release_mem_region(res->start, resource_size(res));
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res)
                release_mem_region(res->start, resource_size(res));

        return 0;
}
static void sh_dmae_shutdown(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        sh_dmae_ctl_stop(shdev);
}
static int sh_dmae_runtime_suspend(struct device *dev)
{
        return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);

        return sh_dmae_rst(shdev);
}
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);
        int i;

        for (i = 0; i < shdev->pdata->channel_num; i++) {
                struct sh_dmae_chan *sh_chan = shdev->chan[i];
                if (sh_chan->descs_allocated)
                        sh_chan->pm_error = pm_runtime_put_sync(dev);
        }

        return 0;
}

static int sh_dmae_resume(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);
        int i;

        for (i = 0; i < shdev->pdata->channel_num; i++) {
                struct sh_dmae_chan *sh_chan = shdev->chan[i];
                struct sh_dmae_slave *param = sh_chan->common.private;

                if (!sh_chan->descs_allocated)
                        continue;

                if (!sh_chan->pm_error)
                        pm_runtime_get_sync(dev);

                if (param) {
                        const struct sh_dmae_slave_config *cfg = param->config;
                        dmae_set_dmars(sh_chan, cfg->mid_rid);
                        dmae_set_chcr(sh_chan, cfg->chcr);
                } else {
                        dmae_init(sh_chan);
                }
        }

        return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif
const struct dev_pm_ops sh_dmae_pm = {
        .suspend                = sh_dmae_suspend,
        .resume                 = sh_dmae_resume,
        .runtime_suspend        = sh_dmae_runtime_suspend,
        .runtime_resume         = sh_dmae_runtime_resume,
};
static struct platform_driver sh_dmae_driver = {
        .remove         = __exit_p(sh_dmae_remove),
        .shutdown       = sh_dmae_shutdown,
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "sh-dma-engine",
                .pm     = &sh_dmae_pm,
        },
};
static int __init sh_dmae_init(void)
{
        /* Wire up NMI handling */
        int err = register_die_notifier(&sh_dmae_nmi_notifier);
        if (err)
                return err;

        return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);
static void __exit sh_dmae_exit(void)
{
        platform_driver_unregister(&sh_dmae_driver);

        unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);
MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");