/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "txx9dmac.h"
static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}
static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}
#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))
#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	 channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
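/*
 * Illustrative expansion (editor's sketch, not part of the build): on a
 * DMAC with 64-bit registers,
 *	channel_readl(dc, CSR)
 * becomes
 *	__raw_readl(&(__dma_regs(dc)->CSR))
 * while on a 32-bit-only DMAC it becomes
 *	__raw_readl(&(__dma_regs32(dc)->CSR))
 * so one call site covers both register layouts; only the 64-bit layout
 * has the q-sized accessors.
 */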
static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}
static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}
static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}
static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}
static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}
#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}
static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}
static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}
#define TXX9_DMA_MAX_COUNT	0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT	64
static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}
static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->txd.tx_list))
		desc = list_entry(desc->txd.tx_list.prev,
				  struct txx9dmac_desc, desc_node);
	return desc;
}
static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);
static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
			ddev->descsize, DMA_TO_DEVICE);
	return desc;
}
static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}
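/*
 * Descriptor lifecycle (editor's note): descriptors travel
 * free_list -> queue (tx_submit) -> active_list -> free_list.  A free
 * descriptor is only reused once the client has ACKed it
 * (async_tx_test_ack() above); otherwise a fresh one is allocated with
 * GFP_ATOMIC, since this path may run with bottom halves disabled.
 */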
static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->txd.tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
				child->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
			desc->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);
}
/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->txd.tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}
/* Called with dc->lock held and bh disabled */
static dma_cookie_t
txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
{
	dma_cookie_t cookie = dc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
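/*
 * Editor's sketch (illustrative only, not compiled): dma_cookie_t is a
 * signed int and non-positive cookies are reserved for error reporting,
 * so the increment above skips them on wraparound:
 */
#if 0
	dma_cookie_t c = INT_MAX;
	if (++c < 0)	/* wraps negative (kernel builds keep this defined) */
		c = 1;	/* restart at the first valid cookie */
#endif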
/*----------------------------------------------------------------------*/
static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}
static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
	mmiowb();
}
/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		/* All 64-bit DMAC supports SMPCHN */
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a non zero value to CHAR will assert XFACT */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a non zero value to CHAR will assert XFACT */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}
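/*
 * Ordering note (editor's): on every branch the write that actually
 * starts the transfer is the one that makes CHAR non-zero (it asserts
 * XFACT), so with SMPCHN the CCR must already be programmed when CHAR
 * is written; without SMPCHN the 32-bit path writes CHAR first and
 * starts via the final CCR write instead.
 */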
/*----------------------------------------------------------------------*/
static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct txx9dmac_slave *ds = dc->chan.private;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&txd->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	/*
	 * We use dma_unmap_page() regardless of how the buffers were
	 * mapped before they were submitted...
	 */
	if (!ds) {
		dma_addr_t dmaaddr;
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			dmaaddr = is_dmac64(dc) ?
				desc->hwdesc.DAR : desc->hwdesc32.DAR;
			dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
				       desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			dmaaddr = is_dmac64(dc) ?
				desc->hwdesc.SAR : desc->hwdesc32.SAR;
			dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
				       desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
	dma_run_dependencies(txd);
}
static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
				prev->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}
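/*
 * Editor's sketch of the resulting hardware chain for three queued
 * descriptors A, B, C (informal):
 *
 *	A.CHAR = B.txd.phys
 *	B.CHAR = C.txd.phys
 *	C.CHAR = 0		(terminator, written by the prep functions)
 *
 * Each CHAR update is pushed out with dma_sync_single_for_device() so
 * the controller never fetches a stale link.
 */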
static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}
static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}
static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}
static void txx9dmac_chan_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = (struct txx9dmac_chan *)data;
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;

	enable_irq(irq);
}
static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
			channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}
static void txx9dmac_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;

	enable_irq(irq);
}
static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
			dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}
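/*
 * Editor's sketch (hypothetical names, not part of the driver) of the
 * irq/tasklet handshake used by both handlers above: the hard handler
 * only masks the line and defers, the tasklet does the work under the
 * channel lock and unmasks again.
 */
#if 0
static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	tasklet_schedule(&sketch_tasklet_obj);	/* defer to softirq */
	disable_irq_nosync(irq);		/* masked until tasklet runs */
	return IRQ_HANDLED;
}

static void sketch_tasklet_fn(unsigned long data)
{
	/* ... scan descriptors under spin_lock(&dc->lock) ... */
	enable_irq(sketch_irq);			/* matches the handler */
}
#endif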
/*----------------------------------------------------------------------*/
static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = txx9dmac_assign_cookie(dc, desc);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}
static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit.  If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}
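/*
 * Editor's usage sketch (hypothetical client code; 'chan', 'dest', 'src'
 * and 'len' are assumed to be prepared by the caller): how a dmaengine
 * client of this era would drive the prep routine above.
 */
#if 0
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;
	cookie = tx->tx_submit(tx);	/* lands in txx9dmac_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks txx9dmac_issue_pending() */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
			!= DMA_SUCCESS)
		cpu_relax();
#endif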
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_TO_DEVICE);
	else
		BUG_ON(direction != DMA_FROM_DEVICE);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_TO_DEVICE) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_TO_DEVICE) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		if (direction == DMA_TO_DEVICE) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}
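/*
 * Editor's setup sketch (hypothetical; TXREG_PHYS is a placeholder): a
 * peripheral driver describes its FIFO to this driver via chan->private
 * before allocating the channel.  Exactly one of tx_reg/rx_reg must be
 * set, as txx9dmac_alloc_chan_resources() checks.
 */
#if 0
	static struct txx9dmac_slave ds = {
		.tx_reg		= TXREG_PHYS,	/* device FIFO, TX direction */
		.rx_reg		= 0,
		.reg_width	= 4,
	};
	chan->private = &ds;	/* consulted by prep_slave_sg and dostart */
#endif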
static void txx9dmac_terminate_all(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}
static enum dma_status
txx9dmac_is_tx_complete(struct dma_chan *chan,
			dma_cookie_t cookie,
			dma_cookie_t *done, dma_cookie_t *used)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = dc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&dc->lock);
		txx9dmac_scan_descriptors(dc);
		spin_unlock_bh(&dc->lock);

		last_complete = dc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	mmiowb();
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}
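/*
 * Editor's note: appending to a live chain is inherently racy, so after
 * linking the new head the code re-checks CSR.CHNEN and the current
 * CHAR; only if the controller already stopped exactly at 'prev' is the
 * chain restarted by writing the new descriptor's address to CHAR.
 */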
static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}
static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dc->completed = chan->cookie = 1;

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}
static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				 ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}
/*----------------------------------------------------------------------*/
static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
	mmiowb();
}
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_terminate_all = txx9dmac_terminate_all;
	dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	if (dc->ddev->irq < 0) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
				(unsigned long)dc);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dc->chan.cookie = dc->completed = 1;

	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}
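/*
 * Editor's board-side sketch (hypothetical helper and names): the SoC
 * setup code registers one "txx9dmac" device per controller plus one
 * "txx9dmac-chan" device per channel; pdev->id % TXX9_DMA_MAX_NR_CHANNELS
 * above selects the channel within its controller.
 */
#if 0
	struct txx9dmac_chan_platform_data cpdata = {
		.dmac_dev = dmac_pdev,	/* the parent "txx9dmac" pdev */
	};

	platform_device_register_data(&dmac_pdev->dev, "txx9dmac-chan",
				      dmac_id * TXX9_DMA_MAX_NR_CHANNELS + ch,
				      &cpdata, sizeof(cpdata));
#endif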
static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
{
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
	if (dc->irq >= 0)
		tasklet_kill(&dc->tasklet);
	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
	return 0;
}
static int __init txx9dmac_probe(struct platform_device *pdev)
{
	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
	struct resource *io;
	struct txx9dmac_dev *ddev;
	u32 mcr;
	int err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
				     dev_name(&pdev->dev)))
		return -EBUSY;

	ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!ddev->regs)
		return -ENOMEM;
	ddev->have_64bit_regs = pdata->have_64bit_regs;
	if (__is_dmac64(ddev))
		ddev->descsize = sizeof(struct txx9dmac_hwdesc);
	else
		ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

	/* force dma off, just in case */
	txx9dmac_off(ddev);

	ddev->irq = platform_get_irq(pdev, 0);
	if (ddev->irq >= 0) {
		tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
				(unsigned long)ddev);
		err = devm_request_irq(&pdev->dev, ddev->irq,
			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
		if (err)
			return err;
	}

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);

	platform_set_drvdata(pdev, ddev);

	return 0;
}
static int __exit txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	if (ddev->irq >= 0)
		tasklet_kill(&ddev->tasklet);
	return 0;
}
static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}
static int txx9dmac_suspend_late(struct platform_device *pdev,
				 pm_message_t mesg)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	return 0;
}
static int txx9dmac_resume_early(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
	u32 mcr;

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;
}
static struct platform_driver txx9dmac_chan_driver = {
	.remove		= __exit_p(txx9dmac_chan_remove),
	.driver = {
		.name	= "txx9dmac-chan",
	},
};

static struct platform_driver txx9dmac_driver = {
	.remove		= __exit_p(txx9dmac_remove),
	.shutdown	= txx9dmac_shutdown,
	.suspend_late	= txx9dmac_suspend_late,
	.resume_early	= txx9dmac_resume_early,
	.driver = {
		.name	= "txx9dmac",
	},
};
static int __init txx9dmac_init(void)
{
	int rc;

	rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
	if (!rc) {
		rc = platform_driver_probe(&txx9dmac_chan_driver,
					   txx9dmac_chan_probe);
		if (rc)
			platform_driver_unregister(&txx9dmac_driver);
	}
	return rc;
}
module_init(txx9dmac_init);
static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");