/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the
 * file called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
 * any channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry. Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active. The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero). The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
 *    will then move to the next LLI entry.
 *
 * Only the former works sanely with scatter lists, so we only implement
 * the DMAC flow control method. However, peripherals which use the LBREQ
 * and LSREQ signals (eg, MMCI) are unable to use this mode, and these
 * hardware restrictions therefore prevent them from using scatter DMA.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>

#define DRIVER_NAME	"pl08xdmac"
/**
 * struct vendor_data - vendor-specific config parameters for PL08x
 * derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
};
/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl. Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};
/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KiB 0x400 */
#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)

/* Minimum period between work queue runs */
#define PL08X_WQ_PERIODMIN	20

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS	0x40
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8
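/*
 * For reference: with the four u32 words of struct pl08x_lli above,
 * sizeof(struct pl08x_lli) == 16, so MAX_NUM_TSFR_LLIS works out to
 * 0x2000 / 16 == 512 LLI entries per transfer buffer (a sketch of the
 * arithmetic, assuming the compiler adds no padding to the struct).
 */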
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}
/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed. Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		 "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		 "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		 phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		 txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}
/*
 * Overall DMAC remains enabled always.
 *
 * Disabling individual channels could lose data.
 *
 * Disable the peripheral DMA after disabling the DMAC in order to allow
 * the DMAC FIFO to drain, and hence allow the channel to show inactive
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(ch))
		cpu_relax();
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/* Stops the channel */
static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	pl08x_pause_phy_chan(ch);

	/* Disable channel */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_ENABLE;
	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
	val &= ~PL080_CONFIG_TC_IRQ_MASK;
	writel(val, ch->base + PL080_CH_CONFIG);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
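/*
 * Worked example (follows directly from the code above): a cctl word with
 * 0x10 in the transfer size field and PL080_WIDTH_16BIT in the source
 * width field describes 0x10 * 2 == 32 bytes remaining.
 */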
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->desc_list)) {
		struct pl08x_txd *txdi;

		list_for_each_entry(txdi, &plchan->desc_list, node) {
			bytes += txdi->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}
/*
 * Allocate a physical channel for a virtual channel
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	/*
	 * Try to locate a physical channel to be used for
	 * this transfer. If all are taken return NULL and
	 * the requester will have to cope by using some fallback
	 * PIO mode or retrying later.
	 */
	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	/* Stop the channel and clear its interrupts */
	pl08x_stop_phy_chan(ch);
	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);

	/* Mark it as free */
	spin_lock_irqsave(&ch->lock, flags);
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}
static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}
static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
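/*
 * Example encoding (a sketch, derived from the switches above):
 * pl08x_cctl_bits(cctl, 4, 1, 0x10) returns cctl with PL080_WIDTH_32BIT
 * in the source width field, PL080_WIDTH_8BIT in the destination width
 * field, and 0x10 in the transfer size field.
 */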
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_driver_data *pl08x;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
};
/*
 * Autoselect a master bus to use for the transfer. This prefers the
 * destination bus if both are available; if one bus has a fixed address
 * the other will be chosen.
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else {
		if (bd->dstbus.buswidth == 4) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 4) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else if (bd->dstbus.buswidth == 2) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 2) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else {
			/* bd->srcbus.buswidth == 1 */
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		}
	}
}
/*
 * Fills in one LLI for a certain transfer descriptor and advances the
 * counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
	if (bd->pl08x->lli_buses & PL08X_AHB2)
		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}
/*
 * Return number of bytes to fill to boundary, or len.
 * This calculation works for any value of addr.
 */
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
	size_t boundary_len = PL08X_BOUNDARY_SIZE -
			(addr & (PL08X_BOUNDARY_SIZE - 1));

	return min(boundary_len, len);
}
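/*
 * Worked example: with PL08X_BOUNDARY_SIZE == 0x400, the call
 * pl08x_pre_boundary(0x3fc, 64) finds 4 bytes left before the 1KiB
 * boundary and returns 4; pl08x_pre_boundary(0x400, 64) starts exactly
 * on a boundary, so boundary_len is a full 0x400 and is clamped to len,
 * returning 64.
 */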
/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl;
	size_t max_bytes_per_lli;
	size_t total_bytes = 0;
	struct pl08x_lli *llis_va;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
				      &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	/* Get the default CCTL */
	cctl = txd->cctl;

	bd.txd = txd;
	bd.pl08x = pl08x;
	bd.srcbus.addr = txd->src_addr;
	bd.dstbus.addr = txd->dst_addr;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	/* Set up the bus widths to the maximum */
	bd.srcbus.buswidth = bd.srcbus.maxwidth;
	bd.dstbus.buswidth = bd.dstbus.maxwidth;
	dev_vdbg(&pl08x->adev->dev,
		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
		 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);

	/*
	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
	 */
	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
		PL080_CONTROL_TRANSFER_SIZE_MASK;
	dev_vdbg(&pl08x->adev->dev,
		 "%s max bytes per lli = %zu\n",
		 __func__, max_bytes_per_lli);

	/* We need to count this down to zero */
	bd.remainder = txd->len;
	dev_vdbg(&pl08x->adev->dev,
		 "%s remainder = %zu\n",
		 __func__, bd.remainder);
	/*
	 * Choose bus to align to
	 * - prefers destination bus if both available
	 * - if fixed address on one bus chooses other
	 * - modifies cctl to choose an appropriate master
	 */
	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

	if (txd->len < mbus->buswidth) {
		/*
		 * Less than a bus width available
		 * - send as single bytes
		 */
		while (bd.remainder) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s single byte LLIs for a transfer of "
				 "less than a bus width (remain 0x%08zx)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	} else {
		/*
		 * Make one byte LLIs until master bus is aligned
		 * - slave will then be aligned also
		 */
		while ((mbus->addr) % (mbus->buswidth)) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s adjustment lli for less than bus width "
				 "(remain 0x%08zx)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
		/*
		 * Master now aligned
		 * - if slave is not then we must set its width down
		 */
		if (sbus->addr % sbus->buswidth) {
			dev_dbg(&pl08x->adev->dev,
				"%s set down bus width to one byte\n",
				__func__);

			sbus->buswidth = 1;
		}

		/*
		 * Make largest possible LLIs until less than one bus
		 * width left
		 */
		while (bd.remainder > (mbus->buswidth - 1)) {
			size_t lli_len, target_len, tsize, odd_bytes;

			/*
			 * If enough left try to send max possible,
			 * otherwise try to send the remainder
			 */
			target_len = min(bd.remainder, max_bytes_per_lli);

			/*
			 * Set bus lengths for incrementing buses to the
			 * number of bytes which fill to next memory boundary,
			 * limiting on the target length calculated above.
			 */
			if (cctl & PL080_CONTROL_SRC_INCR)
				bd.srcbus.fill_bytes =
					pl08x_pre_boundary(bd.srcbus.addr,
							   target_len);
			else
				bd.srcbus.fill_bytes = target_len;

			if (cctl & PL080_CONTROL_DST_INCR)
				bd.dstbus.fill_bytes =
					pl08x_pre_boundary(bd.dstbus.addr,
							   target_len);
			else
				bd.dstbus.fill_bytes = target_len;

			/* Find the nearest */
			lli_len	= min(bd.srcbus.fill_bytes,
				      bd.dstbus.fill_bytes);

			BUG_ON(lli_len > bd.remainder);

			if (lli_len <= 0) {
				dev_err(&pl08x->adev->dev,
					"%s lli_len is %zu, <= 0\n",
					__func__, lli_len);
				return 0;
			}

			if (lli_len == target_len) {
				/*
				 * Can send what we wanted.
				 * Maintain alignment
				 */
				lli_len	= (lli_len/mbus->buswidth) *
					mbus->buswidth;
				odd_bytes = 0;
			} else {
				/*
				 * So now we know how many bytes to transfer
				 * to get to the nearest boundary. The next
				 * LLI will past the boundary - however we
				 * may be working to a boundary on the slave
				 * bus. We need to ensure the master stays
				 * aligned...
				 */
				odd_bytes = lli_len % mbus->buswidth;
				/*
				 * - and that we are working in multiples
				 *   of the bus widths
				 */
				lli_len -= odd_bytes;
			}

			if (lli_len) {
				/*
				 * Check against minimum bus alignment:
				 * Calculate actual transfer size in relation
				 * to bus width and get a maximum remainder of
				 * the smallest bus width - 1
				 */
				/* FIXME: use round_down()? */
				tsize = lli_len / min(mbus->buswidth,
						      sbus->buswidth);
				lli_len = tsize * min(mbus->buswidth,
						      sbus->buswidth);

				if (target_len != lli_len) {
					dev_vdbg(&pl08x->adev->dev,
						 "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
						 __func__, target_len, lli_len, txd->len);
				}

				cctl = pl08x_cctl_bits(cctl,
						       bd.srcbus.buswidth,
						       bd.dstbus.buswidth,
						       tsize);

				dev_vdbg(&pl08x->adev->dev,
					 "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
					 __func__, lli_len, bd.remainder);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
							lli_len, cctl);
				total_bytes += lli_len;
			}

			if (odd_bytes) {
				/*
				 * Creep past the boundary,
				 * maintaining master alignment
				 */
				int j;

				for (j = 0; (j < mbus->buswidth)
						&& (bd.remainder); j++) {
					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
					dev_vdbg(&pl08x->adev->dev,
						 "%s align with boundary, single byte (remain 0x%08zx)\n",
						 __func__, bd.remainder);
					pl08x_fill_lli_for_desc(&bd,
							num_llis++, 1, cctl);
					total_bytes++;
				}
			}
		}
		/*
		 * Send any odd bytes
		 */
		while (bd.remainder) {
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			dev_vdbg(&pl08x->adev->dev,
				 "%s align with boundary, single odd byte (remain %zu)\n",
				 __func__, bd.remainder);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	}
	if (total_bytes != txd->len) {
		dev_err(&pl08x->adev->dev,
			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
			__func__, total_bytes, txd->len);
		return 0;
	}

	if (num_llis >= MAX_NUM_TSFR_LLIS) {
		dev_err(&pl08x->adev->dev,
			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
			__func__, (u32) MAX_NUM_TSFR_LLIS);
		return 0;
	}

	llis_va = txd->llis_va;
	/*
	 * The final LLI terminates the LLI list.
	 */
	llis_va[num_llis - 1].lli = 0;
	/*
	 * The final LLI element shall also fire an interrupt
	 */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].cctl,
				 llis_va[i].lli);
		}
	}
#endif

	return num_llis;
}
/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	/* Free the LLI */
	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	kfree(txd);
}
static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->desc_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->desc_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}
/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave &&
	    ch->signal < 0 &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;

		/* Assign the flow control signal to this channel */
		if (txd->direction == DMA_TO_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
		else if (txd->direction == DMA_FROM_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		ch->id, ch->signal, plchan->name);

	plchan->phychan = ch;

	return 0;
}
static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}
static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);

	plchan->chan.cookie += 1;
	if (plchan->chan.cookie < 0)
		plchan->chan.cookie = 1;
	tx->cookie = plchan->chan.cookie;
	/* This unlock follows the lock in the prep() function */
	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);

	return tx->cookie;
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}
/*
 * Code accessing dma_async_is_complete() in a tight loop may give
 * problems. Could schedule where indicated. If slaves are relying on
 * interrupts to signal completion this function must not be called with
 * interrupts disabled.
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
		    dma_cookie_t cookie,
		    struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * schedule(); could be inserted here
	 */

	/*
	 * This cookie not complete yet
	 */
	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
			 bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}
/* PrimeCell DMA extension */
struct burst_table {
	int burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 128,
		.reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 64,
		.reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 32,
		.reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 16,
		.reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 8,
		.reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 4,
		.reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 1,
		.reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
};
static void dma_set_runtime_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_channel_data *cd = plchan->cd;
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	u32 cctl = 0;
	int i;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		plchan->runtime_addr = config->dst_addr;
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		plchan->runtime_addr = config->src_addr;
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return;
	}

	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	default:
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return;
	}

	/*
	 * Now decide on a maxburst:
	 * If this channel will only request single transfers, set this
	 * down to ONE element. Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single || maxburst == 0) {
		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
	} else {
		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
			if (burst_sizes[i].burstwords <= maxburst)
				break;
		cctl |= burst_sizes[i].reg;
	}

	/* Modify the default channel data to fit PrimeCell request */
	cd->cctl = cctl;

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		addr_width, maxburst, cctl);
}
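/*
 * A minimal sketch of how a client reaches this path through the generic
 * dmaengine API (the peripheral FIFO address is made up for illustration):
 *
 *	struct dma_slave_config config = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = 0x80101000,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &config);
 */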
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->desc_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->desc_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}
static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		kfree(txd);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, plchan->lockflags);

	list_add_tail(&txd->node, &plchan->desc_list);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * We will stack up the memcpy channels until there is a
		 * channel available to handle them.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions. Since there is no guarantee
		 * that this will ever be resolved, and since the signal
		 * must be acquired AFTER acquiring the physical channel,
		 * we will let them be NACK:ed with -EBUSY here. The drivers
		 * can always retry the prep() call if they are eager on
		 * doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			spin_unlock_irqrestore(&plchan->lock,
					       plchan->lockflags);
			return -EBUSY;
		}

		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		/*
		 * Else we're all set, paused and ready to roll,
		 * status will switch to PL08X_CHAN_RUNNING when
		 * we call issue_pending(). If there is something
		 * running on the channel already we don't change
		 * its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;
	}

	/*
	 * Notice that we leave plchan->lock locked on purpose:
	 * it will be unlocked in the subsequent tx_submit()
	 * call. This is a consequence of the current API.
	 */

	return 0;
}
/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port. We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}
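/*
 * Reading aid, derived from the two conditions above (no new behaviour):
 * the destination goes to AHB2 when AHB1 is not allowed for it, or when
 * both masters are allowed for the destination and the source can use
 * AHB1; the source goes to AHB2 when AHB1 is not allowed for it, or when
 * AHB2 is allowed for the source and the destination did not take AHB2.
 */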
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}
/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	txd->direction = DMA_NONE;
	txd->src_addr = src;
	txd->dst_addr = dest;
	txd->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x,
					pl08x->mem_buses, pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;
	/*
	 * NB: the channel lock is held at this point so tx_submit()
	 * must be called in direct succession.
	 */
	return &txd->tx;
}
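/*
 * Usage sketch (illustrative only, error handling and the hypothetical
 * dst_phys/src_phys/len variables elided or made up): a client drives the
 * prep/submit/issue sequence through the generic dmaengine API.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_phys,
 *						   src_phys, len, 0);
 *	if (tx) {
 *		cookie = tx->tx_submit(tx);	// drops the prep() lock
 *		chan->device->device_issue_pending(chan);
 *	}
 */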
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	u8 src_buses, dst_buses;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	txd->len = sgl->length;

	txd->cctl = plchan->cd->cctl &
			~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
			  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
			  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	txd->cctl |= PL080_CONTROL_PROT_SYS;

	if (direction == DMA_TO_DEVICE) {
		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl |= PL080_CONTROL_SRC_INCR;
		txd->src_addr = sgl->dma_address;
		if (plchan->runtime_addr)
			txd->dst_addr = plchan->runtime_addr;
		else
			txd->dst_addr = plchan->cd->addr;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_FROM_DEVICE) {
		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl |= PL080_CONTROL_DST_INCR;
		if (plchan->runtime_addr)
			txd->src_addr = plchan->runtime_addr;
		else
			txd->src_addr = plchan->cd->addr;
		txd->dst_addr = sgl->dma_address;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;
	/*
	 * NB: the channel lock is held at this point so tx_submit()
	 * must be called in direct succession.
	 */
	return &txd->tx;
}
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		dma_set_runtime_config(chan,
				       (struct dma_slave_config *)
				       arg);
		return 0;
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_stop_phy_chan(plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(pl08x_filter_id);
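/*
 * A minimal sketch of requesting a channel with this filter. The channel
 * name "uart0_tx" is a made-up example; real names come from the bus_id
 * fields of the platform data's slave channel table.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 */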
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical
 * channels actually used, if it is zero... well shut it off. That will
 * save some power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	u32 val;

	val = readl(pl08x->base + PL080_CONFIG);
	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
	/* We implicitly clear bit 1 and that means little-endian mode */
	val |= PL080_CONFIG_ENABLE;
	writel(val, pl08x->base + PL080_CONFIG);
}
static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		callback = txd->tx.callback;
		callback_param = txd->tx.callback_param;

		/*
		 * Update last completed
		 */
		plchan->lc = txd->tx.cookie;

		/*
		 * Free the descriptor
		 */
		pl08x_free_txd(pl08x, txd);
	}
	/*
	 * If a new descriptor is queued, set it up
	 * plchan->at is NULL here
	 */
	if (!list_empty(&plchan->desc_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->desc_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that free:d
		 * up physical channel, see if there is some memcpy
		 * pending that seriously needs to start because of
		 * being stacked up while we were choking the
		 * physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
			    waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	/* Callback to signal completion */
	if (callback)
		callback(callback_param);
}
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 val;
	u32 mask = 0;
	int i;

	val = readl(pl08x->base + PL080_ERR_STATUS);
	if (val) {
		/*
		 * An error interrupt (on one or more channels)
		 */
		dev_err(&pl08x->adev->dev,
			"%s error interrupt, register value 0x%08x\n",
			__func__, val);
		/*
		 * Simply clear ALL PL08X error interrupts,
		 * regardless of channel and cause
		 * FIXME: should be 0x00000003 on PL081 really.
		 */
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	}
	val = readl(pl08x->base + PL080_INT_STATUS);
	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((1 << i) & val) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);

			/* Mask terminal count interrupt */
			mask |= (1 << i);
		}
	}
	/*
	 * Clear only the terminal interrupts on channels we processed
	 */
	writel(mask, pl08x->base + PL080_TC_CLEAR);

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
					   struct dma_device *dmadev,
					   unsigned int channels,
					   bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);
	/*
	 * Register as many memcpy as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->slave = true;
			chan->name = pl08x->pd->slave_channels[i].bus_id;
			chan->cd = &pl08x->pd->slave_channels[i];
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_info(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		chan->chan.cookie = 0;
		chan->lc = 0;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->desc_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}
#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s\n",
			   ch->id, virt_chan ? virt_chan->name : "(none)");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}
static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all channels */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
				   S_IFREG | S_IRUGO, NULL, pl08x,
				   &pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif
static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}
	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;
	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}
	/* A DMA memory pool for LLIs, aligned on an 8-byte (PL08X_ALIGN) boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/*
	 * Attach the interrupt handler
	 */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}
	/* Initialize physical channels */
	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
				   GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->serving = NULL;
		ch->signal = -1;
		dev_info(&adev->dev,
			 "physical channel %d is %s\n", i,
			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}
	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					      pl08x->pd->num_slave_channels,
					      true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;
out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}
/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280880,
		.mask	= 0x00ffffff,
		.data	= &vendor_pl080,
	},
	{ 0, 0 },
};
= {
2039 .drv
.name
= DRIVER_NAME
,
2040 .id_table
= pl08x_ids
,
2041 .probe
= pl08x_probe
,
2044 static int __init
pl08x_init(void)
2047 retval
= amba_driver_register(&pl08x_amba_driver
);
2049 printk(KERN_WARNING DRIVER_NAME
2050 "failed to register as an AMBA device (%d)\n",
2054 subsys_initcall(pl08x_init
);