/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Only the former works sanely with scatter lists, so we only implement
 * the DMAC flow control method.  However, peripherals which use the LBREQ
 * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
 * these hardware restrictions prevents them from using scatter DMA.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>

#define DRIVER_NAME	"pl08xdmac"
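/*
 * Illustrative sketch (not part of the upstream driver): how a platform
 * might encode the FIFO rule from the header comment above - source burst
 * of half the FIFO depth, destination burst of the full depth - in the
 * cctl word of a channel's platform data.  The peripheral name, request
 * signal and 16-word FIFO depth are all hypothetical.
 */
static struct pl08x_channel_data example_uart_tx __maybe_unused = {
	.bus_id = "uart0_tx",		/* hypothetical slave channel name */
	.min_signal = 0,		/* hypothetical DMA request line */
	.max_signal = 0,
	.periph_buses = PL08X_AHB1,
	/* 16-word FIFO: source bursts of 8 words, destination bursts of 16 */
	.cctl = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
		(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT) |
		(PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
		(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT),
};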
/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};
/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KiB 0x400 */
#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)

/* Minimum period between work queue runs */
#define PL08X_WQ_PERIODMIN	20

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS	0x40
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}
/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}
/*
 * Overall DMAC remains enabled always.
 *
 * Disabling individual channels could lose data.
 *
 * Disable the peripheral DMA after disabling the DMAC in order to allow
 * the DMAC FIFO to drain, and hence allow the channel to show inactive
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(ch))
		cpu_relax();
}
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}
/* Stops the channel */
static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	pl08x_pause_phy_chan(ch);

	/* Disable channel */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_ENABLE;
	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
	val &= ~PL080_CONFIG_TC_IRQ_MASK;
	writel(val, ch->base + PL080_CH_CONFIG);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Mask off everything but the source width field before switching */
	cctl &= PL080_CONTROL_SWIDTH_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
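/*
 * Worked example (illustrative): a cctl word encoding a 16-bit source
 * width and a transfer size field of 8 makes get_bytes_in_cctl() return
 * 8 * 2 == 16 bytes; the destination width plays no part here.
 */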
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			bytes += txdi->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}
/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	/* Stop the channel and clear its interrupts */
	pl08x_stop_phy_chan(ch);
	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);

	/* Mark it as free */
	spin_lock_irqsave(&ch->lock, flags);
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}
static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}
static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
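/*
 * Worked example (illustrative): pl08x_cctl_bits(cctl, 4, 4, 0x10)
 * returns cctl rewritten for 32-bit source and destination widths with
 * a transfer size field of 16, i.e. sixteen 4-byte transfers == 64 bytes.
 */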
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_driver_data *pl08x;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
};
/*
 * Autoselect a master bus to use for the transfer.  This prefers the
 * destination bus if both are available; if there is a fixed address on
 * one bus, the other will be chosen.
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else {
		if (bd->dstbus.buswidth == 4) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 4) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else if (bd->dstbus.buswidth == 2) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 2) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else {
			/* bd->srcbus.buswidth == 1 */
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		}
	}
}
/*
 * Fill in one LLI for a certain transfer descriptor and advance the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
	if (bd->pl08x->lli_buses & PL08X_AHB2)
		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}
/*
 * Return number of bytes to fill to boundary, or len.
 * This calculation works for any value of addr.
 */
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
	size_t boundary_len = PL08X_BOUNDARY_SIZE -
			(addr & (PL08X_BOUNDARY_SIZE - 1));

	return min(boundary_len, len);
}
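/*
 * Worked example (illustrative): with the 1 KiB boundary above,
 * pl08x_pre_boundary(0x407FC, 16) returns 4, since only 4 bytes remain
 * before the next boundary, while pl08x_pre_boundary(0x40800, 16)
 * returns the full 16, as a fresh 1 KiB window has just started.
 */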
/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
				    struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl;
	size_t max_bytes_per_lli;
	size_t total_bytes = 0;
	struct pl08x_lli *llis_va;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
				      &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	/* Get the default CCTL */
	cctl = txd->cctl;

	bd.txd = txd;
	bd.pl08x = pl08x;
	bd.srcbus.addr = txd->src_addr;
	bd.dstbus.addr = txd->dst_addr;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
					 PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
					 PL080_CONTROL_DWIDTH_SHIFT);

	/* Set up the bus widths to the maximum */
	bd.srcbus.buswidth = bd.srcbus.maxwidth;
	bd.dstbus.buswidth = bd.dstbus.maxwidth;
	dev_vdbg(&pl08x->adev->dev,
		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
		 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);

	/*
	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
	 */
	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
		PL080_CONTROL_TRANSFER_SIZE_MASK;
	dev_vdbg(&pl08x->adev->dev,
		 "%s max bytes per lli = %zu\n",
		 __func__, max_bytes_per_lli);

	/* We need to count this down to zero */
	bd.remainder = txd->len;
	dev_vdbg(&pl08x->adev->dev,
		 "%s remainder = %zu\n",
		 __func__, bd.remainder);

	/*
	 * Choose bus to align to
	 * - prefers destination bus if both available
	 * - if fixed address on one bus chooses other
	 */
	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

	if (txd->len < mbus->buswidth) {
		/* Less than a bus width available - send as single bytes */
		while (bd.remainder) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s single byte LLIs for a transfer of "
				 "less than a bus width (remain 0x%08zx)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	} else {
		/* Make one byte LLIs until master bus is aligned */
		while ((mbus->addr) % (mbus->buswidth)) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s adjustment lli for less than bus width "
				 "(remain 0x%08zx)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}

		/*
		 * Master now aligned
		 * - if slave is not then we must set its width down
		 */
		if (sbus->addr % sbus->buswidth) {
			dev_dbg(&pl08x->adev->dev,
				"%s set down bus width to one byte\n",
				__func__);

			sbus->buswidth = 1;
		}

		/*
		 * Make largest possible LLIs until less than one bus
		 * width left
		 */
		while (bd.remainder > (mbus->buswidth - 1)) {
			size_t lli_len, target_len, tsize, odd_bytes;

			/*
			 * If enough left try to send max possible,
			 * otherwise try to send the remainder
			 */
			target_len = min(bd.remainder, max_bytes_per_lli);

			/*
			 * Set bus lengths for incrementing buses to the
			 * number of bytes which fill to next memory boundary,
			 * limiting on the target length calculated above.
			 */
			if (cctl & PL080_CONTROL_SRC_INCR)
				bd.srcbus.fill_bytes =
					pl08x_pre_boundary(bd.srcbus.addr,
							   target_len);
			else
				bd.srcbus.fill_bytes = target_len;

			if (cctl & PL080_CONTROL_DST_INCR)
				bd.dstbus.fill_bytes =
					pl08x_pre_boundary(bd.dstbus.addr,
							   target_len);
			else
				bd.dstbus.fill_bytes = target_len;

			/* Find the nearest */
			lli_len	= min(bd.srcbus.fill_bytes,
				      bd.dstbus.fill_bytes);

			BUG_ON(lli_len > bd.remainder);

			if (!lli_len) {
				dev_err(&pl08x->adev->dev,
					"%s lli_len is %zu, <= 0\n",
					__func__, lli_len);
				return 0;
			}

			if (lli_len == target_len) {
				/*
				 * Can send what we wanted.
				 * Maintain alignment
				 */
				lli_len	= (lli_len/mbus->buswidth) *
					mbus->buswidth;
				odd_bytes = 0;
			} else {
				/*
				 * So now we know how many bytes to transfer
				 * to get to the nearest boundary.  The next
				 * LLI will be past the boundary.  However, we
				 * may be working to a boundary on the slave
				 * bus.  We need to ensure the master stays
				 * aligned, and that we are working in
				 * multiples of the bus widths.
				 */
				odd_bytes = lli_len % mbus->buswidth;
				lli_len -= odd_bytes;
			}

			if (lli_len) {
				/*
				 * Check against minimum bus alignment:
				 * Calculate actual transfer size in relation
				 * to bus width and get a maximum remainder of
				 * the smallest bus width - 1
				 */
				/* FIXME: use round_down()? */
				tsize = lli_len / min(mbus->buswidth,
						      sbus->buswidth);
				lli_len	= tsize * min(mbus->buswidth,
						      sbus->buswidth);

				if (target_len != lli_len) {
					dev_vdbg(&pl08x->adev->dev,
					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
					__func__, target_len, lli_len, txd->len);
				}

				cctl = pl08x_cctl_bits(cctl,
						       bd.srcbus.buswidth,
						       bd.dstbus.buswidth,
						       tsize);

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
							lli_len, cctl);
				total_bytes += lli_len;
			}

			if (odd_bytes) {
				/*
				 * Creep past the boundary, maintaining
				 * master alignment
				 */
				int j;
				for (j = 0; (j < mbus->buswidth)
						&& (bd.remainder); j++) {
					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
					dev_vdbg(&pl08x->adev->dev,
						"%s align with boundary, single byte (remain 0x%08zx)\n",
						__func__, bd.remainder);
					pl08x_fill_lli_for_desc(&bd,
							num_llis++, 1, cctl);
					total_bytes++;
				}
			}
		}

		/*
		 * Send any odd bytes
		 */
		while (bd.remainder) {
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			dev_vdbg(&pl08x->adev->dev,
				 "%s align with boundary, single odd byte (remain %zu)\n",
				 __func__, bd.remainder);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	}
	if (total_bytes != txd->len) {
		dev_err(&pl08x->adev->dev,
			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
			__func__, total_bytes, txd->len);
		return 0;
	}

	if (num_llis >= MAX_NUM_TSFR_LLIS) {
		dev_err(&pl08x->adev->dev,
			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
			__func__, (u32) MAX_NUM_TSFR_LLIS);
		return 0;
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI chain. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].cctl,
				 llis_va[i].lli);
		}
	}
#endif

	return num_llis;
}
/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	/* Free the LLI */
	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	kfree(txd);
}
static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}
/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}
/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave &&
	    ch->signal < 0 &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;

		/* Assign the flow control signal to this channel */
		if (txd->direction == DMA_TO_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
		else if (txd->direction == DMA_FROM_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		ch->id, ch->signal, plchan->name);

	plchan->phychan_hold++;
	plchan->phychan = ch;

	return 0;
}
static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}
static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	plchan->chan.cookie += 1;
	if (plchan->chan.cookie < 0)
		plchan->chan.cookie = 1;
	tx->cookie = plchan->chan.cookie;

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return tx->cookie;
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}
/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
		    dma_cookie_t cookie,
		    struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * This cookie not complete yet
	 */
	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
			 bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}
/* PrimeCell DMA extension */
struct burst_table {
	int burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 128,
		.reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 64,
		.reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 32,
		.reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 16,
		.reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 8,
		.reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 4,
		.reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 1,
		.reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
};
static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_channel_data *cd = plchan->cd;
	enum dma_slave_buswidth addr_width;
	dma_addr_t addr;
	u32 maxburst;
	u32 cctl = 0;
	int i;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		addr = config->dst_addr;
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		addr = config->src_addr;
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}

	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	default:
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return -EINVAL;
	}

	/*
	 * Now decide on a maxburst:
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single || maxburst == 0) {
		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
	} else {
		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
			if (burst_sizes[i].burstwords <= maxburst)
				break;
		cctl |= burst_sizes[i].reg;
	}

	plchan->runtime_addr = addr;

	/* Modify the default channel data to fit PrimeCell request */
	cd->cctl = cctl;

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl);

	return 0;
}
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}
static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		kfree(txd);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here.  The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending(). If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}
/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}
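/*
 * Worked example (illustrative): with src = PL08X_AHB1 | PL08X_AHB2 and
 * dst = PL08X_AHB2 only, the first test sets PL080_CONTROL_DST_AHB2 and
 * the second leaves the source on AHB1, so the two ends of the transfer
 * end up on separate masters.
 */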
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}
/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	txd->direction = DMA_NONE;
	txd->src_addr = src;
	txd->dst_addr = dest;
	txd->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x,
					pl08x->mem_buses, pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	u8 src_buses, dst_buses;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	txd->len = sgl->length;

	txd->cctl = plchan->cd->cctl &
			~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
			  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
			  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	txd->cctl |= PL080_CONTROL_PROT_SYS;

	if (direction == DMA_TO_DEVICE) {
		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl |= PL080_CONTROL_SRC_INCR;
		txd->src_addr = sgl->dma_address;
		if (plchan->runtime_addr)
			txd->dst_addr = plchan->runtime_addr;
		else
			txd->dst_addr = plchan->cd->addr;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_FROM_DEVICE) {
		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl |= PL080_CONTROL_DST_INCR;
		if (plchan->runtime_addr)
			txd->src_addr = plchan->runtime_addr;
		else
			txd->src_addr = plchan->cd->addr;
		txd->dst_addr = sgl->dma_address;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_stop_phy_chan(plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
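/*
 * Illustrative sketch (not part of the upstream driver): requesting one
 * of the named slave channels via the filter above.  The channel name
 * "uart0_tx" is hypothetical and must match a bus_id from the platform
 * data.
 */
static __maybe_unused struct dma_chan *pl08x_example_request_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* May return NULL if the channel is taken or unknown */
	return dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
}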
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	u32 val;

	val = readl(pl08x->base + PL080_CONFIG);
	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
	/* We implicitly clear bit 1 and that means little-endian mode */
	val |= PL080_CONFIG_ENABLE;
	writel(val, pl08x->base + PL080_CONFIG);
}
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
	}
}
static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		plchan->lc = txd->tx.cookie;
	}

	/* If a new descriptor is queued, set it up plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued.  Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that free:d up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
			    waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0;
	u32 val;
	int i;

	val = readl(pl08x->base + PL080_ERR_STATUS);
	if (val) {
		/* An error interrupt (on one or more channels) */
		dev_err(&pl08x->adev->dev,
			"%s error interrupt, register value 0x%08x\n",
			__func__, val);
		/*
		 * Simply clear ALL PL08X error interrupts,
		 * regardless of channel and cause
		 * FIXME: should be 0x00000003 on PL081 really.
		 */
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	}
	val = readl(pl08x->base + PL080_INT_STATUS);
	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((1 << i) & val) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);

			mask |= (1 << i);
		}
	}
	/* Clear only the terminal interrupts on channels we processed */
	writel(mask, pl08x->base + PL080_TC_CLEAR);

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
					   struct dma_device *dmadev,
					   unsigned int channels,
					   bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->slave = true;
			chan->name = pl08x->pd->slave_channels[i].bus_id;
			chan->cd = &pl08x->pd->slave_channels[i];
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_info(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		chan->chan.cookie = 0;
		chan->lc = 0;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}
#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}
static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s\n",
			   ch->id, virt_chan ? virt_chan->name : "(none)");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}
static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
				   NULL, pl08x,
				   &pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif
static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only.  If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, align on 8-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
				      PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
				   GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->serving = NULL;
		ch->signal = -1;
		dev_info(&adev->dev,
			 "physical channel %d is %s\n", i,
			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					      pl08x->pd->num_slave_channels,
					      true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}
/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_pl080,
	},
	{ 0, 0 },
};
static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);