/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
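 *
 * For example (illustrative figures only): a peripheral with a 16-word
 * receive/transmit FIFO would typically be set up with a destination
 * burst of 16 and a source burst of 8 for memory-to-peripheral transfers.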
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Only the former works sanely with scatter lists, so we only implement
 * the DMAC flow control method.  However, peripherals which use the LBREQ
 * and LSREQ signals (eg, MMCI) are unable to use this mode; these hardware
 * restrictions therefore prevent them from using scatter DMA.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>
#define DRIVER_NAME	"pl08xdmac"
/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
};
/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to OR into the LLI pointer, selecting the AHB port
 * used for LLI fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};
/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)

/* Minimum period between work queue runs */
#define PL08X_WQ_PERIODMIN	20

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS	0x40
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8
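
/*
 * Worked example of the sizes above (assuming the four-word LLI layout of
 * struct pl08x_lli): PL08X_LLI_TSFR_SIZE / sizeof(struct pl08x_lli) =
 * 0x2000 / 16 = 512, so MAX_NUM_TSFR_LLIS allows up to 512 LLIs per
 * transfer, while PL08X_BOUNDARY_SIZE is 1 << 10 = 0x400 bytes (1 KiB).
 */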
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}
static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}
/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}
/*
 * Overall DMAC remains enabled always.
 *
 * Disabling individual channels could lose data.
 *
 * Disable the peripheral DMA after disabling the DMAC in order to allow
 * the DMAC FIFO to drain, and hence allow the channel to show inactive
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(ch))
		cpu_relax();
}
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}
/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
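
/*
 * E.g. (illustrative values only) a CCTL word encoding a 32-bit source
 * width and a transfer size field of 0x10 decodes to 0x10 * 4 = 64 bytes
 * still outstanding for that LLI.
 */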
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			bytes += txdi->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}
/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}
static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}
static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
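
/*
 * For instance (values assumed purely for illustration),
 * pl08x_cctl_bits(cctl, 4, 2, 8) re-encodes cctl with a 32-bit source
 * width, a 16-bit destination width and a transfer size of 8
 * source-width units, leaving all other control bits untouched.
 */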
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_driver_data *pl08x;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
};
/*
 * Autoselect a master bus to use for the transfer.  This prefers the
 * destination bus if both are available; if one bus has a fixed address
 * the other will be chosen.
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else {
		if (bd->dstbus.buswidth == 4) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 4) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else if (bd->dstbus.buswidth == 2) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 2) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else {
			/* bd->srcbus.buswidth == 1 */
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		}
	}
}
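
/*
 * Example: for a memory-to-peripheral transfer the peripheral address is
 * fixed, so PL080_CONTROL_DST_INCR is clear and the incrementing source
 * (memory) side is picked as the master bus above; the fixed peripheral
 * side becomes the slave bus.
 */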
/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
	if (bd->pl08x->lli_buses & PL08X_AHB2)
		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}
/*
 * Return number of bytes to fill to boundary, or len.
 * This calculation works for any value of addr.
 */
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
	size_t boundary_len = PL08X_BOUNDARY_SIZE -
			(addr & (PL08X_BOUNDARY_SIZE - 1));

	return min(boundary_len, len);
}
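
/*
 * Example: pl08x_pre_boundary(0x800003FC, 64) sees 0x400 - 0x3FC = 4
 * bytes left before the next 1KiB boundary and so returns 4; the
 * remaining 60 bytes are handled by subsequent LLIs.
 */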
/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl;
	size_t max_bytes_per_lli;
	size_t total_bytes = 0;
	struct pl08x_lli *llis_va;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
				      &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	/* Get the default CCTL */
	cctl = txd->cctl;

	bd.txd = txd;
	bd.pl08x = pl08x;
	bd.srcbus.addr = txd->src_addr;
	bd.dstbus.addr = txd->dst_addr;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
				       PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
				       PL080_CONTROL_DWIDTH_SHIFT);

	/* Set up the bus widths to the maximum */
	bd.srcbus.buswidth = bd.srcbus.maxwidth;
	bd.dstbus.buswidth = bd.dstbus.maxwidth;
	dev_vdbg(&pl08x->adev->dev,
		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
		 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);

	/*
	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
	 */
	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
		PL080_CONTROL_TRANSFER_SIZE_MASK;
	dev_vdbg(&pl08x->adev->dev,
		 "%s max bytes per lli = %zu\n",
		 __func__, max_bytes_per_lli);
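
	/*
	 * Illustration: with 32-bit source and destination buses and the
	 * 12-bit transfer size field (mask 0xfff), one LLI can cover at
	 * most 4 * 0xfff = 16380 bytes, so e.g. a 64 KiB memcpy needs at
	 * least five LLIs even before any boundary splitting.
	 */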
	/* We need to count this down to zero */
	bd.remainder = txd->len;
	dev_vdbg(&pl08x->adev->dev,
		 "%s remainder = %zu\n",
		 __func__, bd.remainder);

	/*
	 * Choose bus to align to
	 * - prefers destination bus if both available
	 * - if fixed address on one bus chooses other
	 */
	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

	if (txd->len < mbus->buswidth) {
		/* Less than a bus width available - send as single bytes */
		while (bd.remainder) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s single byte LLIs for a transfer of "
				 "less than a bus width (remain 0x%08x)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	} else {
		/* Make one byte LLIs until master bus is aligned */
		while ((mbus->addr) % (mbus->buswidth)) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s adjustment lli for less than bus width "
				 "(remain 0x%08x)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}

		/*
		 * Master now aligned
		 * - if slave is not then we must set its width down
		 */
		if (sbus->addr % sbus->buswidth) {
			dev_dbg(&pl08x->adev->dev,
				"%s set down bus width to one byte\n",
				__func__);

			sbus->buswidth = 1;
		}

		/*
		 * Make largest possible LLIs until less than one bus
		 * width left
		 */
		while (bd.remainder > (mbus->buswidth - 1)) {
			size_t lli_len, target_len, tsize, odd_bytes;

			/*
			 * If enough left try to send max possible,
			 * otherwise try to send the remainder
			 */
			target_len = min(bd.remainder, max_bytes_per_lli);

			/*
			 * Set bus lengths for incrementing buses to the
			 * number of bytes which fill to next memory boundary,
			 * limiting on the target length calculated above.
			 */
			if (cctl & PL080_CONTROL_SRC_INCR)
				bd.srcbus.fill_bytes =
					pl08x_pre_boundary(bd.srcbus.addr,
							   target_len);
			else
				bd.srcbus.fill_bytes = target_len;

			if (cctl & PL080_CONTROL_DST_INCR)
				bd.dstbus.fill_bytes =
					pl08x_pre_boundary(bd.dstbus.addr,
							   target_len);
			else
				bd.dstbus.fill_bytes = target_len;

			/* Find the nearest */
			lli_len = min(bd.srcbus.fill_bytes,
				      bd.dstbus.fill_bytes);

			BUG_ON(lli_len > bd.remainder);

			if (lli_len <= 0) {
				dev_err(&pl08x->adev->dev,
					"%s lli_len is %zu, <= 0\n",
					__func__, lli_len);
				return 0;
			}

			if (lli_len == target_len) {
				/*
				 * Can send what we wanted.
				 * Maintain alignment
				 */
				lli_len = (lli_len/mbus->buswidth) *
							mbus->buswidth;
				odd_bytes = 0;
			} else {
				/*
				 * So now we know how many bytes to transfer
				 * to get to the nearest boundary.  The next
				 * LLI will past the boundary.  However, we
				 * may be working to a boundary on the slave
				 * bus.  We need to ensure the master stays
				 * aligned, and that we are working in
				 * multiples of the bus widths.
				 */
				odd_bytes = lli_len % mbus->buswidth;
				lli_len -= odd_bytes;
			}

			if (lli_len) {
				/*
				 * Check against minimum bus alignment:
				 * Calculate actual transfer size in relation
				 * to bus width and get a maximum remainder of
				 * the smallest bus width - 1
				 */
				/* FIXME: use round_down()? */
				tsize = lli_len / min(mbus->buswidth,
						      sbus->buswidth);
				lli_len = tsize * min(mbus->buswidth,
						      sbus->buswidth);

				if (target_len != lli_len) {
					dev_vdbg(&pl08x->adev->dev,
					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
					__func__, target_len, lli_len, txd->len);
				}

				cctl = pl08x_cctl_bits(cctl,
						       bd.srcbus.buswidth,
						       bd.dstbus.buswidth,
						       tsize);

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
					lli_len, cctl);
				total_bytes += lli_len;
			}

			if (odd_bytes) {
				/*
				 * Creep past the boundary, maintaining
				 * master alignment
				 */
				int j;

				for (j = 0; (j < mbus->buswidth)
						&& (bd.remainder); j++) {
					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
					dev_vdbg(&pl08x->adev->dev,
						"%s align with boundary, single byte (remain 0x%08zx)\n",
						__func__, bd.remainder);
					pl08x_fill_lli_for_desc(&bd,
						num_llis++, 1, cctl);
					total_bytes++;
				}
			}
		}

		/*
		 * Send any odd bytes
		 */
		while (bd.remainder) {
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			dev_vdbg(&pl08x->adev->dev,
				"%s align with boundary, single odd byte (remain %zu)\n",
				__func__, bd.remainder);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	}
	if (total_bytes != txd->len) {
		dev_err(&pl08x->adev->dev,
			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
			__func__, total_bytes, txd->len);
		return 0;
	}

	if (num_llis >= MAX_NUM_TSFR_LLIS) {
		dev_err(&pl08x->adev->dev,
			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
			__func__, (u32) MAX_NUM_TSFR_LLIS);
		return 0;
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI list. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
				 i, &llis_va[i], llis_va[i].src, llis_va[i].dst,
				 llis_va[i].cctl, llis_va[i].lli);
		}
	}
#endif

	return num_llis;
}
/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	/* Free the LLI */
	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	kfree(txd);
}
static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}
/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave &&
	    ch->signal < 0 &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;

		/* Assign the flow control signal to this channel */
		if (txd->direction == DMA_TO_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
		else if (txd->direction == DMA_FROM_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id, ch->signal, plchan->name);

	plchan->phychan_hold++;
	plchan->phychan = ch;

	return 0;
}
static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}
static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	plchan->chan.cookie += 1;
	if (plchan->chan.cookie < 0)
		plchan->chan.cookie = 1;
	tx->cookie = plchan->chan.cookie;

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return tx->cookie;
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}
/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
		    dma_cookie_t cookie,
		    struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * This cookie not complete yet
	 */
	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
			 bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}
/* PrimeCell DMA extension */
struct burst_table {
	int burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 128,
		.reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 64,
		.reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 32,
		.reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 16,
		.reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 8,
		.reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 4,
		.reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 1,
		.reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
};
static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_channel_data *cd = plchan->cd;
	enum dma_slave_buswidth addr_width;
	dma_addr_t addr;
	u32 maxburst;
	u32 cctl = 0;
	int i;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		addr = config->dst_addr;
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		addr = config->src_addr;
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}

	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	default:
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return -EINVAL;
	}

	/*
	 * Now decide on a maxburst:
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single || maxburst == 0) {
		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
	} else {
		for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
			if (burst_sizes[i].burstwords <= maxburst)
				break;
		cctl |= burst_sizes[i].reg;
	}

	plchan->runtime_addr = addr;

	/* Modify the default channel data to fit PrimeCell request */
	cd->cctl = cctl;

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl);

	return 0;
}
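
/*
 * Example of the burst selection above (illustrative): a slave asking for
 * maxburst = 12 words walks burst_sizes[] from 256 downwards and stops at
 * the first entry not larger than 12, i.e. 8, so PL080_BSIZE_8 is
 * programmed for both the source and destination burst sizes.
 */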
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}
static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		kfree(txd);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here. The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending(). If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}
/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}
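
/*
 * Example: if the source may use AHB1 or AHB2 but the destination is only
 * reachable on AHB2, the logic above sets PL080_CONTROL_DST_AHB2 and
 * leaves the source on AHB1, keeping the two sides on separate masters.
 */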
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}
/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	txd->direction = DMA_NONE;
	txd->src_addr = src;
	txd->dst_addr = dest;
	txd->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x,
					pl08x->mem_buses, pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	u8 src_buses, dst_buses;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	txd->len = sgl->length;

	txd->cctl = plchan->cd->cctl &
			~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
			  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
			  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	txd->cctl |= PL080_CONTROL_PROT_SYS;

	if (direction == DMA_TO_DEVICE) {
		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl |= PL080_CONTROL_SRC_INCR;
		txd->src_addr = sgl->dma_address;
		if (plchan->runtime_addr)
			txd->dst_addr = plchan->runtime_addr;
		else
			txd->dst_addr = plchan->cd->addr;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_FROM_DEVICE) {
		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl |= PL080_CONTROL_DST_INCR;
		if (plchan->runtime_addr)
			txd->src_addr = plchan->runtime_addr;
		else
			txd->src_addr = plchan->cd->addr;
		txd->dst_addr = sgl->dma_address;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	u32 val;

	val = readl(pl08x->base + PL080_CONFIG);
	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
	/* We implicitly clear bit 1 and that means little-endian mode */
	val |= PL080_CONFIG_ENABLE;
	writel(val, pl08x->base + PL080_CONFIG);
}
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
	}
}
static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		plchan->lc = txd->tx.cookie;
	}

	/* If a new descriptor is queued, set it up plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued.  Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that free:d up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
			    waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0;
	u32 val;
	int i;

	val = readl(pl08x->base + PL080_ERR_STATUS);
	if (val) {
		/* An error interrupt (on one or more channels) */
		dev_err(&pl08x->adev->dev,
			"%s error interrupt, register value 0x%08x\n",
			__func__, val);
		/*
		 * Simply clear ALL PL08X error interrupts,
		 * regardless of channel and cause
		 * FIXME: should be 0x00000003 on PL081 really.
		 */
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	}
	val = readl(pl08x->base + PL080_INT_STATUS);
	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((1 << i) & val) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);

			mask |= (1 << i);
		}
	}
	/* Clear only the terminal interrupts on channels we processed */
	writel(mask, pl08x->base + PL080_TC_CLEAR);

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
					   struct dma_device *dmadev,
					   unsigned int channels,
					   bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->slave = true;
			chan->name = pl08x->pd->slave_channels[i].bus_id;
			chan->cd = &pl08x->pd->slave_channels[i];
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_info(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		chan->chan.cookie = 0;
		chan->lc = 0;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}
#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}
static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s\n",
			   ch->id, virt_chan ? virt_chan->name : "(none)");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}
static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}
static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view the channel state */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
			NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif
static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only.  If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->serving = NULL;
		ch->signal = -1;
		dev_info(&adev->dev,
			 "physical channel %d is %s\n", i,
			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					      pl08x->pd->num_slave_channels,
					      true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}
/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};
static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_pl080,
	},
	{ 0, 0 },
};
static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};
static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       " failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);