/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Only the former works sanely with scatter lists, so we only implement
 * the DMAC flow control method.  However, peripherals which use the LBREQ
 * and LSREQ signals (eg, MMCI) are unable to use this mode, and these
 * hardware restrictions therefore prevent them from using scatter DMA.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
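/*
 * Example of the FIFO burst sizing rule above (illustrative, not part of
 * the driver logic): for a peripheral with a 32-word FIFO, a
 * memory-to-peripheral transfer would use a source burst size of 16 words
 * and a destination burst size of 32 words, so a destination burst never
 * exceeds what the FIFO can accept.
 */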
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>

#define DRIVER_NAME	"pl08xdmac"
/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
};
/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};
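/*
 * Example LLI chain layout (illustrative sketch): with LLIs allocated
 * contiguously from the DMA pool, entry n's next pointer is simply the
 * bus address of entry n + 1, with bit[0] optionally set to fetch LLIs
 * over AHB2:
 *
 *	lli[n].lli = llis_bus + (n + 1) * sizeof(struct pl08x_lli);
 *	lli[n].lli |= PL080_LLI_LM_AHB2;	// only when fetching on AHB2
 *	lli[last].lli = 0;			// a zero pointer ends the chain
 */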
/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};
/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT		(10)	/* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE		(1 << PL08X_BOUNDARY_SHIFT)

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8
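/*
 * Worked example of the sizing above: with PL08X_LLI_TSFR_SIZE == 0x2000
 * bytes and a 16-byte struct pl08x_lli, MAX_NUM_TSFR_LLIS works out at
 * 0x2000 / 16 = 512 LLI entries per transfer buffer.
 */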
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}
/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}
/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}
static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}
/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}
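/*
 * Example (illustrative): a cctl encoding SWIDTH == PL080_WIDTH_16BIT
 * with a transfer size field of 0x100 describes 0x100 transfers of two
 * bytes each, so get_bytes_in_cctl() returns 0x200 bytes.
 */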
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			bytes += txdi->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}
/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}
static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}
static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
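/*
 * Example (illustrative): pl08x_cctl_bits(cctl, 4, 4, 0x40) yields a
 * cctl with 32-bit source and destination widths and a transfer size of
 * 0x40 source-width units, i.e. 0x100 bytes per LLI.
 */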
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};
/*
 * Autoselect a master bus to use for the transfer.  This prefers the
 * destination bus if both are available; if there is a fixed address on
 * one bus the other will be chosen.
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else {
		if (bd->dstbus.buswidth == 4) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 4) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else if (bd->dstbus.buswidth == 2) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else if (bd->srcbus.buswidth == 2) {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		} else {
			/* bd->srcbus.buswidth == 1 */
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		}
	}
}
/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}
/*
 * Return number of bytes to fill to boundary, or len.
 * This calculation works for any value of addr.
 */
static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
{
	size_t boundary_len = PL08X_BOUNDARY_SIZE -
			(addr & (PL08X_BOUNDARY_SIZE - 1));

	return min(boundary_len, len);
}
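/*
 * Worked example (illustrative): for addr == 0x12345ff0 the distance to
 * the next 1KiB boundary is 0x400 - 0x3f0 = 0x10, so a len of 0x100 is
 * clamped to 0x10, while a len of 0x8 is returned unchanged.
 */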
/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl;
	size_t max_bytes_per_lli;
	size_t total_bytes = 0;
	struct pl08x_lli *llis_va;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
				      &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	/* Get the default CCTL */
	cctl = txd->cctl;

	bd.txd = txd;
	bd.srcbus.addr = txd->src_addr;
	bd.dstbus.addr = txd->dst_addr;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
				       PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
				       PL080_CONTROL_DWIDTH_SHIFT);

	/* Set up the bus widths to the maximum */
	bd.srcbus.buswidth = bd.srcbus.maxwidth;
	bd.dstbus.buswidth = bd.dstbus.maxwidth;

	/*
	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
	 */
	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
		PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* We need to count this down to zero */
	bd.remainder = txd->len;

	/*
	 * Choose bus to align to
	 * - prefers destination bus if both available
	 * - if fixed address on one bus chooses other
	 */
	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

	dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
		 bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
		 bd.srcbus.buswidth,
		 bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
		 bd.dstbus.buswidth,
		 bd.remainder, max_bytes_per_lli);
	dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
		 mbus == &bd.srcbus ? "src" : "dst",
		 sbus == &bd.srcbus ? "src" : "dst");

	if (txd->len < mbus->buswidth) {
		/* Less than a bus width available - send as single bytes */
		while (bd.remainder) {
			dev_vdbg(&pl08x->adev->dev,
				 "%s single byte LLIs for a transfer of "
				 "less than a bus width (remain 0x%08x)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	} else {
		/* Make one byte LLIs until master bus is aligned */
		while ((mbus->addr) % (mbus->buswidth)) {
			dev_vdbg(&pl08x->adev->dev,
				"%s adjustment lli for less than bus width "
				 "(remain 0x%08x)\n",
				 __func__, bd.remainder);
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}

		/*
		 * Master now aligned
		 * - if slave is not then we must set its width down
		 */
		if (sbus->addr % sbus->buswidth) {
			dev_dbg(&pl08x->adev->dev,
				"%s set down bus width to one byte\n",
				 __func__);

			sbus->buswidth = 1;
		}

		/*
		 * Make largest possible LLIs until less than one bus
		 * width left
		 */
		while (bd.remainder > (mbus->buswidth - 1)) {
			size_t lli_len, target_len, tsize, odd_bytes;

			/*
			 * If enough left try to send max possible,
			 * otherwise try to send the remainder
			 */
			target_len = min(bd.remainder, max_bytes_per_lli);

			/*
			 * Set bus lengths for incrementing buses to the
			 * number of bytes which fill to next memory boundary,
			 * limiting on the target length calculated above.
			 */
			if (cctl & PL080_CONTROL_SRC_INCR)
				bd.srcbus.fill_bytes =
					pl08x_pre_boundary(bd.srcbus.addr,
						target_len);
			else
				bd.srcbus.fill_bytes = target_len;

			if (cctl & PL080_CONTROL_DST_INCR)
				bd.dstbus.fill_bytes =
					pl08x_pre_boundary(bd.dstbus.addr,
						target_len);
			else
				bd.dstbus.fill_bytes = target_len;

			/* Find the nearest */
			lli_len	= min(bd.srcbus.fill_bytes,
				      bd.dstbus.fill_bytes);

			BUG_ON(lli_len > bd.remainder);

			if (lli_len <= 0) {
				dev_err(&pl08x->adev->dev,
					"%s lli_len is %zu, <= 0\n",
						__func__, lli_len);
				return 0;
			}

			if (lli_len == target_len) {
				/*
				 * Can send what we wanted.
				 * Maintain alignment
				 */
				lli_len	= (lli_len/mbus->buswidth) *
							mbus->buswidth;
				odd_bytes = 0;
			} else {
				/*
				 * So now we know how many bytes to transfer
				 * to get to the nearest boundary.  The next
				 * LLI will past the boundary.  However, we
				 * may be working to a boundary on the slave
				 * bus.  We need to ensure the master stays
				 * aligned, and that we are working in
				 * multiples of the bus widths.
				 */
				odd_bytes = lli_len % mbus->buswidth;
				lli_len -= odd_bytes;
			}

			if (lli_len) {
				/*
				 * Check against minimum bus alignment:
				 * Calculate actual transfer size in relation
				 * to bus width and get a maximum remainder of
				 * the smallest bus width - 1
				 */
				/* FIXME: use round_down()? */
				tsize = lli_len / min(mbus->buswidth,
						      sbus->buswidth);
				lli_len	= tsize * min(mbus->buswidth,
						      sbus->buswidth);

				if (target_len != lli_len) {
					dev_vdbg(&pl08x->adev->dev,
					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
					__func__, target_len, lli_len, txd->len);
				}

				cctl = pl08x_cctl_bits(cctl,
						       bd.srcbus.buswidth,
						       bd.dstbus.buswidth,
						       tsize);

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
					lli_len, cctl);
				total_bytes += lli_len;
			}

			if (odd_bytes) {
				/*
				 * Creep past the boundary, maintaining
				 * master alignment
				 */
				int j;
				for (j = 0; (j < mbus->buswidth)
						&& (bd.remainder); j++) {
					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
					dev_vdbg(&pl08x->adev->dev,
						"%s align with boundary, single byte (remain 0x%08zx)\n",
						__func__, bd.remainder);
					pl08x_fill_lli_for_desc(&bd,
						num_llis++, 1, cctl);
					total_bytes++;
				}
			}
		}

		/*
		 * Send any odd bytes
		 */
		while (bd.remainder) {
			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
			dev_vdbg(&pl08x->adev->dev,
				"%s align with boundary, single odd byte (remain %zu)\n",
				__func__, bd.remainder);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
			total_bytes++;
		}
	}
	if (total_bytes != txd->len) {
		dev_err(&pl08x->adev->dev,
			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
			__func__, total_bytes, txd->len);
		return 0;
	}

	if (num_llis >= MAX_NUM_TSFR_LLIS) {
		dev_err(&pl08x->adev->dev,
			"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
			__func__, (u32) MAX_NUM_TSFR_LLIS);
		return 0;
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}
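/*
 * Worked example of the LLI filling above (illustrative figures): a
 * 32-bit wide transfer of 0x500 bytes whose buses sit 0x10 bytes below
 * a 1KiB boundary is cut at the boundaries, giving LLIs of 0x10, 0x400
 * and 0xf0 bytes; the final LLI then has its next pointer zeroed and
 * PL080_CONTROL_TC_IRQ_EN set.
 */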
/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	/* Free the LLI */
	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	kfree(txd);
}
static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}
/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}
/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave &&
	    ch->signal < 0 &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;

		/* Assign the flow control signal to this channel */
		if (txd->direction == DMA_TO_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
		else if (txd->direction == DMA_FROM_DEVICE)
			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 ch->signal,
		 plchan->name);

	plchan->phychan_hold++;
	plchan->phychan = ch;

	return 0;
}
static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}
static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	plchan->chan.cookie += 1;
	if (plchan->chan.cookie < 0)
		plchan->chan.cookie = 1;
	tx->cookie = plchan->chan.cookie;

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else
		plchan->phychan_hold--;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return tx->cookie;
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}
/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
		    dma_cookie_t cookie,
		    struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * This cookie not complete yet
	 */
	last_used = plchan->chan.cookie;
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
			 bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}
/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};
/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}
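/*
 * Example routings (illustrative): with src == PL08X_AHB1 | PL08X_AHB2
 * and dst == PL08X_AHB1 | PL08X_AHB2, the source stays on AHB1 and the
 * destination moves to AHB2; with src == PL08X_AHB2 only and dst ==
 * PL08X_AHB1 only, PL080_CONTROL_SRC_AHB2 is set and the destination is
 * left on AHB1.
 */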
static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}
static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}
static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}
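/*
 * Example (illustrative): a maxburst of 20 words skips the 256..32
 * entries and matches the first entry with burstwords <= 20, so
 * PL080_BSIZE_16 is returned; a maxburst of 0 only matches the final
 * single-transfer entry, PL080_BSIZE_1.
 */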
static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	enum dma_slave_buswidth addr_width;
	u32 width, burst, maxburst;
	u32 cctl = 0;

	if (!plchan->slave)
		return -EINVAL;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}

	width = pl08x_width(addr_width);
	if (width == ~0) {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return -EINVAL;
	}

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	if (plchan->runtime_direction == DMA_FROM_DEVICE) {
		plchan->src_addr = config->src_addr;
		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
			pl08x_select_bus(plchan->cd->periph_buses,
					 pl08x->mem_buses);
	} else {
		plchan->dst_addr = config->dst_addr;
		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
			pl08x_select_bus(pl08x->mem_buses,
					 plchan->cd->periph_buses);
	}

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
		addr_width,
		maxburst,
		cctl);

	return 0;
}
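/*
 * Illustrative sketch (not part of this driver): a slave client hands
 * in the runtime configuration above via the generic DMA_SLAVE_CONFIG
 * control operation, e.g. for a peripheral RX FIFO at a made-up bus
 * address:
 *
 *	struct dma_slave_config config = {
 *		.direction = DMA_FROM_DEVICE,
 *		.src_addr = 0x80101000,	// hypothetical FIFO address
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 16,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long) &config);
 */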
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}
static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		kfree(txd);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here.  The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending().  If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}
/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	txd->direction = DMA_NONE;
	txd->src_addr = src;
	txd->dst_addr = dest;
	txd->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	txd->len = sgl->length;

	if (direction == DMA_TO_DEVICE) {
		txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl = plchan->dst_cctl;
		txd->src_addr = sgl->dma_address;
		txd->dst_addr = plchan->dst_addr;
	} else if (direction == DMA_FROM_DEVICE) {
		txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		txd->cctl = plchan->src_cctl;
		txd->src_addr = plchan->src_addr;
		txd->dst_addr = sgl->dma_address;
	} else {
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
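/*
 * Example use of this filter (illustrative): clients pass the channel
 * name from the platform data when requesting a channel, e.g. with a
 * made-up bus_id "ssp0_rx":
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "ssp0_rx");
 */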
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	u32 val;

	val = readl(pl08x->base + PL080_CONFIG);
	val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
	/* We implicitly clear bit 1 and that means little-endian mode */
	val |= PL080_CONFIG_ENABLE;
	writel(val, pl08x->base + PL080_CONFIG);
}
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, txd->src_addr, txd->len,
				DMA_TO_DEVICE);
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, txd->dst_addr, txd->len,
				DMA_FROM_DEVICE);
	}
}
static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		plchan->lc = txd->tx.cookie;
	}

	/* If a new descriptor is queued, set it up plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued.  Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that free:d up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
				waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}
static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 val;
	int i;
	u32 mask = 0;

	val = readl(pl08x->base + PL080_ERR_STATUS);
	if (val) {
		/* An error interrupt (on one or more channels) */
		dev_err(&pl08x->adev->dev,
			"%s error interrupt, register value 0x%08x\n",
				__func__, val);
		/*
		 * Simply clear ALL PL08X error interrupts,
		 * regardless of channel and cause
		 * FIXME: should be 0x00000003 on PL081 really.
		 */
		writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	}
	val = readl(pl08x->base + PL080_INT_STATUS);
	for (i = 0; i < pl08x->vd->channels; i++) {
		if ((1 << i) & val) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);

			mask |= (1 << i);
		}
	}
	/* Clear only the terminal interrupts on channels we processed */
	writel(mask, pl08x->base + PL080_TC_CLEAR);

	return mask ? IRQ_HANDLED : IRQ_NONE;
}
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	u32 cctl = pl08x_cctl(chan->cd->cctl);

	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->src_addr = chan->cd->addr;
	chan->dst_addr = chan->cd->addr;
	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
		pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
}
/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
					   struct dma_device *dmadev,
					   unsigned int channels,
					   bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_info(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		chan->chan.cookie = 0;
		chan->lc = 0;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}
static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}
#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}
static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s\n",
			   ch->id, virt_chan ? virt_chan->name : "(none)");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}
static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all clocks */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
			NULL, pl08x,
			&pl08x_debugfs_operations);
}

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only.  If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate "
			"physical channel holders\n",
			__func__);
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->serving = NULL;
		ch->signal = -1;
		dev_info(&adev->dev,
			 "physical channel %d is %s\n", i,
			 pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					      pl08x->pd->num_slave_channels,
					      true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to enumerate slave channels - %d\n",
				__func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);
	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}
/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_pl080,
	},
	{ 0, 0 },
};
static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;

	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       ": failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);