/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of_dma.h>
#include <linux/err.h>

#include "dmaengine.h"

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_srccachectrl {
	SCCTRL0,	/* Noncacheable and nonbufferable */
	SCCTRL1,	/* Bufferable only */
	SCCTRL2,	/* Cacheable, but do not allocate */
	SCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	SINVALID1,
	SINVALID2,
	SCCTRL6,	/* Cacheable write-through, allocate on reads only */
	SCCTRL7,	/* Cacheable write-back, allocate on reads only */
};

enum pl330_dstcachectrl {
	DCCTRL0,	/* Noncacheable and nonbufferable */
	DCCTRL1,	/* Bufferable only */
	DCCTRL2,	/* Cacheable, but do not allocate */
	DCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	DINVALID1,	/* AWCACHE = 0x1000 */
	DINVALID2,
	DCCTRL6,	/* Cacheable write-through, allocate on writes only */
	DCCTRL7,	/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

enum pl330_reqtype {
	MEMTOMEM,
	MEMTODEV,
	DEVTOMEM,
	DEVTODEV,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define INTSTATUS		0x28

#define FTC(n)			(_FTC + (n)*0x4)

#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define CPC(n)			(_CPC + (n)*0x8)

#define SA(n)			(_SA + (n)*0x20)

#define DA(n)			(_DA + (n)*0x20)

#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define LC0(n)			(_LC0 + (n)*0x20)

#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define DESIGNER		0x41
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13

#define SZ_DMAFLUSHP		2
#define SZ_DMALPEND		2

#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))

/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
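
/*
 * A back-of-the-envelope check of the numbers above (a sketch, assuming
 * a DMALD/DMAST pair costs 2 bytes of microcode and moves
 * BRST_SIZE(ccr) * BRST_LEN(ccr) bytes per burst): out of 256 bytes,
 * a few go to DMAMOV CCR/SAR/DAR, DMALP/DMALPEND, DMASEV and DMAEND,
 * leaving room for roughly a hundred unrolled load/store pairs, and two
 * nested 256-iteration DMALP loops multiply each pair by up to 64K
 * bursts.  At 1 byte/burst that is the ~2.5MB/5MB per request quoted
 * above; at 1 word/burst, ~10MB/20MB.
 */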

/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif

/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device *dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Client can freely use it. */
	void	*private_data;
	/* PL330 core data, Client must not touch it. */
	void	*pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;
	/*
	 * If the DMAC has some reset mechanism, then the
	 * client may want to provide pointer to the method.
	 */
	void (*dmac_reset)(struct pl330_info *pi);
};

/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	/* Size to xfer */
	u32 bytes;
	/*
	 * Pointer to next xfer in the list.
	 * The last xfer in the req must point to NULL.
	 */
	struct pl330_xfer *next;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request were successful. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum pl330_reqtype rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Unique token for this xfer, set by the client. */
	void *token;
	/* Callback to be called after xfer. */
	void (*xfer_cb)(void *token, enum pl330_op_err err);
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

/*
 * To know the status of the channel and DMAC, the client
 * provides a pointer to this structure. The PL330 core
 * fills it with current information.
 */
struct pl330_chanstatus {
	/*
	 * If the DMAC engine halted due to some error,
	 * the client should remove-add DMAC.
	 */
	int dmac_halted;
	/*
	 * If channel is halted due to some error,
	 * the client should ABORT/FLUSH and START the channel.
	 */
	int faulting;
	/* Location of last load */
	u32 src_addr;
	/* Location of last store */
	u32 dst_addr;
	/*
	 * Pointer to the currently active req, NULL if channel is
	 * inactive, even though the requests may be present.
	 */
	struct pl330_req *top_req;
	/* Pointer to req waiting second in the queue if any. */
	struct pl330_req *wait_req;
};

enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};

struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	/* Number of bytes taken to setup MC for the req */
	u32 mc_len;
	struct pl330_req *r;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};

struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};

enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};

struct pl330_dmac {
	spinlock_t		lock;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info	*pinfo;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
};

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC,
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

struct dma_pl330_filter_args {
	struct dma_pl330_dmac *pdmac;
	unsigned int chan_id;
};

static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
	if (r && r->xfer_cb)
		r->xfer_cb(r->token, err);
}

static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
		return true;
	else
		return false;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	buf[0] = CMD_DMAFLUSHP;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	buf[0] = CMD_DMALP;
	if (loop)
		buf[0] |= (1 << 1);

	cnt--;	/* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}

struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);
	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
		forever ? "FE" : "END",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
		loop ? '1' : '0', bjump);

	return SZ_DMALPEND;
}

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}

static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	buf[0] = CMD_DMASEV;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}

static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}

static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}

struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);
	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager)
		val |= (thrd->id << 8); /* Channel Number */
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}

/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);
	req->mc_len = 0;

	thrd->req_running = -1;
}

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}

static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->r->cfg->pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}

static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->r->rqtype) {
	case MEMTODEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DEVTOMEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case MEMTOMEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}
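
/*
 * For reference, one cycle of the peripheral transfer above expands to
 * a short microcode sequence along these lines (a sketch; the exact
 * byte stream depends on the CCR and the peripheral number):
 *
 *	DMAWFPS p	; wait for a single-transfer request from peripheral
 *	DMALDPS p	; load from the peripheral's FIFO address
 *	DMAST		; store to memory (address from DAR)
 *	DMAFLUSHP p	; resynchronize the peripheral's request state
 *
 * The mem-to-dev variant swaps the load/store pair (DMALD + DMASTPS),
 * and mem-to-mem uses plain DMALD/DMAST, with RMB/WMB barriers added
 * on pre-r1p0 silicon as shown in _ldst_memtomem().
 */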

/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
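
/*
 * A worked example of the split above (assuming the DMALP limit of 256
 * iterations per loop counter): for *bursts == 1024 the code picks
 * lcnt1 = 256, lcnt0 = 4 and cyc = 1, i.e. an outer x4 loop around an
 * inner x256 loop around one unrolled burst.  For *bursts >= 64K both
 * counters max out at 256 and the remaining factor goes into cyc
 * (bursts unrolled back to back), capped by cycmax so the loop body
 * still fits in the 8-bit backward jump of DMALPEND.  On return,
 * *bursts holds the count actually covered (lcnt0 * lcnt1 * cyc) and
 * _setup_loops() keeps calling _loop() until the whole transfer is
 * consumed.
 */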

static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}

static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	do {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;

		pxs->x = x;
		off += _setup_xfer(dry_run, &buf[off], pxs);

		x = x->next;
	} while (x);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}

static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
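
/*
 * Example CCR encoding (a sketch, using only the shift/mask macros
 * defined above): a secure, unprivileged mem-to-dev request with
 * brst_size = 2 (4-byte beats), brst_len = 1, incrementing source,
 * fixed destination, no byte swap and SCCTRL0/DCCTRL0 cache control
 * works out to
 *
 *	ccr = CC_SRCINC				bit 0
 *	    | (2 << CC_SRCBRSTSIZE_SHFT)	bits 1-3
 *	    | (0 << CC_SRCBRSTLEN_SHFT)		bits 4-7  (len - 1)
 *	    | (2 << CC_DSTBRSTSIZE_SHFT)	bits 15-17
 *	    = 0x00010005
 *
 * with every field not mentioned left at zero.
 */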

static inline bool _is_valid(u32 ccr)
{
	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == DINVALID1 || dcctl == DINVALID2
			|| scctl == SINVALID1 || scctl == SINVALID2)
		return false;
	else
		return true;
}

/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(void *ch_id, struct pl330_req *r)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)
		return -EINVAL;

	pl330 = thrd->dmac;
	pi = pl330->pinfo;
	regs = pi->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
		dev_info(thrd->dmac->pinfo->dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, r->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Use last settings, if not provided */
	if (r->cfg) {
		/* Prefer Secure Channel */
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;

		ccr = _prepare_ccr(r->cfg);
	} else {
		ccr = readl(regs + CC(thrd->id));
	}

	/* If this req doesn't have valid xfer settings */
	if (!_is_valid(ccr)) {
		ret = -EINVAL;
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);
		goto xfer_exit;
	}

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	xs.ccr = ccr;
	xs.r = r;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pi->mcbufsz / 2) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Try increasing mcbufsz\n",
			__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
	thrd->req[idx].r = r;

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
	struct pl330_req *rqdone, *tmp;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			rqdone = thrd->req[active].r;
			thrd->req[active].r = NULL;

			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
		list_del(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(rqdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}

static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EIO;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}

/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;
	int ev;

	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}

static bool _chan_ns(const struct pl330_info *pi, int i)
{
	return pi->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pi->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}

/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);
}
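
/*
 * Decoding example (a sketch of the arithmetic above): a CRD whose
 * data_width field reads 0x2 gives a bus width of 8 * (1 << 2) = 32
 * bits, and a data_buff field of 0x1f gives a 32-line data buffer.
 * Likewise a CR0 with num_chans field 0x7 and num_events field 0x1f
 * reports the maximum of 8 channels and 32 events, since both fields
 * store "count - 1".
 */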

static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].r = NULL;
	mark_free(thrd, 0);

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pi->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pi->mcbufsz / 2;
	thrd->req[1].r = NULL;
	mark_free(thrd, 1);
}

static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}

static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
				chans * pi->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pi->dev,
				chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}

static int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	void __iomem *regs;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */
	if (pi->dmac_reset)
		pi->dmac_reset(pi);

	regs = pi->base;

	/* Check if we can handle this DMAC */
	if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}

static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel((void *)thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}

static void dmac_free_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
}

static void pl330_del(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)
		return;

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	kfree(pl330);
	pi->pl330_data = NULL;
}

/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = NULL;
	unsigned long flags;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;

		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);
	}

	/* pch will be unset if list was empty */
	if (!pch)
		return;

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void handle_cyclic_desc_list(struct list_head *list)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = NULL;
	unsigned long flags;

	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;

		/* Change status to reload it */
		desc->status = PREP;
		pch = desc->pchan;
		callback = desc->txd.callback;
		if (callback)
			callback(desc->txd.callback_param);
	}

	/* pch will be unset if list was empty */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	if (pch->cyclic)
		handle_cyclic_desc_list(&list);
	else
		free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static bool pl330_dt_filter(struct dma_chan *chan, void *param)
{
	struct dma_pl330_filter_args *fargs = param;

	if (chan->device != &fargs->pdmac->ddma)
		return false;

	return (chan->chan_id == fargs->chan_id);
}

bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	peri_id = chan->private;
	return *peri_id == (unsigned)param;
}
EXPORT_SYMBOL(pl330_filter);
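
/*
 * Typical non-DT client usage of the exported filter, sketched under
 * the assumption that the platform passed a peri_id array through
 * dma_pl330_platdata; the channel request would look roughly like:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter,
 *				   (void *)(unsigned long)peri_id);
 *
 * DT users instead go through of_dma_pl330_xlate() below, which wraps
 * dma_request_channel() with pl330_dt_filter and the requested channel
 * index.
 */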

static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
	struct dma_pl330_filter_args fargs;
	dma_cap_mask_t cap;

	if (!pdmac)
		return NULL;

	if (count != 1)
		return NULL;

	fargs.pdmac = pdmac;
	fargs.chan_id = dma_spec->args[0];

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);
	dma_cap_set(DMA_CYCLIC, cap);

	return dma_request_channel(cap, pl330_dt_filter, &fargs);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;
	LIST_HEAD(list);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
			desc->status = DONE;
			list_move_tail(&desc->node, &list);
		}

		list_splice_tail_init(&list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
		return -ENXIO;
	}

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->work_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should lookout for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicating this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
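
/*
 * Worked example for the burst-length choice above (assuming a 64-bit
 * data bus and a 16-line FIFO): burst_len starts as 8 * 16 = 128, is
 * divided by the burst size (e.g. >> 2 for 4-byte beats, giving 32) and
 * is then clamped to the architectural maximum of 16.  Finally it is
 * walked down until burst_len * (1 << brst_size) divides the total
 * length, so a 4096-byte memcpy keeps 16 while an odd-sized one drops
 * to the largest burst count that still divides evenly.
 */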

static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_addr_t dst;
	dma_addr_t src;

	desc = pl330_get_desc(pch);
	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->rqcfg.src_inc = 1;
		desc->rqcfg.dst_inc = 0;
		desc->req.rqtype = MEMTODEV;
		src = dma_addr;
		dst = pch->fifo_addr;
		break;
	case DMA_DEV_TO_MEM:
		desc->rqcfg.src_inc = 0;
		desc->rqcfg.dst_inc = 1;
		desc->req.rqtype = DEVTOMEM;
		src = pch->fifo_addr;
		dst = dma_addr;
		break;
	default:
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
			__func__, __LINE__);
		return NULL;
	}

	desc->rqcfg.brst_size = pch->burst_sz;
	desc->rqcfg.brst_len = 1;

	pch->cyclic = true;

	fill_px(&desc->px, dst, src, period_len);

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->req.rqtype = MEMTOMEM;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	pi->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pi->base))
		return PTR_ERR(pi->base);

	amba_set_drvdata(adev, pdmac);

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		return ret;

	pi->pcfg.periph_id = adev->periphid;
	ret = pl330_add(pi);
	if (ret)
		goto probe_err1;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
	else
		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);

	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err2;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
					 of_dma_pl330_xlate, pdmac);
		if (ret)
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err2:
	amba_set_drvdata(adev, NULL);

	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pl330_del(pi);
probe_err1:
	free_irq(irq, pi);

	return ret;
}

static int pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	int irq;

	if (!pdmac)
		return 0;

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	dma_async_device_unregister(&pdmac->ddma);
	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");