1 /* linux/arch/arm/common/pl330.c
3 * Copyright (C) 2010 Samsung Electronics Co Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/string.h>
26 #include <linux/io.h>
27 #include <linux/delay.h>
28 #include <linux/interrupt.h>
29 #include <linux/dma-mapping.h>
31 #include <asm/hardware/pl330.h>
33 /* Register and Bit field Definitions */
34 #define DS 0x0
35 #define DS_ST_STOP 0x0
36 #define DS_ST_EXEC 0x1
37 #define DS_ST_CMISS 0x2
38 #define DS_ST_UPDTPC 0x3
39 #define DS_ST_WFE 0x4
40 #define DS_ST_ATBRR 0x5
41 #define DS_ST_QBUSY 0x6
42 #define DS_ST_WFP 0x7
43 #define DS_ST_KILL 0x8
44 #define DS_ST_CMPLT 0x9
45 #define DS_ST_FLTCMP 0xe
46 #define DS_ST_FAULT 0xf
48 #define DPC 0x4
49 #define INTEN 0x20
50 #define ES 0x24
51 #define INTSTATUS 0x28
52 #define INTCLR 0x2c
53 #define FSM 0x30
54 #define FSC 0x34
55 #define FTM 0x38
57 #define _FTC 0x40
58 #define FTC(n) (_FTC + (n)*0x4)
60 #define _CS 0x100
61 #define CS(n) (_CS + (n)*0x8)
62 #define CS_CNS (1 << 21)
64 #define _CPC 0x104
65 #define CPC(n) (_CPC + (n)*0x8)
67 #define _SA 0x400
68 #define SA(n) (_SA + (n)*0x20)
70 #define _DA 0x404
71 #define DA(n) (_DA + (n)*0x20)
73 #define _CC 0x408
74 #define CC(n) (_CC + (n)*0x20)
76 #define CC_SRCINC (1 << 0)
77 #define CC_DSTINC (1 << 14)
78 #define CC_SRCPRI (1 << 8)
79 #define CC_DSTPRI (1 << 22)
80 #define CC_SRCNS (1 << 9)
81 #define CC_DSTNS (1 << 23)
82 #define CC_SRCIA (1 << 10)
83 #define CC_DSTIA (1 << 24)
84 #define CC_SRCBRSTLEN_SHFT 4
85 #define CC_DSTBRSTLEN_SHFT 18
86 #define CC_SRCBRSTSIZE_SHFT 1
87 #define CC_DSTBRSTSIZE_SHFT 15
88 #define CC_SRCCCTRL_SHFT 11
89 #define CC_SRCCCTRL_MASK 0x7
90 #define CC_DSTCCTRL_SHFT 25
91 #define CC_DSTCCTRL_MASK 0x7
92 #define CC_SWAP_SHFT 28
94 #define _LC0 0x40c
95 #define LC0(n) (_LC0 + (n)*0x20)
97 #define _LC1 0x410
98 #define LC1(n) (_LC1 + (n)*0x20)
100 #define DBGSTATUS 0xd00
101 #define DBG_BUSY (1 << 0)
103 #define DBGCMD 0xd04
104 #define DBGINST0 0xd08
105 #define DBGINST1 0xd0c
107 #define CR0 0xe00
108 #define CR1 0xe04
109 #define CR2 0xe08
110 #define CR3 0xe0c
111 #define CR4 0xe10
112 #define CRD 0xe14
114 #define PERIPH_ID 0xfe0
115 #define PCELL_ID 0xff0
117 #define CR0_PERIPH_REQ_SET (1 << 0)
118 #define CR0_BOOT_EN_SET (1 << 1)
119 #define CR0_BOOT_MAN_NS (1 << 2)
120 #define CR0_NUM_CHANS_SHIFT 4
121 #define CR0_NUM_CHANS_MASK 0x7
122 #define CR0_NUM_PERIPH_SHIFT 12
123 #define CR0_NUM_PERIPH_MASK 0x1f
124 #define CR0_NUM_EVENTS_SHIFT 17
125 #define CR0_NUM_EVENTS_MASK 0x1f
127 #define CR1_ICACHE_LEN_SHIFT 0
128 #define CR1_ICACHE_LEN_MASK 0x7
129 #define CR1_NUM_ICACHELINES_SHIFT 4
130 #define CR1_NUM_ICACHELINES_MASK 0xf
132 #define CRD_DATA_WIDTH_SHIFT 0
133 #define CRD_DATA_WIDTH_MASK 0x7
134 #define CRD_WR_CAP_SHIFT 4
135 #define CRD_WR_CAP_MASK 0x7
136 #define CRD_WR_Q_DEP_SHIFT 8
137 #define CRD_WR_Q_DEP_MASK 0xf
138 #define CRD_RD_CAP_SHIFT 12
139 #define CRD_RD_CAP_MASK 0x7
140 #define CRD_RD_Q_DEP_SHIFT 16
141 #define CRD_RD_Q_DEP_MASK 0xf
142 #define CRD_DATA_BUFF_SHIFT 20
143 #define CRD_DATA_BUFF_MASK 0x3ff
145 #define PART 0x330
146 #define DESIGNER 0x41
147 #define REVISION 0x0
148 #define INTEG_CFG 0x0
149 #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12) \
150 | (REVISION << 20) | (INTEG_CFG << 24))
152 #define PCELL_ID_VAL 0xb105f00d
154 #define PL330_STATE_STOPPED (1 << 0)
155 #define PL330_STATE_EXECUTING (1 << 1)
156 #define PL330_STATE_WFE (1 << 2)
157 #define PL330_STATE_FAULTING (1 << 3)
158 #define PL330_STATE_COMPLETING (1 << 4)
159 #define PL330_STATE_WFP (1 << 5)
160 #define PL330_STATE_KILLING (1 << 6)
161 #define PL330_STATE_FAULT_COMPLETING (1 << 7)
162 #define PL330_STATE_CACHEMISS (1 << 8)
163 #define PL330_STATE_UPDTPC (1 << 9)
164 #define PL330_STATE_ATBARRIER (1 << 10)
165 #define PL330_STATE_QUEUEBUSY (1 << 11)
166 #define PL330_STATE_INVALID (1 << 15)
168 #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
169 | PL330_STATE_WFE | PL330_STATE_FAULTING)
171 #define CMD_DMAADDH 0x54
172 #define CMD_DMAEND 0x00
173 #define CMD_DMAFLUSHP 0x35
174 #define CMD_DMAGO 0xa0
175 #define CMD_DMALD 0x04
176 #define CMD_DMALDP 0x25
177 #define CMD_DMALP 0x20
178 #define CMD_DMALPEND 0x28
179 #define CMD_DMAKILL 0x01
180 #define CMD_DMAMOV 0xbc
181 #define CMD_DMANOP 0x18
182 #define CMD_DMARMB 0x12
183 #define CMD_DMASEV 0x34
184 #define CMD_DMAST 0x08
185 #define CMD_DMASTP 0x29
186 #define CMD_DMASTZ 0x0c
187 #define CMD_DMAWFE 0x36
188 #define CMD_DMAWFP 0x30
189 #define CMD_DMAWMB 0x13
191 #define SZ_DMAADDH 3
192 #define SZ_DMAEND 1
193 #define SZ_DMAFLUSHP 2
194 #define SZ_DMALD 1
195 #define SZ_DMALDP 2
196 #define SZ_DMALP 2
197 #define SZ_DMALPEND 2
198 #define SZ_DMAKILL 1
199 #define SZ_DMAMOV 6
200 #define SZ_DMANOP 1
201 #define SZ_DMARMB 1
202 #define SZ_DMASEV 2
203 #define SZ_DMAST 1
204 #define SZ_DMASTP 2
205 #define SZ_DMASTZ 1
206 #define SZ_DMAWFE 2
207 #define SZ_DMAWFP 2
208 #define SZ_DMAWMB 1
209 #define SZ_DMAGO 6
211 #define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
212 #define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
214 #define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
215 #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
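/*
 * Worked example (illustrative): with a CCR programmed for 4-byte beats
 * and a burst length of 8, BYTE_TO_BURST(4096, ccr) = 4096 / 4 / 8 = 128
 * bursts, and BURST_TO_BYTE(128, ccr) gives back 4096 bytes.
 */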
218 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
219 * at 1byte/burst for P<->M and M<->M respectively.
220 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
221 * should be enough for P<->M and M<->M respectively.
223 #define MCODE_BUFF_PER_REQ 256
226 * Mark a _pl330_req as free.
227 * We do it by writing DMAEND as the first instruction
228 * because no valid request is going to have DMAEND as
229 * its first instruction to execute.
231 #define MARK_FREE(req) do { \
232 _emit_END(0, (req)->mc_cpu); \
233 (req)->mc_len = 0; \
234 } while (0)
236 /* If the _pl330_req is available to the client */
237 #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
239 /* Use this _only_ to wait on transient states */
240 #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
242 #ifdef PL330_DEBUG_MCGEN
243 static unsigned cmd_line;
244 #define PL330_DBGCMD_DUMP(off, x...) do { \
245 printk("%x:", cmd_line); \
246 printk(x); \
247 cmd_line += off; \
248 } while (0)
249 #define PL330_DBGMC_START(addr) (cmd_line = addr)
250 #else
251 #define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
252 #define PL330_DBGMC_START(addr) do {} while (0)
253 #endif
255 struct _xfer_spec {
256 u32 ccr;
257 struct pl330_req *r;
258 struct pl330_xfer *x;
261 enum dmamov_dst {
262 SAR = 0,
263 CCR,
264 DAR,
267 enum pl330_dst {
268 SRC = 0,
269 DST,
272 enum pl330_cond {
273 SINGLE,
274 BURST,
275 ALWAYS,
278 struct _pl330_req {
279 u32 mc_bus;
280 void *mc_cpu;
281 /* Number of bytes taken to setup MC for the req */
282 u32 mc_len;
283 struct pl330_req *r;
284 /* Hook to attach to DMAC's list of reqs with due callback */
285 struct list_head rqd;
288 /* ToBeDone for tasklet */
289 struct _pl330_tbd {
290 bool reset_dmac;
291 bool reset_mngr;
292 u8 reset_chan;
295 /* A DMAC Thread */
296 struct pl330_thread {
297 u8 id;
298 int ev;
299 /* If the channel is not yet acquired by any client */
300 bool free;
301 /* Parent DMAC */
302 struct pl330_dmac *dmac;
303 /* Only two at a time */
304 struct _pl330_req req[2];
305 /* Index of the last submitted request */
306 unsigned lstenq;
309 enum pl330_dmac_state {
310 UNINIT,
311 INIT,
312 DYING,
315 /* A DMAC */
316 struct pl330_dmac {
317 spinlock_t lock;
318 /* Holds list of reqs with due callbacks */
319 struct list_head req_done;
320 /* Pointer to platform specific stuff */
321 struct pl330_info *pinfo;
322 /* Maximum possible events/irqs */
323 int events[32];
324 /* BUS address of MicroCode buffer */
325 u32 mcode_bus;
326 /* CPU address of MicroCode buffer */
327 void *mcode_cpu;
328 /* List of all Channel threads */
329 struct pl330_thread *channels;
330 /* Pointer to the MANAGER thread */
331 struct pl330_thread *manager;
332 /* To handle bad news in interrupt */
333 struct tasklet_struct tasks;
334 struct _pl330_tbd dmac_tbd;
335 /* State of DMAC operation */
336 enum pl330_dmac_state state;
339 static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
341 if (r && r->xfer_cb)
342 r->xfer_cb(r->token, err);
345 static inline bool _queue_empty(struct pl330_thread *thrd)
347 return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
348 ? true : false;
351 static inline bool _queue_full(struct pl330_thread *thrd)
353 return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
354 ? false : true;
357 static inline bool is_manager(struct pl330_thread *thrd)
359 struct pl330_dmac *pl330 = thrd->dmac;
361 /* MANAGER is indexed at the end */
362 if (thrd->id == pl330->pinfo->pcfg.num_chan)
363 return true;
364 else
365 return false;
368 /* If manager of the thread is in Non-Secure mode */
369 static inline bool _manager_ns(struct pl330_thread *thrd)
371 struct pl330_dmac *pl330 = thrd->dmac;
373 return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
376 static inline u32 get_id(struct pl330_info *pi, u32 off)
378 void __iomem *regs = pi->base;
379 u32 id = 0;
381 id |= (readb(regs + off + 0x0) << 0);
382 id |= (readb(regs + off + 0x4) << 8);
383 id |= (readb(regs + off + 0x8) << 16);
384 id |= (readb(regs + off + 0xc) << 24);
386 return id;
389 static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
390 enum pl330_dst da, u16 val)
392 if (dry_run)
393 return SZ_DMAADDH;
395 buf[0] = CMD_DMAADDH;
396 buf[0] |= (da << 1);
397 *((u16 *)&buf[1]) = val;
399 PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
400 da == 1 ? "DA" : "SA", val);
402 return SZ_DMAADDH;
405 static inline u32 _emit_END(unsigned dry_run, u8 buf[])
407 if (dry_run)
408 return SZ_DMAEND;
410 buf[0] = CMD_DMAEND;
412 PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
414 return SZ_DMAEND;
417 static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
419 if (dry_run)
420 return SZ_DMAFLUSHP;
422 buf[0] = CMD_DMAFLUSHP;
424 peri &= 0x1f;
425 peri <<= 3;
426 buf[1] = peri;
428 PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
430 return SZ_DMAFLUSHP;
433 static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
435 if (dry_run)
436 return SZ_DMALD;
438 buf[0] = CMD_DMALD;
440 if (cond == SINGLE)
441 buf[0] |= (0 << 1) | (1 << 0);
442 else if (cond == BURST)
443 buf[0] |= (1 << 1) | (1 << 0);
445 PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
446 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
448 return SZ_DMALD;
451 static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
452 enum pl330_cond cond, u8 peri)
454 if (dry_run)
455 return SZ_DMALDP;
457 buf[0] = CMD_DMALDP;
459 if (cond == BURST)
460 buf[0] |= (1 << 1);
462 peri &= 0x1f;
463 peri <<= 3;
464 buf[1] = peri;
466 PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
467 cond == SINGLE ? 'S' : 'B', peri >> 3);
469 return SZ_DMALDP;
472 static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
473 unsigned loop, u8 cnt)
475 if (dry_run)
476 return SZ_DMALP;
478 buf[0] = CMD_DMALP;
480 if (loop)
481 buf[0] |= (1 << 1);
483 cnt--; /* DMAC increments by 1 internally */
484 buf[1] = cnt;
486 PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
488 return SZ_DMALP;
491 struct _arg_LPEND {
492 enum pl330_cond cond;
493 bool forever;
494 unsigned loop;
495 u8 bjump;
498 static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
499 const struct _arg_LPEND *arg)
501 enum pl330_cond cond = arg->cond;
502 bool forever = arg->forever;
503 unsigned loop = arg->loop;
504 u8 bjump = arg->bjump;
506 if (dry_run)
507 return SZ_DMALPEND;
509 buf[0] = CMD_DMALPEND;
511 if (loop)
512 buf[0] |= (1 << 2);
514 if (!forever)
515 buf[0] |= (1 << 4);
517 if (cond == SINGLE)
518 buf[0] |= (0 << 1) | (1 << 0);
519 else if (cond == BURST)
520 buf[0] |= (1 << 1) | (1 << 0);
522 buf[1] = bjump;
524 PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
525 forever ? "FE" : "END",
526 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
527 loop ? '1' : '0',
528 bjump);
530 return SZ_DMALPEND;
533 static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
535 if (dry_run)
536 return SZ_DMAKILL;
538 buf[0] = CMD_DMAKILL;
540 return SZ_DMAKILL;
543 static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
544 enum dmamov_dst dst, u32 val)
546 if (dry_run)
547 return SZ_DMAMOV;
549 buf[0] = CMD_DMAMOV;
550 buf[1] = dst;
551 *((u32 *)&buf[2]) = val;
553 PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
554 dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
556 return SZ_DMAMOV;
559 static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
561 if (dry_run)
562 return SZ_DMANOP;
564 buf[0] = CMD_DMANOP;
566 PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
568 return SZ_DMANOP;
571 static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
573 if (dry_run)
574 return SZ_DMARMB;
576 buf[0] = CMD_DMARMB;
578 PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
580 return SZ_DMARMB;
583 static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
585 if (dry_run)
586 return SZ_DMASEV;
588 buf[0] = CMD_DMASEV;
590 ev &= 0x1f;
591 ev <<= 3;
592 buf[1] = ev;
594 PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
596 return SZ_DMASEV;
599 static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
601 if (dry_run)
602 return SZ_DMAST;
604 buf[0] = CMD_DMAST;
606 if (cond == SINGLE)
607 buf[0] |= (0 << 1) | (1 << 0);
608 else if (cond == BURST)
609 buf[0] |= (1 << 1) | (1 << 0);
611 PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
612 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
614 return SZ_DMAST;
617 static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
618 enum pl330_cond cond, u8 peri)
620 if (dry_run)
621 return SZ_DMASTP;
623 buf[0] = CMD_DMASTP;
625 if (cond == BURST)
626 buf[0] |= (1 << 1);
628 peri &= 0x1f;
629 peri <<= 3;
630 buf[1] = peri;
632 PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
633 cond == SINGLE ? 'S' : 'B', peri >> 3);
635 return SZ_DMASTP;
638 static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
640 if (dry_run)
641 return SZ_DMASTZ;
643 buf[0] = CMD_DMASTZ;
645 PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
647 return SZ_DMASTZ;
650 static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
651 unsigned invalidate)
653 if (dry_run)
654 return SZ_DMAWFE;
656 buf[0] = CMD_DMAWFE;
658 ev &= 0x1f;
659 ev <<= 3;
660 buf[1] = ev;
662 if (invalidate)
663 buf[1] |= (1 << 1);
665 PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
666 ev >> 3, invalidate ? ", I" : "");
668 return SZ_DMAWFE;
671 static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
672 enum pl330_cond cond, u8 peri)
674 if (dry_run)
675 return SZ_DMAWFP;
677 buf[0] = CMD_DMAWFP;
679 if (cond == SINGLE)
680 buf[0] |= (0 << 1) | (0 << 0);
681 else if (cond == BURST)
682 buf[0] |= (1 << 1) | (0 << 0);
683 else
684 buf[0] |= (0 << 1) | (1 << 0);
686 peri &= 0x1f;
687 peri <<= 3;
688 buf[1] = peri;
690 PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
691 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
693 return SZ_DMAWFP;
696 static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
698 if (dry_run)
699 return SZ_DMAWMB;
701 buf[0] = CMD_DMAWMB;
703 PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
705 return SZ_DMAWMB;
708 struct _arg_GO {
709 u8 chan;
710 u32 addr;
711 unsigned ns;
714 static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
715 const struct _arg_GO *arg)
717 u8 chan = arg->chan;
718 u32 addr = arg->addr;
719 unsigned ns = arg->ns;
721 if (dry_run)
722 return SZ_DMAGO;
724 buf[0] = CMD_DMAGO;
725 buf[0] |= (ns << 1);
727 buf[1] = chan & 0x7;
729 *((u32 *)&buf[2]) = addr;
731 return SZ_DMAGO;
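/*
 * The _emit_*() helpers above share a dry-run protocol: called with a
 * non-zero dry_run they only report the opcode size, so the same path can
 * first measure and then generate microcode. A minimal, hypothetical
 * sketch (example_copy_cycle is illustrative, not part of this driver):
 */
#if 0
static int example_copy_cycle(unsigned dry_run, u8 buf[])
{
	int off = 0;

	off += _emit_LD(dry_run, &buf[off], ALWAYS);	/* DMALD */
	off += _emit_ST(dry_run, &buf[off], ALWAYS);	/* DMAST */
	off += _emit_END(dry_run, &buf[off]);		/* DMAEND */

	return off;	/* bytes of microcode written (or merely needed) */
}
#endif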
734 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
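/*
 * Rough conversion only: loops_per_jiffy * HZ approximates the number of
 * busy-wait loops per second, so msecs_to_loops(5) bounds the poll below
 * to roughly 5ms worth of cpu_relax() iterations.
 */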
736 /* Returns true on time-out */
737 static bool _until_dmac_idle(struct pl330_thread *thrd)
739 void __iomem *regs = thrd->dmac->pinfo->base;
740 unsigned long loops = msecs_to_loops(5);
742 do {
743 /* Until Manager is Idle */
744 if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
745 break;
747 cpu_relax();
748 } while (--loops);
750 if (!loops)
751 return true;
753 return false;
756 static inline void _execute_DBGINSN(struct pl330_thread *thrd,
757 u8 insn[], bool as_manager)
759 void __iomem *regs = thrd->dmac->pinfo->base;
760 u32 val;
762 val = (insn[0] << 16) | (insn[1] << 24);
763 if (!as_manager) {
764 val |= (1 << 0);
765 val |= (thrd->id << 8); /* Channel Number */
767 writel(val, regs + DBGINST0);
769 val = *((u32 *)&insn[2]);
770 writel(val, regs + DBGINST1);
772 /* If timed out due to halted state-machine */
773 if (_until_dmac_idle(thrd)) {
774 dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
775 return;
778 /* Get going */
779 writel(0, regs + DBGCMD);
782 static inline u32 _state(struct pl330_thread *thrd)
784 void __iomem *regs = thrd->dmac->pinfo->base;
785 u32 val;
787 if (is_manager(thrd))
788 val = readl(regs + DS) & 0xf;
789 else
790 val = readl(regs + CS(thrd->id)) & 0xf;
792 switch (val) {
793 case DS_ST_STOP:
794 return PL330_STATE_STOPPED;
795 case DS_ST_EXEC:
796 return PL330_STATE_EXECUTING;
797 case DS_ST_CMISS:
798 return PL330_STATE_CACHEMISS;
799 case DS_ST_UPDTPC:
800 return PL330_STATE_UPDTPC;
801 case DS_ST_WFE:
802 return PL330_STATE_WFE;
803 case DS_ST_FAULT:
804 return PL330_STATE_FAULTING;
805 case DS_ST_ATBRR:
806 if (is_manager(thrd))
807 return PL330_STATE_INVALID;
808 else
809 return PL330_STATE_ATBARRIER;
810 case DS_ST_QBUSY:
811 if (is_manager(thrd))
812 return PL330_STATE_INVALID;
813 else
814 return PL330_STATE_QUEUEBUSY;
815 case DS_ST_WFP:
816 if (is_manager(thrd))
817 return PL330_STATE_INVALID;
818 else
819 return PL330_STATE_WFP;
820 case DS_ST_KILL:
821 if (is_manager(thrd))
822 return PL330_STATE_INVALID;
823 else
824 return PL330_STATE_KILLING;
825 case DS_ST_CMPLT:
826 if (is_manager(thrd))
827 return PL330_STATE_INVALID;
828 else
829 return PL330_STATE_COMPLETING;
830 case DS_ST_FLTCMP:
831 if (is_manager(thrd))
832 return PL330_STATE_INVALID;
833 else
834 return PL330_STATE_FAULT_COMPLETING;
835 default:
836 return PL330_STATE_INVALID;
840 /* If the request 'req' of thread 'thrd' is currently active */
841 static inline bool _req_active(struct pl330_thread *thrd,
842 struct _pl330_req *req)
844 void __iomem *regs = thrd->dmac->pinfo->base;
845 u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
847 if (IS_FREE(req))
848 return false;
850 return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
853 /* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
854 static inline unsigned _thrd_active(struct pl330_thread *thrd)
856 if (_req_active(thrd, &thrd->req[0]))
857 return 1; /* First req active */
859 if (_req_active(thrd, &thrd->req[1]))
860 return 2; /* Second req active */
862 return 0;
865 static void _stop(struct pl330_thread *thrd)
867 void __iomem *regs = thrd->dmac->pinfo->base;
868 u8 insn[6] = {0, 0, 0, 0, 0, 0};
870 if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
871 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
873 /* Return if nothing needs to be done */
874 if (_state(thrd) == PL330_STATE_COMPLETING
875 || _state(thrd) == PL330_STATE_KILLING
876 || _state(thrd) == PL330_STATE_STOPPED)
877 return;
879 _emit_KILL(0, insn);
881 /* Stop generating interrupts for SEV */
882 writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
884 _execute_DBGINSN(thrd, insn, is_manager(thrd));
887 /* Start doing req 'idx' of thread 'thrd' */
888 static bool _trigger(struct pl330_thread *thrd)
890 void __iomem *regs = thrd->dmac->pinfo->base;
891 struct _pl330_req *req;
892 struct pl330_req *r;
893 struct _arg_GO go;
894 unsigned ns;
895 u8 insn[6] = {0, 0, 0, 0, 0, 0};
897 /* Return if already ACTIVE */
898 if (_state(thrd) != PL330_STATE_STOPPED)
899 return true;
901 if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
902 req = &thrd->req[1 - thrd->lstenq];
903 else if (!IS_FREE(&thrd->req[thrd->lstenq]))
904 req = &thrd->req[thrd->lstenq];
905 else
906 req = NULL;
908 /* Return if no request */
909 if (!req || !req->r)
910 return true;
912 r = req->r;
914 if (r->cfg)
915 ns = r->cfg->nonsecure ? 1 : 0;
916 else if (readl(regs + CS(thrd->id)) & CS_CNS)
917 ns = 1;
918 else
919 ns = 0;
921 /* See 'Abort Sources' point-4 at Page 2-25 */
922 if (_manager_ns(thrd) && !ns)
923 dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
924 __func__, __LINE__);
926 go.chan = thrd->id;
927 go.addr = req->mc_bus;
928 go.ns = ns;
929 _emit_GO(0, insn, &go);
931 /* Set to generate interrupts for SEV */
932 writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
934 /* Only manager can execute GO */
935 _execute_DBGINSN(thrd, insn, true);
937 return true;
940 static bool _start(struct pl330_thread *thrd)
942 switch (_state(thrd)) {
943 case PL330_STATE_FAULT_COMPLETING:
944 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
946 if (_state(thrd) == PL330_STATE_KILLING)
947 UNTIL(thrd, PL330_STATE_STOPPED)
949 case PL330_STATE_FAULTING:
950 _stop(thrd);
952 case PL330_STATE_KILLING:
953 case PL330_STATE_COMPLETING:
954 UNTIL(thrd, PL330_STATE_STOPPED)
956 case PL330_STATE_STOPPED:
957 return _trigger(thrd);
959 case PL330_STATE_WFP:
960 case PL330_STATE_QUEUEBUSY:
961 case PL330_STATE_ATBARRIER:
962 case PL330_STATE_UPDTPC:
963 case PL330_STATE_CACHEMISS:
964 case PL330_STATE_EXECUTING:
965 return true;
967 case PL330_STATE_WFE: /* For RESUME, nothing yet */
968 default:
969 return false;
973 static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
974 const struct _xfer_spec *pxs, int cyc)
976 int off = 0;
978 while (cyc--) {
979 off += _emit_LD(dry_run, &buf[off], ALWAYS);
980 off += _emit_RMB(dry_run, &buf[off]);
981 off += _emit_ST(dry_run, &buf[off], ALWAYS);
982 off += _emit_WMB(dry_run, &buf[off]);
985 return off;
988 static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
989 const struct _xfer_spec *pxs, int cyc)
991 int off = 0;
993 while (cyc--) {
994 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
995 off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
996 off += _emit_ST(dry_run, &buf[off], ALWAYS);
997 off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1000 return off;
1003 static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
1004 const struct _xfer_spec *pxs, int cyc)
1006 int off = 0;
1008 while (cyc--) {
1009 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1010 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1011 off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1012 off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1015 return off;
1018 static int _bursts(unsigned dry_run, u8 buf[],
1019 const struct _xfer_spec *pxs, int cyc)
1021 int off = 0;
1023 switch (pxs->r->rqtype) {
1024 case MEMTODEV:
1025 off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
1026 break;
1027 case DEVTOMEM:
1028 off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
1029 break;
1030 case MEMTOMEM:
1031 off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
1032 break;
1033 default:
1034 off += 0x40000000; /* Scare off the Client */
1035 break;
1038 return off;
1041 /* Returns bytes consumed and updates bursts */
1042 static inline int _loop(unsigned dry_run, u8 buf[],
1043 unsigned long *bursts, const struct _xfer_spec *pxs)
1045 int cyc, cycmax, szlp, szlpend, szbrst, off;
1046 unsigned lcnt0, lcnt1, ljmp0, ljmp1;
1047 struct _arg_LPEND lpend;
1049 /* Max iterations possible in DMALP is 256 */
1050 if (*bursts >= 256*256) {
1051 lcnt1 = 256;
1052 lcnt0 = 256;
1053 cyc = *bursts / lcnt1 / lcnt0;
1054 } else if (*bursts > 256) {
1055 lcnt1 = 256;
1056 lcnt0 = *bursts / lcnt1;
1057 cyc = 1;
1058 } else {
1059 lcnt1 = *bursts;
1060 lcnt0 = 0;
1061 cyc = 1;
1064 szlp = _emit_LP(1, buf, 0, 0);
1065 szbrst = _bursts(1, buf, pxs, 1);
1067 lpend.cond = ALWAYS;
1068 lpend.forever = false;
1069 lpend.loop = 0;
1070 lpend.bjump = 0;
1071 szlpend = _emit_LPEND(1, buf, &lpend);
1073 if (lcnt0) {
1074 szlp *= 2;
1075 szlpend *= 2;
1079 * Max bursts that we can unroll due to the limit on the
1080 * size of the backward jump that can be encoded in DMALPEND,
1081 * which is 8 bits and hence 255
1083 cycmax = (255 - (szlp + szlpend)) / szbrst;
1085 cyc = (cycmax < cyc) ? cycmax : cyc;
1087 off = 0;
1089 if (lcnt0) {
1090 off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
1091 ljmp0 = off;
1094 off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
1095 ljmp1 = off;
1097 off += _bursts(dry_run, &buf[off], pxs, cyc);
1099 lpend.cond = ALWAYS;
1100 lpend.forever = false;
1101 lpend.loop = 1;
1102 lpend.bjump = off - ljmp1;
1103 off += _emit_LPEND(dry_run, &buf[off], &lpend);
1105 if (lcnt0) {
1106 lpend.cond = ALWAYS;
1107 lpend.forever = false;
1108 lpend.loop = 0;
1109 lpend.bjump = off - ljmp0;
1110 off += _emit_LPEND(dry_run, &buf[off], &lpend);
1113 *bursts = lcnt1 * cyc;
1114 if (lcnt0)
1115 *bursts *= lcnt0;
1117 return off;
1120 static inline int _setup_loops(unsigned dry_run, u8 buf[],
1121 const struct _xfer_spec *pxs)
1123 struct pl330_xfer *x = pxs->x;
1124 u32 ccr = pxs->ccr;
1125 unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
1126 int off = 0;
1128 while (bursts) {
1129 c = bursts;
1130 off += _loop(dry_run, &buf[off], &c, pxs);
1131 bursts -= c;
1134 return off;
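/*
 * Worked example (illustrative): for a transfer worth 70000 bursts,
 * _loop() is invoked three times: two nested DMALPs of 256 each cover
 * 65536 bursts, then 17 x 256 = 4352 bursts, and finally a single DMALP
 * covers the remaining 112 bursts.
 */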
1137 static inline int _setup_xfer(unsigned dry_run, u8 buf[],
1138 const struct _xfer_spec *pxs)
1140 struct pl330_xfer *x = pxs->x;
1141 int off = 0;
1143 /* DMAMOV SAR, x->src_addr */
1144 off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
1145 /* DMAMOV DAR, x->dst_addr */
1146 off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
1148 /* Setup Loop(s) */
1149 off += _setup_loops(dry_run, &buf[off], pxs);
1151 return off;
1155 * A req is a sequence of one or more xfer units.
1156 * Returns the number of bytes taken to setup the MC for the req.
1158 static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
1159 unsigned index, struct _xfer_spec *pxs)
1161 struct _pl330_req *req = &thrd->req[index];
1162 struct pl330_xfer *x;
1163 u8 *buf = req->mc_cpu;
1164 int off = 0;
1166 PL330_DBGMC_START(req->mc_bus);
1168 /* DMAMOV CCR, ccr */
1169 off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
1171 x = pxs->r->x;
1172 do {
1173 /* Error if xfer length is not aligned at burst size */
1174 if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
1175 return -EINVAL;
1177 pxs->x = x;
1178 off += _setup_xfer(dry_run, &buf[off], pxs);
1180 x = x->next;
1181 } while (x);
1183 /* DMASEV peripheral/event */
1184 off += _emit_SEV(dry_run, &buf[off], thrd->ev);
1185 /* DMAEND */
1186 off += _emit_END(dry_run, &buf[off]);
1188 return off;
1191 static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1193 u32 ccr = 0;
1195 if (rqc->src_inc)
1196 ccr |= CC_SRCINC;
1198 if (rqc->dst_inc)
1199 ccr |= CC_DSTINC;
1201 /* We set same protection levels for Src and DST for now */
1202 if (rqc->privileged)
1203 ccr |= CC_SRCPRI | CC_DSTPRI;
1204 if (rqc->nonsecure)
1205 ccr |= CC_SRCNS | CC_DSTNS;
1206 if (rqc->insnaccess)
1207 ccr |= CC_SRCIA | CC_DSTIA;
1209 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
1210 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
1212 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1213 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1215 ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
1216 ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1218 ccr |= (rqc->swap << CC_SWAP_SHFT);
1220 return ccr;
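/*
 * Worked example (illustrative): src_inc = dst_inc = 1, brst_size = 2
 * (i.e. 4-byte beats), brst_len = 8 and every other cfg field zero
 * yields ccr = 0x001d4075, i.e. incrementing 8-beat word bursts on
 * both source and destination.
 */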
1223 static inline bool _is_valid(u32 ccr)
1225 enum pl330_dstcachectrl dcctl;
1226 enum pl330_srccachectrl scctl;
1228 dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DSTCCTRL_MASK;
1229 scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
1231 if (dcctl == DINVALID1 || dcctl == DINVALID2
1232 || scctl == SINVALID1 || scctl == SINVALID2)
1233 return false;
1234 else
1235 return true;
1239 * Submit a list of xfers after which the client wants notification.
1240 * Client is not notified after each xfer unit, just once after all
1241 * xfer units are done or some error occurs.
1243 int pl330_submit_req(void *ch_id, struct pl330_req *r)
1245 struct pl330_thread *thrd = ch_id;
1246 struct pl330_dmac *pl330;
1247 struct pl330_info *pi;
1248 struct _xfer_spec xs;
1249 unsigned long flags;
1250 void __iomem *regs;
1251 unsigned idx;
1252 u32 ccr;
1253 int ret = 0;
1255 /* No Req or Unacquired Channel or DMAC */
1256 if (!r || !thrd || thrd->free)
1257 return -EINVAL;
1259 pl330 = thrd->dmac;
1260 pi = pl330->pinfo;
1261 regs = pi->base;
1263 if (pl330->state == DYING
1264 || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
1265 dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
1266 __func__, __LINE__);
1267 return -EAGAIN;
1270 /* If request for non-existing peripheral */
1271 if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
1272 dev_info(thrd->dmac->pinfo->dev,
1273 "%s:%d Invalid peripheral(%u)!\n",
1274 __func__, __LINE__, r->peri);
1275 return -EINVAL;
1278 spin_lock_irqsave(&pl330->lock, flags);
1280 if (_queue_full(thrd)) {
1281 ret = -EAGAIN;
1282 goto xfer_exit;
1285 /* Prefer Secure Channel */
1286 if (r->cfg && !_manager_ns(thrd))
1287 r->cfg->nonsecure = 0;
1288 else if (r->cfg)
1289 r->cfg->nonsecure = 1;
1291 /* Use last settings, if not provided */
1292 if (r->cfg)
1293 ccr = _prepare_ccr(r->cfg);
1294 else
1295 ccr = readl(regs + CC(thrd->id));
1297 /* If this req doesn't have valid xfer settings */
1298 if (!_is_valid(ccr)) {
1299 ret = -EINVAL;
1300 dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
1301 __func__, __LINE__, ccr);
1302 goto xfer_exit;
1305 idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
1307 xs.ccr = ccr;
1308 xs.r = r;
1310 /* First dry run to check if req is acceptable */
1311 ret = _setup_req(1, thrd, idx, &xs);
1312 if (ret < 0)
1313 goto xfer_exit;
1315 if (ret > pi->mcbufsz / 2) {
1316 dev_info(thrd->dmac->pinfo->dev,
1317 "%s:%d Trying increasing mcbufsz\n",
1318 __func__, __LINE__);
1319 ret = -ENOMEM;
1320 goto xfer_exit;
1323 /* Hook the request */
1324 thrd->lstenq = idx;
1325 thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
1326 thrd->req[idx].r = r;
1328 ret = 0;
1330 xfer_exit:
1331 spin_unlock_irqrestore(&pl330->lock, flags);
1333 return ret;
1335 EXPORT_SYMBOL(pl330_submit_req);
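/*
 * A minimal, hypothetical client-side sketch of the submit flow (the
 * example_* names below are illustrative and not part of this driver);
 * error handling and completion waiting are omitted:
 */
#if 0
static void example_done(void *token, enum pl330_op_err err)
{
	/* Called once, after all xfer units of the req finish or fail */
}

static int example_memcpy(struct pl330_info *pi, dma_addr_t src,
			  dma_addr_t dst, u32 bytes)
{
	struct pl330_reqcfg cfg = {
		.src_inc = 1,
		.dst_inc = 1,
		.brst_size = 2,		/* 2^2 = 4-byte beats */
		.brst_len = 8,		/* 8 beats per burst */
	};
	struct pl330_xfer x = {
		.src_addr = src,
		.dst_addr = dst,
		.bytes = bytes,		/* must be a multiple of the burst */
	};
	struct pl330_req r = {
		.rqtype = MEMTOMEM,
		.cfg = &cfg,
		.x = &x,
		.xfer_cb = example_done,
	};
	void *ch = pl330_request_channel(pi);
	int ret;

	if (!ch)
		return -EBUSY;

	ret = pl330_submit_req(ch, &r);
	if (!ret)
		ret = pl330_chan_ctrl(ch, PL330_OP_START);

	/* ... wait for example_done() before tearing down ... */
	pl330_release_channel(ch);
	return ret;
}
#endif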
1337 static void pl330_dotask(unsigned long data)
1339 struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
1340 struct pl330_info *pi = pl330->pinfo;
1341 unsigned long flags;
1342 int i;
1344 spin_lock_irqsave(&pl330->lock, flags);
1346 /* The DMAC itself has gone nuts */
1347 if (pl330->dmac_tbd.reset_dmac) {
1348 pl330->state = DYING;
1349 /* Reset the manager too */
1350 pl330->dmac_tbd.reset_mngr = true;
1351 /* Clear the reset flag */
1352 pl330->dmac_tbd.reset_dmac = false;
1355 if (pl330->dmac_tbd.reset_mngr) {
1356 _stop(pl330->manager);
1357 /* Reset all channels */
1358 pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
1359 /* Clear the reset flag */
1360 pl330->dmac_tbd.reset_mngr = false;
1363 for (i = 0; i < pi->pcfg.num_chan; i++) {
1365 if (pl330->dmac_tbd.reset_chan & (1 << i)) {
1366 struct pl330_thread *thrd = &pl330->channels[i];
1367 void __iomem *regs = pi->base;
1368 enum pl330_op_err err;
1370 _stop(thrd);
1372 if (readl(regs + FSC) & (1 << thrd->id))
1373 err = PL330_ERR_FAIL;
1374 else
1375 err = PL330_ERR_ABORT;
1377 spin_unlock_irqrestore(&pl330->lock, flags);
1379 _callback(thrd->req[1 - thrd->lstenq].r, err);
1380 _callback(thrd->req[thrd->lstenq].r, err);
1382 spin_lock_irqsave(&pl330->lock, flags);
1384 thrd->req[0].r = NULL;
1385 thrd->req[1].r = NULL;
1386 MARK_FREE(&thrd->req[0]);
1387 MARK_FREE(&thrd->req[1]);
1389 /* Clear the reset flag */
1390 pl330->dmac_tbd.reset_chan &= ~(1 << i);
1394 spin_unlock_irqrestore(&pl330->lock, flags);
1396 return;
1399 /* Returns 1 if state was updated, 0 otherwise */
1400 int pl330_update(const struct pl330_info *pi)
1402 struct _pl330_req *rqdone;
1403 struct pl330_dmac *pl330;
1404 unsigned long flags;
1405 void __iomem *regs;
1406 u32 val;
1407 int id, ev, ret = 0;
1409 if (!pi || !pi->pl330_data)
1410 return 0;
1412 regs = pi->base;
1413 pl330 = pi->pl330_data;
1415 spin_lock_irqsave(&pl330->lock, flags);
1417 val = readl(regs + FSM) & 0x1;
1418 if (val)
1419 pl330->dmac_tbd.reset_mngr = true;
1420 else
1421 pl330->dmac_tbd.reset_mngr = false;
1423 val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
1424 pl330->dmac_tbd.reset_chan |= val;
1425 if (val) {
1426 int i = 0;
1427 while (i < pi->pcfg.num_chan) {
1428 if (val & (1 << i)) {
1429 dev_info(pi->dev,
1430 "Reset Channel-%d\t CS-%x FTC-%x\n",
1431 i, readl(regs + CS(i)),
1432 readl(regs + FTC(i)));
1433 _stop(&pl330->channels[i]);
1435 i++;
1439 /* Check which event happened, i.e. which thread notified */
1440 val = readl(regs + ES);
1441 if (pi->pcfg.num_events < 32
1442 && val & ~((1 << pi->pcfg.num_events) - 1)) {
1443 pl330->dmac_tbd.reset_dmac = true;
1444 dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
1445 ret = 1;
1446 goto updt_exit;
1449 for (ev = 0; ev < pi->pcfg.num_events; ev++) {
1450 if (val & (1 << ev)) { /* Event occurred */
1451 struct pl330_thread *thrd;
1452 u32 inten = readl(regs + INTEN);
1453 int active;
1455 /* Clear the event */
1456 if (inten & (1 << ev))
1457 writel(1 << ev, regs + INTCLR);
1459 ret = 1;
1461 id = pl330->events[ev];
1463 thrd = &pl330->channels[id];
1465 active = _thrd_active(thrd);
1466 if (!active) /* Aborted */
1467 continue;
1469 active -= 1;
1471 rqdone = &thrd->req[active];
1472 MARK_FREE(rqdone);
1474 /* Get going again ASAP */
1475 _start(thrd);
1477 /* For now, just make a list of callbacks to be done */
1478 list_add_tail(&rqdone->rqd, &pl330->req_done);
1482 /* Now that we are in no hurry, do the callbacks */
1483 while (!list_empty(&pl330->req_done)) {
1484 rqdone = container_of(pl330->req_done.next,
1485 struct _pl330_req, rqd);
1487 list_del_init(&rqdone->rqd);
1489 spin_unlock_irqrestore(&pl330->lock, flags);
1490 _callback(rqdone->r, PL330_ERR_NONE);
1491 spin_lock_irqsave(&pl330->lock, flags);
1494 updt_exit:
1495 spin_unlock_irqrestore(&pl330->lock, flags);
1497 if (pl330->dmac_tbd.reset_dmac
1498 || pl330->dmac_tbd.reset_mngr
1499 || pl330->dmac_tbd.reset_chan) {
1500 ret = 1;
1501 tasklet_schedule(&pl330->tasks);
1504 return ret;
1506 EXPORT_SYMBOL(pl330_update);
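/*
 * pl330_update() is meant to be driven from the DMAC interrupt. A
 * hypothetical platform glue handler (example_pl330_irq is illustrative
 * only) could look like:
 */
#if 0
static irqreturn_t example_pl330_irq(int irq, void *data)
{
	struct pl330_info *pi = data;

	/* Non-zero return means some channel/manager state was serviced */
	return pl330_update(pi) ? IRQ_HANDLED : IRQ_NONE;
}
#endif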
1508 int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
1510 struct pl330_thread *thrd = ch_id;
1511 struct pl330_dmac *pl330;
1512 unsigned long flags;
1513 int ret = 0, active;
1515 if (!thrd || thrd->free || thrd->dmac->state == DYING)
1516 return -EINVAL;
1518 pl330 = thrd->dmac;
1520 spin_lock_irqsave(&pl330->lock, flags);
1522 switch (op) {
1523 case PL330_OP_FLUSH:
1524 /* Make sure the channel is stopped */
1525 _stop(thrd);
1527 thrd->req[0].r = NULL;
1528 thrd->req[1].r = NULL;
1529 MARK_FREE(&thrd->req[0]);
1530 MARK_FREE(&thrd->req[1]);
1531 break;
1533 case PL330_OP_ABORT:
1534 active = _thrd_active(thrd);
1536 /* Make sure the channel is stopped */
1537 _stop(thrd);
1539 /* ABORT is only for the active req */
1540 if (!active)
1541 break;
1543 active--;
1545 thrd->req[active].r = NULL;
1546 MARK_FREE(&thrd->req[active]);
1548 /* Start the next */
1549 case PL330_OP_START:
1550 if (!_start(thrd))
1551 ret = -EIO;
1552 break;
1554 default:
1555 ret = -EINVAL;
1558 spin_unlock_irqrestore(&pl330->lock, flags);
1559 return ret;
1561 EXPORT_SYMBOL(pl330_chan_ctrl);
1563 int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
1565 struct pl330_thread *thrd = ch_id;
1566 struct pl330_dmac *pl330;
1567 struct pl330_info *pi;
1568 void __iomem *regs;
1569 int active;
1570 u32 val;
1572 if (!pstatus || !thrd || thrd->free)
1573 return -EINVAL;
1575 pl330 = thrd->dmac;
1576 pi = pl330->pinfo;
1577 regs = pi->base;
1579 /* The client should remove the DMAC and add again */
1580 if (pl330->state == DYING)
1581 pstatus->dmac_halted = true;
1582 else
1583 pstatus->dmac_halted = false;
1585 val = readl(regs + FSC);
1586 if (val & (1 << thrd->id))
1587 pstatus->faulting = true;
1588 else
1589 pstatus->faulting = false;
1591 active = _thrd_active(thrd);
1593 if (!active) {
1594 /* Indicate that the thread is not running */
1595 pstatus->top_req = NULL;
1596 pstatus->wait_req = NULL;
1597 } else {
1598 active--;
1599 pstatus->top_req = thrd->req[active].r;
1600 pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
1601 ? thrd->req[1 - active].r : NULL;
1604 pstatus->src_addr = readl(regs + SA(thrd->id));
1605 pstatus->dst_addr = readl(regs + DA(thrd->id));
1607 return 0;
1609 EXPORT_SYMBOL(pl330_chan_status);
1611 /* Reserve an event */
1612 static inline int _alloc_event(struct pl330_thread *thrd)
1614 struct pl330_dmac *pl330 = thrd->dmac;
1615 struct pl330_info *pi = pl330->pinfo;
1616 int ev;
1618 for (ev = 0; ev < pi->pcfg.num_events; ev++)
1619 if (pl330->events[ev] == -1) {
1620 pl330->events[ev] = thrd->id;
1621 return ev;
1624 return -1;
1627 /* Upon success, returns IdentityToken for the
1628 * allocated channel, NULL otherwise.
1630 void *pl330_request_channel(const struct pl330_info *pi)
1632 struct pl330_thread *thrd = NULL;
1633 struct pl330_dmac *pl330;
1634 unsigned long flags;
1635 int chans, i;
1637 if (!pi || !pi->pl330_data)
1638 return NULL;
1640 pl330 = pi->pl330_data;
1642 if (pl330->state == DYING)
1643 return NULL;
1645 chans = pi->pcfg.num_chan;
1647 spin_lock_irqsave(&pl330->lock, flags);
1649 for (i = 0; i < chans; i++) {
1650 thrd = &pl330->channels[i];
1651 if (thrd->free) {
1652 thrd->ev = _alloc_event(thrd);
1653 if (thrd->ev >= 0) {
1654 thrd->free = false;
1655 thrd->lstenq = 1;
1656 thrd->req[0].r = NULL;
1657 MARK_FREE(&thrd->req[0]);
1658 thrd->req[1].r = NULL;
1659 MARK_FREE(&thrd->req[1]);
1660 break;
1663 thrd = NULL;
1666 spin_unlock_irqrestore(&pl330->lock, flags);
1668 return thrd;
1670 EXPORT_SYMBOL(pl330_request_channel);
1672 /* Release an event */
1673 static inline void _free_event(struct pl330_thread *thrd, int ev)
1675 struct pl330_dmac *pl330 = thrd->dmac;
1676 struct pl330_info *pi = pl330->pinfo;
1678 /* If the event is valid and was held by the thread */
1679 if (ev >= 0 && ev < pi->pcfg.num_events
1680 && pl330->events[ev] == thrd->id)
1681 pl330->events[ev] = -1;
1684 void pl330_release_channel(void *ch_id)
1686 struct pl330_thread *thrd = ch_id;
1687 struct pl330_dmac *pl330;
1688 unsigned long flags;
1690 if (!thrd || thrd->free)
1691 return;
1693 _stop(thrd);
1695 _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
1696 _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
1698 pl330 = thrd->dmac;
1700 spin_lock_irqsave(&pl330->lock, flags);
1701 _free_event(thrd, thrd->ev);
1702 thrd->free = true;
1703 spin_unlock_irqrestore(&pl330->lock, flags);
1705 EXPORT_SYMBOL(pl330_release_channel);
1707 /* Initialize the structure for PL330 configuration, that can be used
1708 * by the client driver to make the best use of the DMAC
1710 static void read_dmac_config(struct pl330_info *pi)
1712 void __iomem *regs = pi->base;
1713 u32 val;
1715 val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
1716 val &= CRD_DATA_WIDTH_MASK;
1717 pi->pcfg.data_bus_width = 8 * (1 << val);
1719 val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
1720 val &= CRD_DATA_BUFF_MASK;
1721 pi->pcfg.data_buf_dep = val + 1;
1723 val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
1724 val &= CR0_NUM_CHANS_MASK;
1725 val += 1;
1726 pi->pcfg.num_chan = val;
1728 val = readl(regs + CR0);
1729 if (val & CR0_PERIPH_REQ_SET) {
1730 val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
1731 val += 1;
1732 pi->pcfg.num_peri = val;
1733 pi->pcfg.peri_ns = readl(regs + CR4);
1734 } else {
1735 pi->pcfg.num_peri = 0;
1738 val = readl(regs + CR0);
1739 if (val & CR0_BOOT_MAN_NS)
1740 pi->pcfg.mode |= DMAC_MODE_NS;
1741 else
1742 pi->pcfg.mode &= ~DMAC_MODE_NS;
1744 val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
1745 val &= CR0_NUM_EVENTS_MASK;
1746 val += 1;
1747 pi->pcfg.num_events = val;
1749 pi->pcfg.irq_ns = readl(regs + CR3);
1751 pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
1752 pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
1755 static inline void _reset_thread(struct pl330_thread *thrd)
1757 struct pl330_dmac *pl330 = thrd->dmac;
1758 struct pl330_info *pi = pl330->pinfo;
1760 thrd->req[0].mc_cpu = pl330->mcode_cpu
1761 + (thrd->id * pi->mcbufsz);
1762 thrd->req[0].mc_bus = pl330->mcode_bus
1763 + (thrd->id * pi->mcbufsz);
1764 thrd->req[0].r = NULL;
1765 MARK_FREE(&thrd->req[0]);
1767 thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
1768 + pi->mcbufsz / 2;
1769 thrd->req[1].mc_bus = thrd->req[0].mc_bus
1770 + pi->mcbufsz / 2;
1771 thrd->req[1].r = NULL;
1772 MARK_FREE(&thrd->req[1]);
1775 static int dmac_alloc_threads(struct pl330_dmac *pl330)
1777 struct pl330_info *pi = pl330->pinfo;
1778 int chans = pi->pcfg.num_chan;
1779 struct pl330_thread *thrd;
1780 int i;
1782 /* Allocate 1 Manager and 'chans' Channel threads */
1783 pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
1784 GFP_KERNEL);
1785 if (!pl330->channels)
1786 return -ENOMEM;
1788 /* Init Channel threads */
1789 for (i = 0; i < chans; i++) {
1790 thrd = &pl330->channels[i];
1791 thrd->id = i;
1792 thrd->dmac = pl330;
1793 _reset_thread(thrd);
1794 thrd->free = true;
1797 /* MANAGER is indexed at the end */
1798 thrd = &pl330->channels[chans];
1799 thrd->id = chans;
1800 thrd->dmac = pl330;
1801 thrd->free = false;
1802 pl330->manager = thrd;
1804 return 0;
1807 static int dmac_alloc_resources(struct pl330_dmac *pl330)
1809 struct pl330_info *pi = pl330->pinfo;
1810 int chans = pi->pcfg.num_chan;
1811 int ret;
1814 * Alloc MicroCode buffer for 'chans' Channel threads.
1815 * A channel's buffer offset is (Channel_Id * pi->mcbufsz)
1817 pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
1818 chans * pi->mcbufsz,
1819 &pl330->mcode_bus, GFP_KERNEL);
1820 if (!pl330->mcode_cpu) {
1821 dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
1822 __func__, __LINE__);
1823 return -ENOMEM;
1826 ret = dmac_alloc_threads(pl330);
1827 if (ret) {
1828 dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
1829 __func__, __LINE__);
1830 dma_free_coherent(pi->dev,
1831 chans * pi->mcbufsz,
1832 pl330->mcode_cpu, pl330->mcode_bus);
1833 return ret;
1836 return 0;
1839 int pl330_add(struct pl330_info *pi)
1841 struct pl330_dmac *pl330;
1842 void __iomem *regs;
1843 int i, ret;
1845 if (!pi || !pi->dev)
1846 return -EINVAL;
1848 /* If already added */
1849 if (pi->pl330_data)
1850 return -EINVAL;
1853 * If the SoC can perform reset on the DMAC, then do it
1854 * before reading its configuration.
1856 if (pi->dmac_reset)
1857 pi->dmac_reset(pi);
1859 regs = pi->base;
1861 /* Check if we can handle this DMAC */
1862 if (get_id(pi, PERIPH_ID) != PERIPH_ID_VAL
1863 || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
1864 dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
1865 readl(regs + PERIPH_ID), readl(regs + PCELL_ID));
1866 return -EINVAL;
1869 /* Read the configuration of the DMAC */
1870 read_dmac_config(pi);
1872 if (pi->pcfg.num_events == 0) {
1873 dev_err(pi->dev, "%s:%d Can't work without events!\n",
1874 __func__, __LINE__);
1875 return -EINVAL;
1878 pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
1879 if (!pl330) {
1880 dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
1881 __func__, __LINE__);
1882 return -ENOMEM;
1885 /* Assign the info structure and private data */
1886 pl330->pinfo = pi;
1887 pi->pl330_data = pl330;
1889 spin_lock_init(&pl330->lock);
1891 INIT_LIST_HEAD(&pl330->req_done);
1893 /* Use default MC buffer size if not provided */
1894 if (!pi->mcbufsz)
1895 pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
1897 /* Mark all events as free */
1898 for (i = 0; i < pi->pcfg.num_events; i++)
1899 pl330->events[i] = -1;
1901 /* Allocate resources needed by the DMAC */
1902 ret = dmac_alloc_resources(pl330);
1903 if (ret) {
1904 dev_err(pi->dev, "Unable to create channels for DMAC\n");
1905 kfree(pl330);
1906 return ret;
1909 tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
1911 pl330->state = INIT;
1913 return 0;
1915 EXPORT_SYMBOL(pl330_add);
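/*
 * Registration is the platform's job. A hypothetical probe-time sketch
 * (example_register_dmac is illustrative; resource and IRQ setup are
 * omitted):
 */
#if 0
static int example_register_dmac(struct device *dev, void __iomem *base)
{
	struct pl330_info *pi;
	int ret;

	pi = kzalloc(sizeof(*pi), GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	pi->dev = dev;
	pi->base = base;
	pi->mcbufsz = 0;	/* 0 selects the default MCODE_BUFF_PER_REQ * 2 */

	ret = pl330_add(pi);
	if (ret)
		kfree(pi);

	return ret;
}
#endif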
1917 static int dmac_free_threads(struct pl330_dmac *pl330)
1919 struct pl330_info *pi = pl330->pinfo;
1920 int chans = pi->pcfg.num_chan;
1921 struct pl330_thread *thrd;
1922 int i;
1924 /* Release Channel threads */
1925 for (i = 0; i < chans; i++) {
1926 thrd = &pl330->channels[i];
1927 pl330_release_channel((void *)thrd);
1930 /* Free memory */
1931 kfree(pl330->channels);
1933 return 0;
1936 static void dmac_free_resources(struct pl330_dmac *pl330)
1938 struct pl330_info *pi = pl330->pinfo;
1939 int chans = pi->pcfg.num_chan;
1941 dmac_free_threads(pl330);
1943 dma_free_coherent(pi->dev, chans * pi->mcbufsz,
1944 pl330->mcode_cpu, pl330->mcode_bus);
1947 void pl330_del(struct pl330_info *pi)
1949 struct pl330_dmac *pl330;
1951 if (!pi || !pi->pl330_data)
1952 return;
1954 pl330 = pi->pl330_data;
1956 pl330->state = UNINIT;
1958 tasklet_kill(&pl330->tasks);
1960 /* Free DMAC resources */
1961 dmac_free_resources(pl330);
1963 kfree(pl330);
1964 pi->pl330_data = NULL;
1966 EXPORT_SYMBOL(pl330_del);