/*
 * Arm PrimeCell PL080 DMA controller
 *
 * Copyright (c) 2006 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */
#define PL080_NUM_CHANNELS 8

/* DMAC configuration register bits.  */
#define PL080_CONF_E    0x1     /* DMAC enable */
#define PL080_CONF_M1   0x2     /* AHB master 1 big-endian */
#define PL080_CONF_M2   0x4     /* AHB master 2 big-endian */

/* Channel configuration register bits.  */
#define PL080_CCONF_H   0x40000 /* Halt */
#define PL080_CCONF_A   0x20000 /* Active */
#define PL080_CCONF_L   0x10000 /* Lock */
#define PL080_CCONF_ITC 0x08000 /* Terminal count interrupt mask */
#define PL080_CCONF_IE  0x04000 /* Interrupt error mask */
#define PL080_CCONF_E   0x00001 /* Channel enable */

/* Channel control register bits.  */
#define PL080_CCTRL_I   0x80000000 /* Terminal count interrupt enable */
#define PL080_CCTRL_DI  0x08000000 /* Destination increment */
#define PL080_CCTRL_SI  0x04000000 /* Source increment */
#define PL080_CCTRL_D   0x02000000 /* Destination AHB master select */
#define PL080_CCTRL_S   0x01000000 /* Source AHB master select */
    pl080_channel chan[PL080_NUM_CHANNELS];
    /* Flag to avoid recursive DMA invocations.  */
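/* PrimeCell peripheral and cell ID values, read back a byte at a time from
   offsets 0xfe0-0xffc.  */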
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
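/* Update the combined interrupt line: assert it while any unmasked terminal
   count or error interrupt is pending, and deassert it otherwise.  */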
static void pl080_update(pl080_state *s)
{
    if ((s->tc_int & s->tc_mask)
            || (s->err_int & s->err_mask))
        pic_set_irq_new(s->pic, s->irq, 1);
    else
        pic_set_irq_new(s->pic, s->irq, 0);
}
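/* Recompute the interrupt masks from the per-channel ITC/IE bits, then
   service any channels that are enabled and able to make progress.  */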
static void pl080_run(pl080_state *s)
{
    for (c = 0; c < PL080_NUM_CHANNELS; c++) {
        if (s->chan[c].conf & PL080_CCONF_ITC)
            s->tc_mask |= 1 << c;
        if (s->chan[c].conf & PL080_CCONF_IE)
            s->err_mask |= 1 << c;
    }
    if ((s->conf & PL080_CONF_E) == 0)
        return;

    cpu_abort(cpu_single_env, "DMA active\n");
    /* If we are already in the middle of a DMA operation then indicate that
       there may be new DMA requests and return immediately.  */
    for (c = 0; c < PL080_NUM_CHANNELS; c++) {
        ch = &s->chan[c];
        /* Test if this channel has any pending DMA requests.  */
        if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
                != PL080_CCONF_E)
            continue;
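        /* Flow values 0-3 select memory/peripheral transfer combinations with
           the DMAC as flow controller; 4-7 give flow control to a peripheral
           and are not implemented here.  */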
        flow = (ch->conf >> 11) & 7;
        if (flow >= 4) {
            cpu_abort(cpu_single_env,
                "pl080_run: Peripheral flow control not implemented\n");
        }
        src_id = (ch->conf >> 1) & 0x1f;    /* Source peripheral number.  */
        dest_id = (ch->conf >> 6) & 0x1f;   /* Destination peripheral number.  */
        size = ch->ctrl & 0xfff;            /* Remaining transfer count.  */
        req = s->req_single | s->req_burst; /* Pending peripheral requests.  */
        /* Flows 1-3 are gated by the peripheral request lines; without the
           required request(s) there is nothing to transfer yet.  */
        if (flow == 1 && (req & (1u << dest_id)) == 0)
            size = 0;
        if (flow == 2 && (req & (1u << src_id)) == 0)
            size = 0;
        if (flow == 3 && ((req & (1u << src_id)) == 0
                          || (req & (1u << dest_id)) == 0))
            size = 0;
        if (!size)
            continue;
        /* Transfer one element.  */
        /* ??? Should transfer multiple elements for a burst request.  */
        /* ??? Unclear what the proper behavior is when source and
           destination widths are different.  */
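        /* The source and destination widths are encoded in ctrl[20:18] and
           ctrl[23:21] as log2 of the element size in bytes.  */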
        swidth = 1 << ((ch->ctrl >> 18) & 7);
        dwidth = 1 << ((ch->ctrl >> 21) & 7);
        for (n = 0; n < dwidth; n += swidth) {
            cpu_physical_memory_read(ch->src, buff + n, swidth);
            if (ch->ctrl & PL080_CCTRL_SI)
                ch->src += swidth;
        }
        xsize = (dwidth < swidth) ? swidth : dwidth;
        /* ??? This may pad the value incorrectly for dwidth < 32.  */
        for (n = 0; n < xsize; n += dwidth) {
            cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
            if (ch->ctrl & PL080_CCTRL_DI)
                ch->dest += dwidth;
        }

        size--;
        /* Write the remaining count back into the control register.  */
        ch->ctrl = (ch->ctrl & 0xfffff000) | size;
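        /* When the transfer count reaches zero the channel either reloads
           itself from the linked list item at LLI (source, destination, next
           LLI and control words) or, if LLI is zero, disables itself.  */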
        if (size == 0) {
            /* Transfer complete.  */
            if (ch->lli) {
                ch->src = ldl_phys(ch->lli);
                ch->dest = ldl_phys(ch->lli + 4);
                ch->ctrl = ldl_phys(ch->lli + 12);
                ch->lli = ldl_phys(ch->lli + 8);
            } else {
                ch->conf &= ~PL080_CCONF_E;
            }
            if (ch->ctrl & PL080_CCTRL_I) {
                s->tc_int |= 1 << c;
            }
        }
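/* Register reads.  Offsets 0x100-0x1ff address the per-channel register
   banks; 0xfe0-0xfff are the PrimeCell ID registers; everything else is a
   global control or interrupt status register.  */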
static uint32_t pl080_read(void *opaque, target_phys_addr_t offset)
{
    pl080_state *s = (pl080_state *)opaque;
    if (offset >= 0xfe0 && offset < 0x1000) {
        return pl080_id[(offset - 0xfe0) >> 2];
    }
    if (offset >= 0x100 && offset < 0x200) {
        /* Per-channel registers, 0x20 bytes apart.  */
        i = (offset & 0xe0) >> 5;
        offset &= 0x1f;
        switch (offset >> 2) {
        case 0: /* SrcAddr */
            return s->chan[i].src;
        case 1: /* DestAddr */
            return s->chan[i].dest;
        case 2: /* LLI */
            return s->chan[i].lli;
        case 3: /* Control */
            return s->chan[i].ctrl;
        case 4: /* Configuration */
            return s->chan[i].conf;
        }
    }
    switch (offset >> 2) {
    case 0: /* IntStatus */
        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
    case 1: /* IntTCStatus */
        return (s->tc_int & s->tc_mask);
    case 3: /* IntErrorStatus */
        return (s->err_int & s->err_mask);
    case 5: /* RawIntTCStatus */
        return s->tc_int;
    case 6: /* RawIntErrorStatus */
        return s->err_int;
    case 7: /* EnbldChns */
        mask = 0;
        for (i = 0; i < PL080_NUM_CHANNELS; i++) {
            if (s->chan[i].conf & PL080_CCONF_E)
                mask |= 1 << i;
        }
        return mask;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these.  */
        return 0;
    case 12: /* Configuration */
        return s->conf;
    default:
        cpu_abort(cpu_single_env, "pl080_read: Bad offset %x\n", offset);
        return 0;
    }
}
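/* Register writes.  Offsets 0x100-0x1ff address the per-channel register
   banks; the remaining offsets are the global control and interrupt clear
   registers.  */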
static void pl080_write(void *opaque, target_phys_addr_t offset,
                        uint32_t value)
{
    pl080_state *s = (pl080_state *)opaque;
    if (offset >= 0x100 && offset < 0x200) {
        /* Per-channel registers, 0x20 bytes apart.  */
        i = (offset & 0xe0) >> 5;
        offset &= 0x1f;
        switch (offset >> 2) {
        case 0: /* SrcAddr */
            s->chan[i].src = value;
            break;
        case 1: /* DestAddr */
            s->chan[i].dest = value;
            break;
        case 2: /* LLI */
            s->chan[i].lli = value;
            break;
        case 3: /* Control */
            s->chan[i].ctrl = value;
            break;
        case 4: /* Configuration */
            s->chan[i].conf = value;
            break;
        }
    }
    switch (offset >> 2) {
    case 2: /* IntTCClear */
        s->tc_int &= ~value;
        break;
    case 4: /* IntErrorClear */
        s->err_int &= ~value;
        break;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these.  */
        cpu_abort(cpu_single_env, "pl080_write: Soft DMA not implemented\n");
        break;
    case 12: /* Configuration */
        s->conf = value;
        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) {
            cpu_abort(cpu_single_env,
                      "pl080_write: Big-endian DMA not implemented\n");
        }
        break;
    default:
        cpu_abort(cpu_single_env, "pl080_write: Bad offset %x\n", offset);
    }
    pl080_update(s);
}
static CPUReadMemoryFunc *pl080_readfn[] = {
    pl080_read,
    pl080_read,
    pl080_read
};

static CPUWriteMemoryFunc *pl080_writefn[] = {
    pl080_write,
    pl080_write,
    pl080_write
};
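/* Create a PL080 mapped at 'base', raising interrupt 'irq' on the given
   interrupt controller.  Returns the opaque state pointer.  */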
void *pl080_init(uint32_t base, void *pic, int irq)
{
    int iomemtype;
    pl080_state *s;

    s = (pl080_state *)qemu_mallocz(sizeof(pl080_state));
    iomemtype = cpu_register_io_memory(0, pl080_readfn,
                                       pl080_writefn, s);
    cpu_register_physical_memory(base, 0x00000fff, iomemtype);
    s->base = base;
    s->pic = pic;
    s->irq = irq;
    /* ??? Save/restore.  */
    return s;
}
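/* Typical board wiring (illustrative values only; the base address and IRQ
   number depend on the board being modelled):

       void *dma = pl080_init(0x10130000, pic, 17);
*/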