/*
 * On-chip DMA controller framework.
 *
 * Copyright (C) 2008 Nokia Corporation
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "hw/arm/soc_dma.h"

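/* Per-channel transfer helpers, installed by soc_dma_ch_update() once both
 * ports of a channel resolve to a known memory or FIFO mapping.  Each one
 * moves ch->bytes in a single step and advances the linear (memory)
 * addresses it used. */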
static void transfer_mem2mem(struct soc_dma_ch_s *ch)
{
    memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
    ch->paddr[0] += ch->bytes;
    ch->paddr[1] += ch->bytes;
}

static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
{
    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
    ch->paddr[0] += ch->bytes;
}

static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
{
    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
    ch->paddr[1] += ch->bytes;
}

/* This is further optimisable but isn't very important because often
 * DMA peripherals forbid this kind of transfer and, even when they don't,
 * operating systems may not need to use it. */
static void *fifo_buf;
static int fifo_size;

static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
{
    if (ch->bytes > fifo_size)
        fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);

    /* Implement as transfer_fifo2linear + transfer_linear2fifo. */
    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
}

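/* Controller-wide state backing the public struct soc_dma_s.  The memmap
 * array holds the registered memory and FIFO ports sorted by virtual
 * address so that soc_dma_lookup() can binary-search it; the per-channel
 * state trails the structure as a flexible array. */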
struct dma_s {
    struct soc_dma_s soc;
    int chnum;
    uint64_t ch_enable_mask;
    int64_t channel_freq;
    int enabled_count;

    struct memmap_entry_s {
        enum soc_dma_port_type type;
        hwaddr addr;
        union {
            struct {
                void *opaque;
                soc_dma_io_t fn;
                int out;
            } fifo;
            struct {
                void *base;
                size_t size;
            } mem;
        } u;
    } *memmap;
    int memmap_size;

    struct soc_dma_ch_s ch[0];
};

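/* Approximate the duration of a transfer: (re)arm the channel timer to fire
 * delay_bytes / channel_freq virtual-clock ticks from now. */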
static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    struct dma_s *dma = (struct dma_s *) ch->dma;

    timer_mod(ch->timer, now + delay_bytes / dma->channel_freq);
}

static void soc_dma_ch_run(void *opaque)
{
    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;

    ch->running = 1;
    ch->dma->setup_fn(ch);
    ch->transfer_fn(ch);
    ch->running = 0;

    if (ch->enable)
        soc_dma_ch_schedule(ch, ch->bytes);
    ch->bytes = 0;
}

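/* Binary search the address-sorted memmap for the last entry whose base
 * address is less than or equal to addr. */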
static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
                hwaddr addr)
{
    struct memmap_entry_s *lo;
    int hi;

    lo = dma->memmap;
    hi = dma->memmap_size;

    while (hi > 1) {
        hi /= 2;
        if (lo[hi].addr <= addr)
            lo += hi;
    }

    return lo;
}

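/* Resolve one port of a channel against the memmap.  For a FIFO port the
 * matching io_fn/io_opaque pair is cached on the channel; for a memory port
 * the guest address is translated into a host pointer in paddr[].  Anything
 * else is reported as soc_dma_port_other. */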
static inline enum soc_dma_port_type soc_dma_ch_update_type(
                struct soc_dma_ch_s *ch, int port)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;
    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);

    if (entry->type == soc_dma_port_fifo) {
        while (entry < dma->memmap + dma->memmap_size &&
                        entry->u.fifo.out != port)
            entry++;
        if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
            return soc_dma_port_other;

        if (ch->type[port] != soc_dma_access_const)
            return soc_dma_port_other;

        ch->io_fn[port] = entry->u.fifo.fn;
        ch->io_opaque[port] = entry->u.fifo.opaque;
        return soc_dma_port_fifo;
    } else if (entry->type == soc_dma_port_mem) {
        if (entry->addr > ch->vaddr[port] ||
                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
            return soc_dma_port_other;

        /* TODO: support constant memory address for source port as used for
         * drawing solid rectangles by PalmOS(R). */
        if (ch->type[port] != soc_dma_access_linear)
            return soc_dma_port_other;

        ch->paddr[port] = (uint8_t *) entry->u.mem.base +
                (ch->vaddr[port] - entry->addr);
        /* TODO: save bytes left to the end of the mapping somewhere so we
         * can check we're not reading beyond it. */
        return soc_dma_port_mem;
    } else
        return soc_dma_port_other;
}

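/* Re-evaluate both ports of a channel and pick the fastest transfer_fn
 * available: one of the fixed helpers above when both ports are recognised,
 * otherwise the DMA model's own generic transfer_fn. */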
void soc_dma_ch_update(struct soc_dma_ch_s *ch)
{
    enum soc_dma_port_type src, dst;

    src = soc_dma_ch_update_type(ch, 0);
    if (src == soc_dma_port_other) {
        ch->update = 0;
        ch->transfer_fn = ch->dma->transfer_fn;
        return;
    }
    dst = soc_dma_ch_update_type(ch, 1);

    /* TODO: use src and dst as array indices. */
    if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_mem2mem;
    else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_mem2fifo;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_fifo2mem;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_fifo2fifo;
    else
        ch->transfer_fn = ch->dma->transfer_fn;

    ch->update = (dst != soc_dma_port_other);
}

static void soc_dma_ch_freq_update(struct dma_s *s)
{
    if (s->enabled_count)
        /* We completely ignore channel priorities and stuff */
        s->channel_freq = s->soc.freq / s->enabled_count;
    else {
        /* TODO: Signal that we want to disable the functional clock and let
         * the platform code decide what to do with it, i.e. check that
         * auto-idle is enabled in the clock controller and if we are stopping
         * the clock, do the same with any parent clocks that had only one
         * user keeping them on and auto-idle enabled. */
    }
}

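/* Called by the DMA model when a peripheral raises or drops its request
 * line: track the enabled-channel count and mask, rescale the per-channel
 * frequency, and start, reschedule or stop the channel timer. */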
void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;

    dma->enabled_count += level - ch->enable;

    if (level)
        dma->ch_enable_mask |= 1 << ch->num;
    else
        dma->ch_enable_mask &= ~(1 << ch->num);

    if (level != ch->enable) {
        soc_dma_ch_freq_update(dma);
        ch->enable = level;

        if (!ch->enable)
            timer_del(ch->timer);
        else if (!ch->running)
            soc_dma_ch_run(ch);
        else
            soc_dma_ch_schedule(ch, 1);
    }
}

void soc_dma_reset(struct soc_dma_s *soc)
{
    struct dma_s *s = (struct dma_s *) soc;

    s->ch_enable_mask = 0;
    s->enabled_count = 0;
    soc_dma_ch_freq_update(s);
}

/* TODO: take a functional-clock argument */
struct soc_dma_s *soc_dma_init(int n)
{
    int i;
    struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch));

    s->chnum = n;
    s->soc.ch = s->ch;
    for (i = 0; i < n; i++) {
        s->ch[i].dma = &s->soc;
        s->ch[i].num = i;
        s->ch[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, soc_dma_ch_run, &s->ch[i]);
    }

    soc_dma_reset(&s->soc);
    fifo_size = 0;

    return &s->soc;
}

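/* Register a FIFO (constant-address) port at virt_base, keeping the memmap
 * sorted by address and refusing to overlap an existing RAM region or a
 * FIFO of the same direction at the same address. */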
void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base,
                soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                            entry->addr + entry->u.mem.size > virt_base) {
                error_report("%s: FIFO at %"PRIx64
                             " collides with RAM region at %"PRIx64
                             "-%"PRIx64, __func__,
                             virt_base, entry->addr,
                             (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry++;
        } else
            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    error_report("%s: FIFO at %"PRIx64
                                 " collides with FIFO at %"PRIx64,
                                 __func__, virt_base, entry->addr);
                    exit(-1);
                }

                entry++;
            }

        memmove(entry + 1, entry,
                        (uint8_t *) (dma->memmap + dma->memmap_size++) -
                        (uint8_t *) entry);
    } else
        dma->memmap_size++;

    entry->addr          = virt_base;
    entry->type          = soc_dma_port_fifo;
    entry->u.fifo.fn     = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out    = out;
}

void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                hwaddr virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                            (entry->addr <= virt_base &&
                             entry->addr + entry->u.mem.size > virt_base)) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                             " collides with RAM region at %"PRIx64
                             "-%"PRIx64, __func__,
                             virt_base, virt_base + size,
                             entry->addr, entry->addr + entry->u.mem.size);
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry++;
        } else {
            if (entry->addr >= virt_base &&
                            entry->addr < virt_base + size) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                             " collides with FIFO at %"PRIx64,
                             __func__, virt_base, virt_base + size,
                             entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base)
                entry++;
        }

        memmove(entry + 1, entry,
                        (uint8_t *) (dma->memmap + dma->memmap_size++) -
                        (uint8_t *) entry);
    } else
        dma->memmap_size++;

    entry->addr       = virt_base;
    entry->type       = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}

/* TODO: port removal for ports like PCMCIA memory */
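
/* Typical use by a SoC DMA model (illustrative sketch only, not part of
 * this file): the model fills in soc->freq, soc->transfer_fn and
 * soc->setup_fn, registers its ports, and forwards peripheral DRQ changes.
 * The callback names and the frequency value below are hypothetical.
 *
 *     struct soc_dma_s *dma = soc_dma_init(4);
 *     dma->freq = 100000000;
 *     dma->transfer_fn = my_generic_transfer;
 *     dma->setup_fn = my_channel_setup;
 *     soc_dma_port_add_mem(dma, ram_ptr, ram_base, ram_size);
 *     soc_dma_port_add_fifo(dma, fifo_addr, my_fifo_io, fifo_opaque, out);
 *     ...
 *     soc_dma_set_request(&dma->ch[n], level);   (* on a DRQ edge *)
 */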