/*
 * On-chip DMA controller framework.
 *
 * Copyright (C) 2008 Nokia Corporation
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-common.h"
#include "qemu-timer.h"
#include "soc_dma.h"
static void transfer_mem2mem(struct soc_dma_ch_s *ch)
{
    memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
    ch->paddr[0] += ch->bytes;
    ch->paddr[1] += ch->bytes;
}

static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
{
    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
    ch->paddr[0] += ch->bytes;
}

static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
{
    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
    ch->paddr[1] += ch->bytes;
}
/* This is further optimisable but isn't very important because often
 * DMA peripherals forbid this kind of transfer and even when they don't,
 * operating systems may not need to use it.  */
static void *fifo_buf;
static int fifo_size;
static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
{
    if (ch->bytes > fifo_size)
        fifo_buf = qemu_realloc(fifo_buf, fifo_size = ch->bytes);

    /* Implement as transfer_fifo2linear + transfer_linear2fifo.  */
    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
}
struct dma_s {
    struct soc_dma_s soc;
    int chnum;
    uint64_t ch_enable_mask;
    int64_t channel_freq;
    int enabled_count;

    struct memmap_entry_s {
        enum soc_dma_port_type type;
        target_phys_addr_t addr;
        union {
            struct {
                void *opaque;
                soc_dma_io_t fn;
                int out;
            } fifo;
            struct {
                void *base;
                size_t size;
            } mem;
        } u;
    } *memmap;
    int memmap_size;

    struct soc_dma_ch_s ch[0];
};
static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
    int64_t now = qemu_get_clock(vm_clock);
    struct dma_s *dma = (struct dma_s *) ch->dma;

    qemu_mod_timer(ch->timer, now + delay_bytes / dma->channel_freq);
}
static void soc_dma_ch_run(void *opaque)
{
    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;

    ch->running = 1;
    ch->dma->setup_fn(ch);
    ch->transfer_fn(ch);
    ch->running = 0;

    if (ch->enable)
        soc_dma_ch_schedule(ch, ch->bytes);
    ch->bytes = 0;
}
/* Binary search over the address-sorted port map for the entry that may
 * contain "addr"; callers verify the match.  */
static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
                target_phys_addr_t addr)
{
    struct memmap_entry_s *lo;
    int hi;

    lo = dma->memmap;
    hi = dma->memmap_size;

    while (hi > 1) {
        hi /= 2;
        if (lo[hi].addr <= addr)
            lo += hi;
    }

    return lo;
}
static inline enum soc_dma_port_type soc_dma_ch_update_type(
                struct soc_dma_ch_s *ch, int port)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;
    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);

    if (entry->type == soc_dma_port_fifo) {
        while (entry < dma->memmap + dma->memmap_size &&
                        entry->u.fifo.out != port)
            entry ++;
        if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
            return soc_dma_port_other;

        if (ch->type[port] != soc_dma_access_const)
            return soc_dma_port_other;

        ch->io_fn[port] = entry->u.fifo.fn;
        ch->io_opaque[port] = entry->u.fifo.opaque;
        return soc_dma_port_fifo;
    } else if (entry->type == soc_dma_port_mem) {
        if (entry->addr > ch->vaddr[port] ||
                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
            return soc_dma_port_other;

        /* TODO: support constant memory address for source port as used for
         * drawing solid rectangles by PalmOS(R).  */
        if (ch->type[port] != soc_dma_access_linear)
            return soc_dma_port_other;

        ch->paddr[port] = (uint8_t *) entry->u.mem.base +
                (ch->vaddr[port] - entry->addr);
        /* TODO: save bytes left to the end of the mapping somewhere so we
         * can check we're not reading beyond it.  */
        return soc_dma_port_mem;
    } else
        return soc_dma_port_other;
}
void soc_dma_ch_update(struct soc_dma_ch_s *ch)
{
    enum soc_dma_port_type src, dst;

    src = soc_dma_ch_update_type(ch, 0);
    if (src == soc_dma_port_other) {
        ch->update = 0;
        ch->transfer_fn = ch->dma->transfer_fn;
        return;
    }
    dst = soc_dma_ch_update_type(ch, 1);

    /* TODO: use src and dst as array indices.  */
    if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_mem2mem;
    else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_mem2fifo;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_fifo2mem;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_fifo2fifo;
    else
        ch->transfer_fn = ch->dma->transfer_fn;

    ch->update = (dst != soc_dma_port_other);
}
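
/* Usage sketch: a SoC-specific DMA model would typically refresh a channel's
 * port addresses and access types from its guest-visible registers and then
 * call soc_dma_ch_update() so that the fastest matching transfer helper
 * above is selected.  The register struct and field names below are
 * hypothetical examples; only the soc_dma_ch_s fields and the
 * soc_dma_ch_update() call come from this framework.
 *
 *     static void my_dma_ch_reprogram(struct soc_dma_ch_s *ch,
 *                     struct my_dma_ch_regs *regs)
 *     {
 *         ch->vaddr[0] = regs->src_addr;    // port 0: source, as above
 *         ch->vaddr[1] = regs->dst_addr;    // port 1: destination, as above
 *         ch->type[0] = regs->src_const ?
 *                 soc_dma_access_const : soc_dma_access_linear;
 *         ch->type[1] = regs->dst_const ?
 *                 soc_dma_access_const : soc_dma_access_linear;
 *
 *         soc_dma_ch_update(ch);            // re-resolve ch->transfer_fn
 *     }
 */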
static void soc_dma_ch_freq_update(struct dma_s *s)
{
    if (s->enabled_count)
        /* We completely ignore channel priorities and stuff */
        s->channel_freq = s->soc.freq / s->enabled_count;
    else {
        /* TODO: Signal that we want to disable the functional clock and let
         * the platform code decide what to do with it, i.e. check that
         * auto-idle is enabled in the clock controller and if we are stopping
         * the clock, do the same with any parent clocks that had only one
         * user keeping them on and auto-idle enabled.  */
    }
}
void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;

    dma->enabled_count += level - ch->enable;

    if (level)
        dma->ch_enable_mask |= (uint64_t) 1 << ch->num;
    else
        dma->ch_enable_mask &= ~((uint64_t) 1 << ch->num);

    if (level != ch->enable) {
        soc_dma_ch_freq_update(dma);
        ch->enable = level;

        if (!ch->enable)
            qemu_del_timer(ch->timer);
        else if (!ch->running)
            soc_dma_ch_run(ch);
        else
            soc_dma_ch_schedule(ch, 1);
    }
}
void soc_dma_reset(struct soc_dma_s *soc)
{
    struct dma_s *s = (struct dma_s *) soc;

    s->soc.drqbmp = 0;
    s->ch_enable_mask = 0;
    s->enabled_count = 0;
    soc_dma_ch_freq_update(s);
}
/* TODO: take a functional-clock argument */
struct soc_dma_s *soc_dma_init(int n)
{
    int i;
    struct dma_s *s = qemu_mallocz(sizeof(*s) + n * sizeof(*s->ch));

    s->chnum = n;
    s->soc.ch = s->ch;
    for (i = 0; i < n; i ++) {
        s->ch[i].dma = &s->soc;
        s->ch[i].num = i;
        s->ch[i].timer = qemu_new_timer(vm_clock, soc_dma_ch_run, &s->ch[i]);
    }

    soc_dma_reset(&s->soc);
    fifo_size = 0;

    return &s->soc;
}
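
/* Usage sketch: a board or SoC model creates the controller once, fills in
 * the clock rate and the generic hooks, and then describes the DMA-visible
 * address space through the port calls below.  The addresses, sizes and
 * callback names are made-up examples; the struct soc_dma_s fields and the
 * soc_dma_* entry points are the ones defined in this file.
 *
 *     struct soc_dma_s *dma = soc_dma_init(6);   // six channels
 *
 *     dma->freq = 100000000;                     // clock rate used for pacing
 *     dma->setup_fn = my_dma_setup_fn;           // called before each run
 *     dma->transfer_fn = my_dma_transfer_fn;     // fallback generic copy
 *
 *     soc_dma_port_add_mem(dma, ram_ptr, 0x10000000, ram_size);
 *     soc_dma_port_add_fifo(dma, 0x48000000, my_uart_tx_fifo, uart, 1);
 *     soc_dma_port_add_fifo(dma, 0x48000004, my_uart_rx_fifo, uart, 0);
 *
 * The last argument of soc_dma_port_add_fifo() mirrors the u.fifo.out/port
 * matching done in soc_dma_ch_update_type() above.
 */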
void soc_dma_port_add_fifo(struct soc_dma_s *soc, target_phys_addr_t virt_base,
                soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = qemu_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                    entry->addr + entry->u.mem.size > virt_base) {
                fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
                        " collides with RAM region at " TARGET_FMT_lx
                        "-" TARGET_FMT_lx "\n", __FUNCTION__,
                        (target_ulong) virt_base,
                        (target_ulong) entry->addr, (target_ulong)
                        (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else
            while (entry < dma->memmap + dma->memmap_size &&
                    entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
                            " collides with FIFO at " TARGET_FMT_lx "\n",
                            __FUNCTION__, (target_ulong) virt_base,
                            (target_ulong) entry->addr);
                    exit(-1);
                }

                entry ++;
            }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_fifo;
    entry->u.fifo.fn = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out = out;
}
void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                target_phys_addr_t virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = qemu_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                    (entry->addr <= virt_base &&
                     entry->addr + entry->u.mem.size > virt_base)) {
                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
                        " collides with RAM region at " TARGET_FMT_lx
                        "-" TARGET_FMT_lx "\n", __FUNCTION__,
                        (target_ulong) virt_base,
                        (target_ulong) (virt_base + size),
                        (target_ulong) entry->addr, (target_ulong)
                        (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else {
            if (entry->addr >= virt_base &&
                    entry->addr < virt_base + size) {
                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
                        " collides with FIFO at " TARGET_FMT_lx
                        "\n", __FUNCTION__,
                        (target_ulong) virt_base,
                        (target_ulong) (virt_base + size),
                        (target_ulong) entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                    entry->addr <= virt_base)
                entry ++;
        }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}

/* TODO: port removal for ports like PCMCIA memory */
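
/* Usage sketch: peripheral models drive transfers by toggling their request
 * lines.  The channel index and the reset hook-up below are made-up examples,
 * while soc_dma_set_request() and soc_dma_reset() are the entry points
 * defined above.
 *
 *     // Device FIFO became ready: enable channel 2 and kick off its transfers.
 *     soc_dma_set_request(&dma->ch[2], 1);
 *
 *     // Request serviced or channel disabled by the guest: stop its timer.
 *     soc_dma_set_request(&dma->ch[2], 0);
 *
 *     // On machine reset: clear the request bitmap and enable bookkeeping.
 *     soc_dma_reset(dma);
 */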