/*
 * On-chip DMA controller framework.
 *
 * Copyright (C) 2008 Nokia Corporation
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-common.h"
#include "qemu-timer.h"
#include "soc_dma.h"

static void transfer_mem2mem(struct soc_dma_ch_s *ch)
{
    memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
    ch->paddr[0] += ch->bytes;
    ch->paddr[1] += ch->bytes;
}

static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
{
    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
    ch->paddr[0] += ch->bytes;
}

static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
{
    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
    ch->paddr[1] += ch->bytes;
}

/* This is further optimisable but isn't very important because often
 * DMA peripherals forbid this kind of transfer and even when they don't,
 * operating systems may not need to use it.  */
static void *fifo_buf;
static int fifo_size;
static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
{
    if (ch->bytes > fifo_size)
        fifo_buf = qemu_realloc(fifo_buf, fifo_size = ch->bytes);

    /* Implement as transfer_fifo2linear + transfer_linear2fifo.  */
    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
}

struct dma_s {
    struct soc_dma_s soc;
    int chnum;
    uint64_t ch_enable_mask;
    int64_t channel_freq;
    int enabled_count;

    struct memmap_entry_s {
        enum soc_dma_port_type type;
        target_phys_addr_t addr;
        union {
            struct {
                void *opaque;
                soc_dma_io_t fn;
                int out;
            } fifo;
            struct {
                void *base;
                size_t size;
            } mem;
        } u;
    } *memmap;
    int memmap_size;

    struct soc_dma_ch_s ch[0];
};

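/* Transfer timing is modelled by dividing the controller clock evenly
 * between the enabled channels: after a burst of ch->bytes bytes the
 * channel timer is rescheduled bytes / channel_freq vm_clock ticks later.  */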
static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
    int64_t now = qemu_get_clock(vm_clock);
    struct dma_s *dma = (struct dma_s *) ch->dma;

    qemu_mod_timer(ch->timer, now + delay_bytes / dma->channel_freq);
}

static void soc_dma_ch_run(void *opaque)
{
    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;

    ch->running = 1;
    ch->dma->setup_fn(ch);
    ch->transfer_fn(ch);
    ch->running = 0;

    if (ch->enable)
        soc_dma_ch_schedule(ch, ch->bytes);
    ch->bytes = 0;
}

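/* dma->memmap is kept sorted by virtual address; this does a binary search
 * for an entry at or near addr to start the lookup from.  */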
static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
                target_phys_addr_t addr)
{
    struct memmap_entry_s *lo;
    int hi;

    lo = dma->memmap;
    hi = dma->memmap_size;

    while (hi > 1) {
        hi /= 2;
        if (lo[hi].addr <= addr)
            lo += hi;
    }

    return lo;
}

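/* Resolve the port's current virtual address against the memmap and, on
 * success, cache either the host pointer into the RAM region or the FIFO
 * callback, so that one of the specialised transfer functions can be used.  */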
static inline enum soc_dma_port_type soc_dma_ch_update_type(
                struct soc_dma_ch_s *ch, int port)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;
    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);

    if (entry->type == soc_dma_port_fifo) {
        while (entry < dma->memmap + dma->memmap_size &&
                        entry->u.fifo.out != port)
            entry ++;
        if (entry >= dma->memmap + dma->memmap_size ||
                        entry->addr != ch->vaddr[port] ||
                        entry->u.fifo.out != port)
            return soc_dma_port_other;

        if (ch->type[port] != soc_dma_access_const)
            return soc_dma_port_other;

        ch->io_fn[port] = entry->u.fifo.fn;
        ch->io_opaque[port] = entry->u.fifo.opaque;
        return soc_dma_port_fifo;
    } else if (entry->type == soc_dma_port_mem) {
        if (entry->addr > ch->vaddr[port] ||
                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
            return soc_dma_port_other;

        /* TODO: support constant memory address for source port as used for
         * drawing solid rectangles by PalmOS(R).  */
        if (ch->type[port] != soc_dma_access_linear)
            return soc_dma_port_other;

        ch->paddr[port] = (uint8_t *) entry->u.mem.base +
                (ch->vaddr[port] - entry->addr);
        /* TODO: save bytes left to the end of the mapping somewhere so we
         * can check we're not reading beyond it.  */
        return soc_dma_port_mem;
    } else
        return soc_dma_port_other;
}

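/* Pick a specialised transfer function for the channel according to the
 * types of its two ports; fall back to the DMA model's generic transfer_fn
 * whenever either port cannot be resolved to a known RAM region or FIFO.  */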
void soc_dma_ch_update(struct soc_dma_ch_s *ch)
{
    enum soc_dma_port_type src, dst;

    src = soc_dma_ch_update_type(ch, 0);
    if (src == soc_dma_port_other) {
        ch->update = 0;
        ch->transfer_fn = ch->dma->transfer_fn;
        return;
    }
    dst = soc_dma_ch_update_type(ch, 1);

    /* TODO: use src and dst as array indices.  */
    if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_mem2mem;
    else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_mem2fifo;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_fifo2mem;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_fifo2fifo;
    else
        ch->transfer_fn = ch->dma->transfer_fn;

    ch->update = (dst != soc_dma_port_other);
}

static void soc_dma_ch_freq_update(struct dma_s *s)
{
    if (s->enabled_count)
        /* We completely ignore channel priorities and stuff */
        s->channel_freq = s->soc.freq / s->enabled_count;
    else {
        /* TODO: Signal that we want to disable the functional clock and let
         * the platform code decide what to do with it, i.e. check that
         * auto-idle is enabled in the clock controller and if we are stopping
         * the clock, do the same with any parent clocks that had only one
         * user keeping them on and auto-idle enabled.  */
    }
}

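/* Called when a channel's request (DRQ) level changes: update the enable
 * bookkeeping and per-channel frequency, then start, stop or reschedule
 * the channel's transfer timer accordingly.  */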
void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;

    dma->enabled_count += level - ch->enable;

    if (level)
        dma->ch_enable_mask |= (uint64_t) 1 << ch->num;
    else
        dma->ch_enable_mask &= ~((uint64_t) 1 << ch->num);

    if (level != ch->enable) {
        soc_dma_ch_freq_update(dma);
        ch->enable = level;

        if (!ch->enable)
            qemu_del_timer(ch->timer);
        else if (!ch->running)
            soc_dma_ch_run(ch);
        else
            soc_dma_ch_schedule(ch, 1);
    }
}

void soc_dma_reset(struct soc_dma_s *soc)
{
    struct dma_s *s = (struct dma_s *) soc;

    s->soc.drqbmp = 0;
    s->ch_enable_mask = 0;
    s->enabled_count = 0;
    soc_dma_ch_freq_update(s);
}

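/* Allocate a controller with n channels.  The caller (the specific DMA
 * controller model) is expected to fill in soc.freq, setup_fn and
 * transfer_fn, and to register its ports with soc_dma_port_add_mem() and
 * soc_dma_port_add_fifo() before requesting transfers.  */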
/* TODO: take a functional-clock argument */
struct soc_dma_s *soc_dma_init(int n)
{
    int i;
    struct dma_s *s = qemu_mallocz(sizeof(*s) + n * sizeof(*s->ch));

    s->chnum = n;
    s->soc.ch = s->ch;
    for (i = 0; i < n; i ++) {
        s->ch[i].dma = &s->soc;
        s->ch[i].num = i;
        s->ch[i].timer = qemu_new_timer(vm_clock, soc_dma_ch_run, &s->ch[i]);
    }

    soc_dma_reset(&s->soc);
    fifo_size = 0;

    return &s->soc;
}

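/* Register a FIFO (constant-address) port at virt_base, keeping the memmap
 * sorted by address.  Collisions with an existing RAM region, or with a
 * FIFO of the same direction at the same address, are fatal.  */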
void soc_dma_port_add_fifo(struct soc_dma_s *soc, target_phys_addr_t virt_base,
                soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = qemu_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                            entry->addr + entry->u.mem.size > virt_base) {
                fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
                                " collides with RAM region at " TARGET_FMT_lx
                                "-" TARGET_FMT_lx "\n", __FUNCTION__,
                                (target_ulong) virt_base,
                                (target_ulong) entry->addr, (target_ulong)
                                (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else
            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    fprintf(stderr, "%s: FIFO at " TARGET_FMT_lx
                                    " collides with FIFO at " TARGET_FMT_lx
                                    "\n", __FUNCTION__,
                                    (target_ulong) virt_base,
                                    (target_ulong) entry->addr);
                    exit(-1);
                }

                entry ++;
            }

        memmove(entry + 1, entry,
                        (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                        (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_fifo;
    entry->u.fifo.fn = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out = out;
}

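/* Register a linear RAM region of size bytes at virt_base, backed by the
 * host buffer at phys_base; overlap with an existing RAM region or FIFO
 * is fatal.  */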
void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                target_phys_addr_t virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = qemu_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                            (entry->addr <= virt_base &&
                             entry->addr + entry->u.mem.size > virt_base)) {
                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
                                " collides with RAM region at " TARGET_FMT_lx
                                "-" TARGET_FMT_lx "\n", __FUNCTION__,
                                (target_ulong) virt_base,
                                (target_ulong) (virt_base + size),
                                (target_ulong) entry->addr, (target_ulong)
                                (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else {
            if (entry->addr >= virt_base &&
                            entry->addr < virt_base + size) {
                fprintf(stderr, "%s: RAM at " TARGET_FMT_lx "-" TARGET_FMT_lx
                                " collides with FIFO at " TARGET_FMT_lx
                                "\n", __FUNCTION__,
                                (target_ulong) virt_base,
                                (target_ulong) (virt_base + size),
                                (target_ulong) entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base)
                entry ++;
        }

        memmove(entry + 1, entry,
                        (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                        (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}

/* TODO: port removal for ports like PCMCIA memory */