migration: move some declarations to migration.h
[qemu/lumag.git] / hw / xilinx_axidma.c
blobe32534feaf7b0911578025cf2ac8816868159119
1 /*
2 * QEMU model of Xilinx AXI-DMA block.
4 * Copyright (c) 2011 Edgar E. Iglesias.
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "sysbus.h"
26 #include "qemu-char.h"
27 #include "qemu-timer.h"
28 #include "qemu-log.h"
29 #include "qdev-addr.h"
31 #include "xilinx_axidma.h"
/* Debug printout wrapper; expands to nothing unless manually enabled.  */
#define D(x)

/* Per-stream register map (word indices into AXIStream.regs[]).  */
#define R_DMACR     (0x00 / 4)   /* DMA control register.  */
#define R_DMASR     (0x04 / 4)   /* DMA status register.  */
#define R_CURDESC   (0x08 / 4)   /* Current descriptor pointer.  */
#define R_TAILDESC  (0x10 / 4)   /* Tail descriptor pointer.  */
#define R_MAX       (0x30 / 4)   /* Registers per stream window.  */

/* DMACR bit fields.  */
enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

/* DMASR bit fields.  */
enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,
    DMASR_IOC_IRQ = 1 << 12,
    DMASR_DLY_IRQ = 1 << 13,

    /* Covers bits 12-14; bit 14 (error IRQ) has no named constant here.  */
    DMASR_IRQ_MASK = 7 << 12
};
/* In-guest-memory scatter-gather descriptor layout.  Stored little-endian
   in guest RAM; see stream_desc_load()/stream_desc_store() for the swaps.  */
struct SDesc {
    uint64_t nxtdesc;            /* Pointer to the next descriptor.  */
    uint64_t buffer_address;     /* Data buffer for this descriptor.  */
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint32_t app[6];             /* Application-defined sideband words.  */
};

/* Descriptor control word bits.  */
enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

/* Descriptor status word bits.  */
enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};
79 struct AXIStream {
80 QEMUBH *bh;
81 ptimer_state *ptimer;
82 qemu_irq irq;
84 int nr;
86 struct SDesc desc;
87 int pos;
88 unsigned int complete_cnt;
89 uint32_t regs[R_MAX];
92 struct XilinxAXIDMA {
93 SysBusDevice busdev;
94 uint32_t freqhz;
95 void *dmach;
97 struct AXIStream streams[2];
101 * Helper calls to extract info from desriptors and other trivial
102 * state from regs.
104 static inline int stream_desc_sof(struct SDesc *d)
106 return d->control & SDESC_CTRL_SOF;
109 static inline int stream_desc_eof(struct SDesc *d)
111 return d->control & SDESC_CTRL_EOF;
114 static inline int stream_resetting(struct AXIStream *s)
116 return !!(s->regs[R_DMACR] & DMACR_RESET);
119 static inline int stream_running(struct AXIStream *s)
121 return s->regs[R_DMACR] & DMACR_RUNSTOP;
124 static inline int stream_halted(struct AXIStream *s)
126 return s->regs[R_DMASR] & DMASR_HALTED;
129 static inline int stream_idle(struct AXIStream *s)
131 return !!(s->regs[R_DMASR] & DMASR_IDLE);
134 static void stream_reset(struct AXIStream *s)
136 s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */
137 s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshhold. */
140 /* Mapp an offset addr into a channel index. */
141 static inline int streamid_from_addr(target_phys_addr_t addr)
143 int sid;
145 sid = addr / (0x30);
146 sid &= 1;
147 return sid;
#ifdef DEBUG_ENET
/* Dump the interesting fields of a descriptor to the qemu log.
   Debug-only: compiled in only when DEBUG_ENET is defined.

   BUGFIX: the 64-bit format strings were missing the '%' before PRIx64
   ("buffer_addr = " PRIx64), so they expanded to the literal conversion
   letters with no conversion at all, leaving the passed uint64_t values
   unprinted (a printf argument/format mismatch).  */
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control = %x\n", d->control);
    qemu_log("status = %x\n", d->status);
}
#endif
160 static void stream_desc_load(struct AXIStream *s, target_phys_addr_t addr)
162 struct SDesc *d = &s->desc;
163 int i;
165 cpu_physical_memory_read(addr, (void *) d, sizeof *d);
167 /* Convert from LE into host endianness. */
168 d->buffer_address = le64_to_cpu(d->buffer_address);
169 d->nxtdesc = le64_to_cpu(d->nxtdesc);
170 d->control = le32_to_cpu(d->control);
171 d->status = le32_to_cpu(d->status);
172 for (i = 0; i < ARRAY_SIZE(d->app); i++) {
173 d->app[i] = le32_to_cpu(d->app[i]);
177 static void stream_desc_store(struct AXIStream *s, target_phys_addr_t addr)
179 struct SDesc *d = &s->desc;
180 int i;
182 /* Convert from host endianness into LE. */
183 d->buffer_address = cpu_to_le64(d->buffer_address);
184 d->nxtdesc = cpu_to_le64(d->nxtdesc);
185 d->control = cpu_to_le32(d->control);
186 d->status = cpu_to_le32(d->status);
187 for (i = 0; i < ARRAY_SIZE(d->app); i++) {
188 d->app[i] = cpu_to_le32(d->app[i]);
190 cpu_physical_memory_write(addr, (void *) d, sizeof *d);
193 static void stream_update_irq(struct AXIStream *s)
195 unsigned int pending, mask, irq;
197 pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
198 mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
200 irq = pending & mask;
202 qemu_set_irq(s->irq, !!irq);
205 static void stream_reload_complete_cnt(struct AXIStream *s)
207 unsigned int comp_th;
208 comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
209 s->complete_cnt = comp_th;
212 static void timer_hit(void *opaque)
214 struct AXIStream *s = opaque;
216 stream_reload_complete_cnt(s);
217 s->regs[R_DMASR] |= DMASR_DLY_IRQ;
218 stream_update_irq(s);
221 static void stream_complete(struct AXIStream *s)
223 unsigned int comp_delay;
225 /* Start the delayed timer. */
226 comp_delay = s->regs[R_DMACR] >> 24;
227 if (comp_delay) {
228 ptimer_stop(s->ptimer);
229 ptimer_set_count(s->ptimer, comp_delay);
230 ptimer_run(s->ptimer, 1);
233 s->complete_cnt--;
234 if (s->complete_cnt == 0) {
235 /* Raise the IOC irq. */
236 s->regs[R_DMASR] |= DMASR_IOC_IRQ;
237 stream_reload_complete_cnt(s);
241 static void stream_process_mem2s(struct AXIStream *s,
242 struct XilinxDMAConnection *dmach)
244 uint32_t prev_d;
245 unsigned char txbuf[16 * 1024];
246 unsigned int txlen;
247 uint32_t app[6];
249 if (!stream_running(s) || stream_idle(s)) {
250 return;
253 while (1) {
254 stream_desc_load(s, s->regs[R_CURDESC]);
256 if (s->desc.status & SDESC_STATUS_COMPLETE) {
257 s->regs[R_DMASR] |= DMASR_IDLE;
258 break;
261 if (stream_desc_sof(&s->desc)) {
262 s->pos = 0;
263 memcpy(app, s->desc.app, sizeof app);
266 txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
267 if ((txlen + s->pos) > sizeof txbuf) {
268 hw_error("%s: too small internal txbuf! %d\n", __func__,
269 txlen + s->pos);
272 cpu_physical_memory_read(s->desc.buffer_address,
273 txbuf + s->pos, txlen);
274 s->pos += txlen;
276 if (stream_desc_eof(&s->desc)) {
277 xlx_dma_push_to_client(dmach, txbuf, s->pos, app);
278 s->pos = 0;
279 stream_complete(s);
282 /* Update the descriptor. */
283 s->desc.status = txlen | SDESC_STATUS_COMPLETE;
284 stream_desc_store(s, s->regs[R_CURDESC]);
286 /* Advance. */
287 prev_d = s->regs[R_CURDESC];
288 s->regs[R_CURDESC] = s->desc.nxtdesc;
289 if (prev_d == s->regs[R_TAILDESC]) {
290 s->regs[R_DMASR] |= DMASR_IDLE;
291 break;
296 static void stream_process_s2mem(struct AXIStream *s,
297 unsigned char *buf, size_t len, uint32_t *app)
299 uint32_t prev_d;
300 unsigned int rxlen;
301 int pos = 0;
302 int sof = 1;
304 if (!stream_running(s) || stream_idle(s)) {
305 return;
308 while (len) {
309 stream_desc_load(s, s->regs[R_CURDESC]);
311 if (s->desc.status & SDESC_STATUS_COMPLETE) {
312 s->regs[R_DMASR] |= DMASR_IDLE;
313 break;
316 rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
317 if (rxlen > len) {
318 /* It fits. */
319 rxlen = len;
322 cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
323 len -= rxlen;
324 pos += rxlen;
326 /* Update the descriptor. */
327 if (!len) {
328 int i;
330 stream_complete(s);
331 for (i = 0; i < 5; i++) {
332 s->desc.app[i] = app[i];
334 s->desc.status |= SDESC_STATUS_EOF;
337 s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
338 s->desc.status |= SDESC_STATUS_COMPLETE;
339 stream_desc_store(s, s->regs[R_CURDESC]);
340 sof = 0;
342 /* Advance. */
343 prev_d = s->regs[R_CURDESC];
344 s->regs[R_CURDESC] = s->desc.nxtdesc;
345 if (prev_d == s->regs[R_TAILDESC]) {
346 s->regs[R_DMASR] |= DMASR_IDLE;
347 break;
352 static
353 void axidma_push(void *opaque, unsigned char *buf, size_t len, uint32_t *app)
355 struct XilinxAXIDMA *d = opaque;
356 struct AXIStream *s = &d->streams[1];
358 if (!app) {
359 hw_error("No stream app data!\n");
361 stream_process_s2mem(s, buf, len, app);
362 stream_update_irq(s);
365 static uint32_t axidma_readl(void *opaque, target_phys_addr_t addr)
367 struct XilinxAXIDMA *d = opaque;
368 struct AXIStream *s;
369 uint32_t r = 0;
370 int sid;
372 sid = streamid_from_addr(addr);
373 s = &d->streams[sid];
375 addr = addr % 0x30;
376 addr >>= 2;
377 switch (addr) {
378 case R_DMACR:
379 /* Simulate one cycles reset delay. */
380 s->regs[addr] &= ~DMACR_RESET;
381 r = s->regs[addr];
382 break;
383 case R_DMASR:
384 s->regs[addr] &= 0xffff;
385 s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
386 s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
387 r = s->regs[addr];
388 break;
389 default:
390 r = s->regs[addr];
391 D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
392 __func__, sid, addr * 4, r));
393 break;
395 return r;
399 static void
400 axidma_writel(void *opaque, target_phys_addr_t addr, uint32_t value)
402 struct XilinxAXIDMA *d = opaque;
403 struct AXIStream *s;
404 int sid;
406 sid = streamid_from_addr(addr);
407 s = &d->streams[sid];
409 addr = addr % 0x30;
410 addr >>= 2;
411 switch (addr) {
412 case R_DMACR:
413 /* Tailptr mode is always on. */
414 value |= DMACR_TAILPTR_MODE;
415 /* Remember our previous reset state. */
416 value |= (s->regs[addr] & DMACR_RESET);
417 s->regs[addr] = value;
419 if (value & DMACR_RESET) {
420 stream_reset(s);
423 if ((value & 1) && !stream_resetting(s)) {
424 /* Start processing. */
425 s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
427 stream_reload_complete_cnt(s);
428 break;
430 case R_DMASR:
431 /* Mask away write to clear irq lines. */
432 value &= ~(value & DMASR_IRQ_MASK);
433 s->regs[addr] = value;
434 break;
436 case R_TAILDESC:
437 s->regs[addr] = value;
438 s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
439 if (!sid) {
440 stream_process_mem2s(s, d->dmach);
442 break;
443 default:
444 D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
445 __func__, sid, addr * 4, value));
446 s->regs[addr] = value;
447 break;
449 stream_update_irq(s);
452 static CPUReadMemoryFunc * const axidma_read[] = {
453 &axidma_readl,
454 &axidma_readl,
455 &axidma_readl,
458 static CPUWriteMemoryFunc * const axidma_write[] = {
459 &axidma_writel,
460 &axidma_writel,
461 &axidma_writel,
464 static int xilinx_axidma_init(SysBusDevice *dev)
466 struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
467 int axidma_regs;
468 int i;
470 sysbus_init_irq(dev, &s->streams[1].irq);
471 sysbus_init_irq(dev, &s->streams[0].irq);
473 if (!s->dmach) {
474 hw_error("Unconnected DMA channel.\n");
477 xlx_dma_connect_dma(s->dmach, s, axidma_push);
479 axidma_regs = cpu_register_io_memory(axidma_read, axidma_write, s,
480 DEVICE_NATIVE_ENDIAN);
481 sysbus_init_mmio(dev, R_MAX * 4 * 2, axidma_regs);
483 for (i = 0; i < 2; i++) {
484 stream_reset(&s->streams[i]);
485 s->streams[i].nr = i;
486 s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
487 s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
488 ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
490 return 0;
493 static SysBusDeviceInfo axidma_info = {
494 .init = xilinx_axidma_init,
495 .qdev.name = "xilinx,axidma",
496 .qdev.size = sizeof(struct XilinxAXIDMA),
497 .qdev.props = (Property[]) {
498 DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
499 DEFINE_PROP_PTR("dmach", struct XilinxAXIDMA, dmach),
500 DEFINE_PROP_END_OF_LIST(),
504 static void xilinx_axidma_register(void)
506 sysbus_register_withprop(&axidma_info);
509 device_init(xilinx_axidma_register)