pc: Use PC_COMPAT_* for CPUID feature compatibility
[qemu/ar7.git] / hw / dma / xilinx_axidma.c
blobcf842a3cc7b3c267ce15e2af8ea85d6fea6a0116
1 /*
2 * QEMU model of Xilinx AXI-DMA block.
4 * Copyright (c) 2011 Edgar E. Iglesias.
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
#include "hw/sysbus.h"
#include "hw/ptimer.h"
#include "hw/stream.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
/* Debug printout helper; compiled out by default (expands to nothing). */
#define D(x)

#define TYPE_XILINX_AXI_DMA "xlnx.axi-dma"
#define TYPE_XILINX_AXI_DMA_DATA_STREAM "xilinx-axi-dma-data-stream"
#define TYPE_XILINX_AXI_DMA_CONTROL_STREAM "xilinx-axi-dma-control-stream"

#define XILINX_AXI_DMA(obj) \
     OBJECT_CHECK(XilinxAXIDMA, (obj), TYPE_XILINX_AXI_DMA)

#define XILINX_AXI_DMA_DATA_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj),\
     TYPE_XILINX_AXI_DMA_DATA_STREAM)

#define XILINX_AXI_DMA_CONTROL_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj),\
     TYPE_XILINX_AXI_DMA_CONTROL_STREAM)

/* Per-channel register indices (word offsets within a 0x30-byte window). */
#define R_DMACR     (0x00 / 4)  /* Channel control. */
#define R_DMASR     (0x04 / 4)  /* Channel status. */
#define R_CURDESC   (0x08 / 4)  /* Current descriptor pointer. */
#define R_TAILDESC  (0x10 / 4)  /* Tail descriptor pointer. */
#define R_MAX       (0x30 / 4)  /* Number of 32-bit registers per channel. */

/* Size of the sideband "APP" payload carried on the control stream. */
#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))

typedef struct XilinxAXIDMA XilinxAXIDMA;
typedef struct XilinxAXIDMAStreamSlave XilinxAXIDMAStreamSlave;
/* DMACR bits. */
enum {
    DMACR_RUNSTOP = 1,          /* Start/stop the channel. */
    DMACR_TAILPTR_MODE = 2,     /* Tail-pointer mode; always forced on here. */
    DMACR_RESET = 4             /* Soft reset of the channel. */
};

/* DMASR bits. */
enum {
    DMASR_HALTED = 1,
    DMASR_IDLE  = 2,
    DMASR_IOC_IRQ  = 1 << 12,   /* Interrupt on complete. */
    DMASR_DLY_IRQ  = 1 << 13,   /* Delay-timer interrupt. */

    /* All interrupt bits; same positions hold the enables in DMACR. */
    DMASR_IRQ_MASK = 7 << 12
};
/*
 * In-memory scatter-gather descriptor layout, little-endian in guest RAM
 * (byte-swapped on load/store by stream_desc_load()/stream_desc_store()).
 */
struct SDesc {
    uint64_t nxtdesc;           /* Guest address of the next descriptor. */
    uint64_t buffer_address;    /* Guest address of the data buffer. */
    uint64_t reserved;
    uint32_t control;           /* Length + SOF/EOF flags. */
    uint32_t status;            /* Transferred length + completion flags. */
    uint8_t app[CONTROL_PAYLOAD_SIZE];  /* Sideband APP words. */
};
/* SDesc.control bits. */
enum {
    SDESC_CTRL_EOF = (1 << 26),         /* Buffer ends a frame. */
    SDESC_CTRL_SOF = (1 << 27),         /* Buffer starts a frame. */

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1 /* Buffer length in bytes. */
};

/* SDesc.status bits. */
enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)   /* Descriptor has been processed. */
};
/* Per-channel state; the device has two (mem2s and s2mem). */
struct Stream {
    QEMUBH *bh;                 /* Bottom half backing the delay ptimer. */
    ptimer_state *ptimer;       /* IRQ-delay timer. */
    qemu_irq irq;               /* Channel interrupt line. */

    int nr;                     /* Channel index: 0 = mem2s, 1 = s2mem. */

    struct SDesc desc;          /* Scratch copy of the current descriptor. */
    int pos;                    /* Bytes assembled so far in the tx frame. */
    unsigned int complete_cnt;  /* IOC-threshold down-counter. */
    uint32_t regs[R_MAX];
    uint8_t app[20];            /* APP words latched from the control stream. */
};
/* Rx-side stream sink object; holds a back-link to the owning DMA. */
struct XilinxAXIDMAStreamSlave {
    Object parent;

    struct XilinxAXIDMA *dma;   /* Set via the "dma" link property. */
};
struct XilinxAXIDMA {
    SysBusDevice busdev;        /* Parent sysbus device. */
    MemoryRegion iomem;         /* MMIO window covering both channels. */
    uint32_t freqhz;            /* "freqhz" property: ptimer clock rate. */

    /* Downstream sinks for the mem2s (transmit) direction. */
    StreamSlave *tx_data_dev;
    StreamSlave *tx_control_dev;
    /* Embedded sink objects for the s2mem (receive) direction. */
    XilinxAXIDMAStreamSlave rx_data_dev;
    XilinxAXIDMAStreamSlave rx_control_dev;

    struct Stream streams[2];   /* [0] = mem2s, [1] = s2mem. */

    /* Deferred can-push notification for a blocked upstream sender. */
    StreamCanPushNotifyFn notify;
    void *notify_opaque;
};
/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    /* Nonzero iff the descriptor starts a frame. */
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    /* Nonzero iff the descriptor ends a frame. */
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct Stream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct Stream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_idle(struct Stream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}
164 static void stream_reset(struct Stream *s)
166 s->regs[R_DMASR] = DMASR_HALTED; /* starts up halted. */
167 s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */
170 /* Map an offset addr into a channel index. */
171 static inline int streamid_from_addr(hwaddr addr)
173 int sid;
175 sid = addr / (0x30);
176 sid &= 1;
177 return sid;
#ifdef DEBUG_ENET
/*
 * Dump a descriptor for debugging.  Note the "%" before PRIx64: PRIx64
 * expands only to the length/conversion letters, so without the leading
 * "%" the format string contained no conversion at all and the 64-bit
 * argument was silently never printed.
 */
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control = %x\n", d->control);
    qemu_log("status = %x\n", d->status);
}
#endif
190 static void stream_desc_load(struct Stream *s, hwaddr addr)
192 struct SDesc *d = &s->desc;
194 cpu_physical_memory_read(addr, d, sizeof *d);
196 /* Convert from LE into host endianness. */
197 d->buffer_address = le64_to_cpu(d->buffer_address);
198 d->nxtdesc = le64_to_cpu(d->nxtdesc);
199 d->control = le32_to_cpu(d->control);
200 d->status = le32_to_cpu(d->status);
203 static void stream_desc_store(struct Stream *s, hwaddr addr)
205 struct SDesc *d = &s->desc;
207 /* Convert from host endianness into LE. */
208 d->buffer_address = cpu_to_le64(d->buffer_address);
209 d->nxtdesc = cpu_to_le64(d->nxtdesc);
210 d->control = cpu_to_le32(d->control);
211 d->status = cpu_to_le32(d->status);
212 cpu_physical_memory_write(addr, d, sizeof *d);
215 static void stream_update_irq(struct Stream *s)
217 unsigned int pending, mask, irq;
219 pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
220 mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
222 irq = pending & mask;
224 qemu_set_irq(s->irq, !!irq);
227 static void stream_reload_complete_cnt(struct Stream *s)
229 unsigned int comp_th;
230 comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
231 s->complete_cnt = comp_th;
/* Delay-timer callback: raise the delay IRQ for the channel. */
static void timer_hit(void *opaque)
{
    struct Stream *s = opaque;

    /* Re-arm the completion threshold and flag DMASR.Dly_Irq. */
    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}
243 static void stream_complete(struct Stream *s)
245 unsigned int comp_delay;
247 /* Start the delayed timer. */
248 comp_delay = s->regs[R_DMACR] >> 24;
249 if (comp_delay) {
250 ptimer_stop(s->ptimer);
251 ptimer_set_count(s->ptimer, comp_delay);
252 ptimer_run(s->ptimer, 1);
255 s->complete_cnt--;
256 if (s->complete_cnt == 0) {
257 /* Raise the IOC irq. */
258 s->regs[R_DMASR] |= DMASR_IOC_IRQ;
259 stream_reload_complete_cnt(s);
/*
 * mem2s (transmit) engine: walk the descriptor chain from CURDESC,
 * assembling buffer fragments into txbuf and pushing completed frames to
 * the data stream sink.  On SOF the APP words go out on the control
 * stream.  Stops when hitting an already-completed descriptor (halt) or
 * after processing TAILDESC (idle).
 */
static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev,
                                 StreamSlave *tx_control_dev)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        /* A descriptor we already completed halts the channel. */
        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            /* Frame start: reset assembly and emit the sideband words. */
            s->pos = 0;
            stream_push(tx_control_dev, s->desc.app, sizeof(s->desc.app));
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;

        if (stream_desc_eof(&s->desc)) {
            /* Frame end: hand the assembled payload to the data sink. */
            stream_push(tx_data_dev, txbuf, s->pos);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor.  */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
/*
 * s2mem (receive) engine: scatter the incoming frame buf[0..len) into
 * guest memory along the descriptor chain.  The first descriptor gets
 * SOF, the one holding the final byte gets EOF plus the latched APP
 * words.  Returns the number of bytes actually consumed (may be short
 * if the chain halts or hits TAILDESC first).
 */
static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
                                   size_t len)
{
    uint32_t prev_d;
    unsigned int rxlen;
    size_t pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return 0;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        /* A descriptor we already completed halts the channel. */
        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* It fits.  */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor.  */
        if (!len) {
            /* Last chunk: copy in the APP sideband words and mark EOF. */
            stream_complete(s);
            memcpy(s->desc.app, s->app, sizeof(s->desc.app));
            s->desc.status |= SDESC_STATUS_EOF;
        }

        /* sof is 1 only on the first descriptor of the frame. */
        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }

    return pos;
}
371 static void xilinx_axidma_reset(DeviceState *dev)
373 int i;
374 XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
376 for (i = 0; i < 2; i++) {
377 stream_reset(&s->streams[i]);
381 static size_t
382 xilinx_axidma_control_stream_push(StreamSlave *obj, unsigned char *buf,
383 size_t len)
385 XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(obj);
386 struct Stream *s = &cs->dma->streams[1];
388 if (len != CONTROL_PAYLOAD_SIZE) {
389 hw_error("AXI DMA requires %d byte control stream payload\n",
390 (int)CONTROL_PAYLOAD_SIZE);
393 memcpy(s->app, buf, len);
394 return len;
397 static bool
398 xilinx_axidma_data_stream_can_push(StreamSlave *obj,
399 StreamCanPushNotifyFn notify,
400 void *notify_opaque)
402 XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
403 struct Stream *s = &ds->dma->streams[1];
405 if (!stream_running(s) || stream_idle(s)) {
406 ds->dma->notify = notify;
407 ds->dma->notify_opaque = notify_opaque;
408 return false;
411 return true;
414 static size_t
415 xilinx_axidma_data_stream_push(StreamSlave *obj, unsigned char *buf, size_t len)
417 XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
418 struct Stream *s = &ds->dma->streams[1];
419 size_t ret;
421 ret = stream_process_s2mem(s, buf, len);
422 stream_update_irq(s);
423 return ret;
/*
 * MMIO read.  Note the side effects: reading DMACR clears the RESET bit
 * (simulated one-cycle reset), and reading DMASR folds the live threshold
 * counter and timer count into bits [31:16].
 */
static uint64_t axidma_read(void *opaque, hwaddr addr,
                            unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    /* Reduce to a word index within this channel's window. */
    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Simulate one cycles reset delay. */
        s->regs[addr] &= ~DMACR_RESET;
        r = s->regs[addr];
        break;
    case R_DMASR:
        s->regs[addr] &= 0xffff;
        s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
        s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
        r = s->regs[addr];
        break;
    default:
        r = s->regs[addr];
        D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, r));
        break;
    }
    return r;
}
/*
 * MMIO write.  Writing TAILDESC kicks off processing (mem2s runs
 * immediately; s2mem waits for pushed data).  After any write to the
 * s2mem channel, a sender that earlier blocked in can_push() is notified.
 */
static void axidma_write(void *opaque, hwaddr addr,
                         uint64_t value, unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    /* Reduce to a word index within this channel's window. */
    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Tailptr mode is always on. */
        value |= DMACR_TAILPTR_MODE;
        /* Remember our previous reset state. */
        value |= (s->regs[addr] & DMACR_RESET);
        s->regs[addr] = value;

        if (value & DMACR_RESET) {
            stream_reset(s);
        }

        if ((value & 1) && !stream_resetting(s)) {
            /* Start processing. */
            s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
        }
        stream_reload_complete_cnt(s);
        break;

    case R_DMASR:
        /* Mask away write to clear irq lines. */
        value &= ~(value & DMASR_IRQ_MASK);
        s->regs[addr] = value;
        break;

    case R_TAILDESC:
        s->regs[addr] = value;
        s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
        if (!sid) {
            /* Channel 0 is mem2s; start draining descriptors now. */
            stream_process_mem2s(s, d->tx_data_dev, d->tx_control_dev);
        }
        break;
    default:
        D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, (unsigned)value));
        s->regs[addr] = value;
        break;
    }

    /* Wake a sender that was blocked waiting for the s2mem channel. */
    if (sid == 1 && d->notify) {
        StreamCanPushNotifyFn notifytmp = d->notify;
        d->notify = NULL;
        notifytmp(d->notify_opaque);
    }
    stream_update_irq(s);
}
/* MMIO dispatch table for both channel register windows. */
static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
525 static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
527 XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
528 XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev);
529 XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(
530 &s->rx_control_dev);
531 Error *local_err = NULL;
533 object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA,
534 (Object **)&ds->dma,
535 object_property_allow_set_link,
536 OBJ_PROP_LINK_UNREF_ON_RELEASE,
537 &local_err);
538 object_property_add_link(OBJECT(cs), "dma", TYPE_XILINX_AXI_DMA,
539 (Object **)&cs->dma,
540 object_property_allow_set_link,
541 OBJ_PROP_LINK_UNREF_ON_RELEASE,
542 &local_err);
543 if (local_err) {
544 goto xilinx_axidma_realize_fail;
546 object_property_set_link(OBJECT(ds), OBJECT(s), "dma", &local_err);
547 object_property_set_link(OBJECT(cs), OBJECT(s), "dma", &local_err);
548 if (local_err) {
549 goto xilinx_axidma_realize_fail;
552 int i;
554 for (i = 0; i < 2; i++) {
555 struct Stream *st = &s->streams[i];
557 st->nr = i;
558 st->bh = qemu_bh_new(timer_hit, st);
559 st->ptimer = ptimer_init(st->bh);
560 ptimer_set_freq(st->ptimer, s->freqhz);
562 return;
564 xilinx_axidma_realize_fail:
565 if (!*errp) {
566 *errp = local_err;
/* Instance init: create link properties, child rx objects, IRQs and MMIO. */
static void xilinx_axidma_init(Object *obj)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    /* Links the board code sets to attach the tx side to a consumer. */
    object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
                             (Object **)&s->tx_data_dev,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    object_property_add_link(obj, "axistream-control-connected",
                             TYPE_STREAM_SLAVE,
                             (Object **)&s->tx_control_dev,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);

    /* Embedded rx-side sink objects, exposed as QOM children. */
    object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
                      TYPE_XILINX_AXI_DMA_DATA_STREAM);
    object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),
                      TYPE_XILINX_AXI_DMA_CONTROL_STREAM);
    object_property_add_child(OBJECT(s), "axistream-connected-target",
                              (Object *)&s->rx_data_dev, &error_abort);
    object_property_add_child(OBJECT(s), "axistream-control-connected-target",
                              (Object *)&s->rx_control_dev, &error_abort);

    sysbus_init_irq(sbd, &s->streams[0].irq);
    sysbus_init_irq(sbd, &s->streams[1].irq);

    /* Two channels, each with an R_MAX-word register window. */
    memory_region_init_io(&s->iomem, obj, &axidma_ops, s,
                          "xlnx.axi-dma", R_MAX * 4 * 2);
    sysbus_init_mmio(sbd, &s->iomem);
}
static Property axidma_properties[] = {
    /* Clock rate (Hz) used to program the per-channel delay ptimers. */
    DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
    DEFINE_PROP_END_OF_LIST(),
};
609 static void axidma_class_init(ObjectClass *klass, void *data)
611 DeviceClass *dc = DEVICE_CLASS(klass);
613 dc->realize = xilinx_axidma_realize,
614 dc->reset = xilinx_axidma_reset;
615 dc->props = axidma_properties;
/*
 * Class "templates": passed as class_data and copied into the QOM class
 * by xilinx_axidma_stream_class_init().
 */
static StreamSlaveClass xilinx_axidma_data_stream_class = {
    .push = xilinx_axidma_data_stream_push,
    .can_push = xilinx_axidma_data_stream_can_push,
};

static StreamSlaveClass xilinx_axidma_control_stream_class = {
    .push = xilinx_axidma_control_stream_push,
};
627 static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
629 StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);
631 ssc->push = ((StreamSlaveClass *)data)->push;
632 ssc->can_push = ((StreamSlaveClass *)data)->can_push;
/* The DMA device proper. */
static const TypeInfo axidma_info = {
    .name = TYPE_XILINX_AXI_DMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIDMA),
    .class_init = axidma_class_init,
    .instance_init = xilinx_axidma_init,
};
/* Rx-side data stream sink type (implements the StreamSlave interface). */
static const TypeInfo xilinx_axidma_data_stream_info = {
    .name = TYPE_XILINX_AXI_DMA_DATA_STREAM,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
    .class_init = xilinx_axidma_stream_class_init,
    .class_data = &xilinx_axidma_data_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};
/* Rx-side control stream sink type (implements the StreamSlave interface). */
static const TypeInfo xilinx_axidma_control_stream_info = {
    .name = TYPE_XILINX_AXI_DMA_CONTROL_STREAM,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
    .class_init = xilinx_axidma_stream_class_init,
    .class_data = &xilinx_axidma_control_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};
667 static void xilinx_axidma_register_types(void)
669 type_register_static(&axidma_info);
670 type_register_static(&xilinx_axidma_data_stream_info);
671 type_register_static(&xilinx_axidma_control_stream_info);
674 type_init(xilinx_axidma_register_types)