/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "sysbus.h"
#include "qemu-char.h"
#include "qemu-timer.h"
#include "ptimer.h"
#include "qdev-addr.h"

#include "xilinx_axidma.h"
/* No-op debug logging macro, used by the D(qemu_log(...)) calls below.  */
#define D(x)

#define R_DMACR             (0x00 / 4)
#define R_DMASR             (0x04 / 4)
#define R_CURDESC           (0x08 / 4)
#define R_TAILDESC          (0x10 / 4)
#define R_MAX               (0x30 / 4)
enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,
    DMASR_IOC_IRQ = 1 << 12,
    DMASR_DLY_IRQ = 1 << 13,

    DMASR_IRQ_MASK = 7 << 12
};
struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint32_t app[6];
};
enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};
struct AXIStream {
    QEMUBH *bh;
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    struct SDesc desc;
    int pos;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
};
struct XilinxAXIDMA {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t freqhz;
    void *dmach;

    struct AXIStream streams[2];
};
/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct AXIStream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct AXIStream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_halted(struct AXIStream *s)
{
    return s->regs[R_DMASR] & DMASR_HALTED;
}

static inline int stream_idle(struct AXIStream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}
static void stream_reset(struct AXIStream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* Starts up halted.  */
    s->regs[R_DMACR] = 1 << 16;       /* Starts with one in compl threshold.  */
}
/* Map an offset addr into a channel index.  */
static inline int streamid_from_addr(target_phys_addr_t addr)
{
    int sid;

    sid = addr / (0x30);
    sid &= 1;
    return sid;
}
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr  = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc      = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control      = %x\n", d->control);
    qemu_log("status       = %x\n", d->status);
}
static void stream_desc_load(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    int i;

    cpu_physical_memory_read(addr, (void *) d, sizeof *d);

    /* Convert from LE into host endianness.  */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = le32_to_cpu(d->app[i]);
    }
}
static void stream_desc_store(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    int i;

    /* Convert from host endianness into LE.  */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = cpu_to_le32(d->app[i]);
    }
    cpu_physical_memory_write(addr, (void *) d, sizeof *d);
}
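
/* The interrupt line reflects the IRQ status bits in DMASR, gated by the
 * corresponding enable bits in DMACR (they share the same bit positions).  */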
static void stream_update_irq(struct AXIStream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;

    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}
static void stream_reload_complete_cnt(struct AXIStream *s)
{
    unsigned int comp_th;
    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}
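
/* Delay-timer callback: reload the completion counter and raise the
 * delay interrupt.  */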
static void timer_hit(void *opaque)
{
    struct AXIStream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}
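
/* Called for each completed packet: arm the delay timer (DMACR[31:24]) and
 * raise the IOC interrupt once the completion threshold (DMACR[23:16]) has
 * been consumed.  */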
static void stream_complete(struct AXIStream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer.  */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq.  */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}
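
/* MM2S: walk the descriptor chain from CURDESC, gather the buffer contents
 * into txbuf and push a complete packet to the connected client when the EOF
 * descriptor is reached.  Stops at an already completed descriptor or at
 * TAILDESC.  */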
static void stream_process_mem2s(struct AXIStream *s,
                                 struct XilinxDMAConnection *dmach)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;
    uint32_t app[6];

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            s->pos = 0;
            memcpy(app, s->desc.app, sizeof app);
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;

        if (stream_desc_eof(&s->desc)) {
            xlx_dma_push_to_client(dmach, txbuf, s->pos, app);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor.  */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
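
/* S2MM: scatter an incoming stream packet into the buffers described by the
 * descriptor chain, marking SOF on the first descriptor and EOF (plus the
 * app words) on the last one.  */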
static void stream_process_s2mem(struct AXIStream *s,
                                 unsigned char *buf, size_t len, uint32_t *app)
{
    uint32_t prev_d;
    unsigned int rxlen;
    int pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* It fits.  */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor.  */
        if (!len) {
            int i;

            stream_complete(s);
            for (i = 0; i < 5; i++) {
                s->desc.app[i] = app[i];
            }
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
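
/* Entry point used by the connected stream client to push a packet (plus its
 * app/control words) into the S2MM channel.  */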
void axidma_push(void *opaque, unsigned char *buf, size_t len, uint32_t *app)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s = &d->streams[1];

    if (!app) {
        hw_error("No stream app data!\n");
    }
    stream_process_s2mem(s, buf, len, app);
    stream_update_irq(s);
}
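
/* Each stream owns a 0x30-byte register window.  Reads of DMACR model the
 * self-clearing reset bit; reads of DMASR compose the live completion counter
 * and delay-timer fields into the upper bits.  */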
static uint64_t axidma_read(void *opaque, target_phys_addr_t addr,
                            unsigned size)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Simulate one cycle's reset delay.  */
            s->regs[addr] &= ~DMACR_RESET;
            r = s->regs[addr];
            break;
        case R_DMASR:
            s->regs[addr] &= 0xffff;
            s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
            s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
            r = s->regs[addr];
            break;
        default:
            r = s->regs[addr];
            D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, r));
            break;
    }
    return r;
}
static void axidma_write(void *opaque, target_phys_addr_t addr,
                         uint64_t value, unsigned size)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Tailptr mode is always on.  */
            value |= DMACR_TAILPTR_MODE;
            /* Remember our previous reset state.  */
            value |= (s->regs[addr] & DMACR_RESET);
            s->regs[addr] = value;

            if (value & DMACR_RESET) {
                stream_reset(s);
            }

            if ((value & 1) && !stream_resetting(s)) {
                /* Start processing.  */
                s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
            }
            stream_reload_complete_cnt(s);
            break;

        case R_DMASR:
            /* Mask away write to clear irq lines.  */
            value &= ~(value & DMASR_IRQ_MASK);
            s->regs[addr] = value;
            break;

        case R_TAILDESC:
            s->regs[addr] = value;
            s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle.  */
            if (!sid) {
                stream_process_mem2s(s, d->dmach);
            }
            break;

        default:
            D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, value));
            s->regs[addr] = value;
            break;
    }
    stream_update_irq(s);
}
static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int xilinx_axidma_init(SysBusDevice *dev)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
    int i;

    sysbus_init_irq(dev, &s->streams[1].irq);
    sysbus_init_irq(dev, &s->streams[0].irq);

    if (!s->dmach) {
        hw_error("Unconnected DMA channel.\n");
    }

    xlx_dma_connect_dma(s->dmach, s, axidma_push);

    memory_region_init_io(&s->iomem, &axidma_ops, s,
                          "axidma", R_MAX * 4 * 2);
    sysbus_init_mmio(dev, &s->iomem);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
        s->streams[i].nr = i;
        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
    }
    return 0;
}
static SysBusDeviceInfo axidma_info = {
    .init = xilinx_axidma_init,
    .qdev.name  = "xilinx,axidma",
    .qdev.size  = sizeof(struct XilinxAXIDMA),
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
        DEFINE_PROP_PTR("dmach", struct XilinxAXIDMA, dmach),
        DEFINE_PROP_END_OF_LIST(),
    }
};
static void xilinx_axidma_register(void)
{
    sysbus_register_withprop(&axidma_info);
}

device_init(xilinx_axidma_register)