/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "sysbus.h"
#include "qemu-char.h"
#include "qemu-timer.h"
#include "ptimer.h"
#include "qdev-addr.h"

#include "xilinx_axidma.h"

#define D(x)
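/*
 * Each DMA channel (MM2S and S2MM) has its own 0x30-byte bank of registers;
 * the MMIO region covers both banks and streamid_from_addr() below selects
 * the channel an access targets.  The R_* values are word indices within
 * one bank.
 */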
#define R_DMACR     (0x00 / 4)
#define R_DMASR     (0x04 / 4)
#define R_CURDESC   (0x08 / 4)
#define R_TAILDESC  (0x10 / 4)
#define R_MAX       (0x30 / 4)
enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,

    DMASR_IOC_IRQ = 1 << 12,
    DMASR_DLY_IRQ = 1 << 13,

    DMASR_IRQ_MASK = 7 << 12
};
/* In-memory layout of a scatter-gather descriptor, stored little-endian
   in guest memory.  */
struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint32_t app[6];
};
enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};
struct AXIStream {
    QEMUBH *bh;
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    struct SDesc desc;
    unsigned int pos;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    uint32_t freqhz;
    void *dmach;

    /* streams[0] is the MM2S channel (memory to stream client),
       streams[1] is the S2MM channel fed by axidma_push().  */
    struct AXIStream streams[2];
};
/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}
static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}
static inline int stream_resetting(struct AXIStream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}
static inline int stream_running(struct AXIStream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}
static inline int stream_halted(struct AXIStream *s)
{
    return s->regs[R_DMASR] & DMASR_HALTED;
}
static inline int stream_idle(struct AXIStream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}
static void stream_reset(struct AXIStream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* Starts up halted.  */
    s->regs[R_DMACR] = 1 << 16;       /* Starts with a completion threshold of one.  */
}
/* Map an offset addr onto a channel index.  */
static inline int streamid_from_addr(target_phys_addr_t addr)
{
    int sid;

    sid = addr / (R_MAX * 4);
    sid &= 1;
    return sid;
}
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc     = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control     = %x\n", d->control);
    qemu_log("status      = %x\n", d->status);
}
static void stream_desc_load(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    int i;

    cpu_physical_memory_read(addr, (void *) d, sizeof *d);

    /* Convert from LE into host endianness.  */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = le32_to_cpu(d->app[i]);
    }
}
static void stream_desc_store(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    int i;

    /* Convert from host endianness into LE.  */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = cpu_to_le32(d->app[i]);
    }
    cpu_physical_memory_write(addr, (void *) d, sizeof *d);
}
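/* Drive the channel's IRQ line: assert it when any pending IRQ status bit
   in DMASR is also enabled in DMACR.  */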
static void stream_update_irq(struct AXIStream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;

    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}
static void stream_reload_complete_cnt(struct AXIStream *s)
{
    unsigned int comp_th;

    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}
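/* Delay-timer expiry: reload the completion counter and raise the
   delay interrupt.  */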
static void timer_hit(void *opaque)
{
    struct AXIStream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}
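/* Called once per completed frame: arm the delay timer from DMACR[31:24]
   and count down towards the completion (IOC) threshold held in
   DMACR[23:16].  */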
static void stream_complete(struct AXIStream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer.  */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq.  */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}
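/* MM2S: walk the scatter-gather ring from CURDESC, gathering buffers into
   txbuf until an EOF descriptor, then push the assembled packet (plus the
   app words taken from the SOF descriptor) to the connected stream client.
   Processing stops on an already-completed descriptor or at TAILDESC.  */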
static void stream_process_mem2s(struct AXIStream *s,
                                 struct XilinxDMAConnection *dmach)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;
    uint32_t app[6];

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            s->pos = 0;
            memcpy(app, s->desc.app, sizeof app);
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;

        if (stream_desc_eof(&s->desc)) {
            xlx_dma_push_to_client(dmach, txbuf, s->pos, app);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor.  */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance to the next descriptor.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
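/* S2MM: scatter an incoming packet from the stream client across the
   descriptor ring, marking SOF on the first descriptor and EOF plus the
   app words on the last one.  Every descriptor written back is stamped
   COMPLETE.  */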
static void stream_process_s2mem(struct AXIStream *s,
                                 unsigned char *buf, size_t len, uint32_t *app)
{
    uint32_t prev_d;
    unsigned int rxlen;
    int pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* The remainder fits inside this descriptor.  */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor.  */
        if (!len) {
            int i;

            stream_complete(s);
            for (i = 0; i < 5; i++) {
                s->desc.app[i] = app[i];
            }
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance to the next descriptor.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
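/* Entry point used by the connected stream client (via the DMA connection)
   to push received data into the S2MM channel, streams[1].  */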
void axidma_push(void *opaque, unsigned char *buf, size_t len, uint32_t *app)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s = &d->streams[1];

    if (!app) {
        hw_error("No stream app data!\n");
    }
    stream_process_s2mem(s, buf, len, app);
    stream_update_irq(s);
}
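/* Register reads have side effects here: reading DMACR clears the RESET bit
   (modelling a one-cycle reset), and reading DMASR folds the live completion
   counter and delay-timer count into bits [23:16] and [31:24].  */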
static uint32_t axidma_readl(void *opaque, target_phys_addr_t addr)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    uint32_t r;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    /* Strip off the channel bank and convert to a word index.  */
    addr = addr % (R_MAX * 4);
    addr >>= 2;

    switch (addr) {
        case R_DMACR:
            /* Simulate one cycle of reset delay.  */
            s->regs[addr] &= ~DMACR_RESET;
            break;
        case R_DMASR:
            s->regs[addr] &= 0xffff;
            s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
            s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
            break;
        default:
            break;
    }
    r = s->regs[addr];
    D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
               __func__, sid, addr * 4, r));
    return r;
}
static void
axidma_writel(void *opaque, target_phys_addr_t addr, uint32_t value)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    /* Strip off the channel bank and convert to a word index.  */
    addr = addr % (R_MAX * 4);
    addr >>= 2;

    switch (addr) {
        case R_DMACR:
            /* Tailptr mode is always on.  */
            value |= DMACR_TAILPTR_MODE;
            /* Remember our previous reset state.  */
            value |= (s->regs[addr] & DMACR_RESET);
            s->regs[addr] = value;

            if (value & DMACR_RESET) {
                stream_reset(s);
            }

            if ((value & 1) && !stream_resetting(s)) {
                /* Start processing.  */
                s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
            }
            stream_reload_complete_cnt(s);
            break;

        case R_DMASR:
            /* Mask away writes to the clear-on-write irq lines.  */
            value &= ~(value & DMASR_IRQ_MASK);
            s->regs[addr] = value;
            break;

        case R_TAILDESC:
            s->regs[addr] = value;
            s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle.  */
            if (!sid) {
                /* The MM2S channel starts fetching descriptors on a
                   tail pointer update.  */
                stream_process_mem2s(s, d->dmach);
            }
            break;

        default:
            D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, value));
            s->regs[addr] = value;
            break;
    }
    stream_update_irq(s);
}
static CPUReadMemoryFunc * const axidma_read[] = {
    &axidma_readl,
    &axidma_readl,
    &axidma_readl,
};

static CPUWriteMemoryFunc * const axidma_write[] = {
    &axidma_writel,
    &axidma_writel,
    &axidma_writel,
};
static int xilinx_axidma_init(SysBusDevice *dev)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
    int axidma_regs;
    int i;

    sysbus_init_irq(dev, &s->streams[1].irq);
    sysbus_init_irq(dev, &s->streams[0].irq);

    if (!s->dmach) {
        hw_error("Unconnected DMA channel.\n");
    }
    xlx_dma_connect_dma(s->dmach, s, axidma_push);

    axidma_regs = cpu_register_io_memory(axidma_read, axidma_write, s,
                                         DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, R_MAX * 4 * 2, axidma_regs);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
        s->streams[i].nr = i;
        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
    }
    return 0;
}
static SysBusDeviceInfo axidma_info = {
    .init = xilinx_axidma_init,
    .qdev.name  = "xilinx,axidma",
    .qdev.size  = sizeof(struct XilinxAXIDMA),
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
        DEFINE_PROP_PTR("dmach", struct XilinxAXIDMA, dmach),
        DEFINE_PROP_END_OF_LIST(),
    }
};

static void xilinx_axidma_register(void)
{
    sysbus_register_withprop(&axidma_info);
}

device_init(xilinx_axidma_register)