/*
 * xlnx_dpdma.c
 *
 * Copyright (C) 2015 : GreenSocs Ltd
 *      http://www.greensocs.com/ , email: info@greensocs.com
 *
 * Developed by :
 * Frederic Konrad   <fred.konrad@greensocs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/dma/xlnx_dpdma.h"

#ifndef DEBUG_DPDMA
#define DEBUG_DPDMA 0
#endif

#define DPRINTF(fmt, ...) do {                                               \
    if (DEBUG_DPDMA) {                                                       \
        qemu_log("xlnx_dpdma: " fmt , ## __VA_ARGS__);                       \
    }                                                                        \
} while (0)

/*
 * Registers offset for DPDMA.
 */
#define DPDMA_ERR_CTRL                    (0x0000)
#define DPDMA_ISR                         (0x0004 >> 2)
#define DPDMA_IMR                         (0x0008 >> 2)
#define DPDMA_IEN                         (0x000C >> 2)
#define DPDMA_IDS                         (0x0010 >> 2)
#define DPDMA_EISR                        (0x0014 >> 2)
#define DPDMA_EIMR                        (0x0018 >> 2)
#define DPDMA_EIEN                        (0x001C >> 2)
#define DPDMA_EIDS                        (0x0020 >> 2)
#define DPDMA_CNTL                        (0x0100 >> 2)

#define DPDMA_GBL                         (0x0104 >> 2)
#define DPDMA_GBL_TRG_CH(n)               (1 << n)
#define DPDMA_GBL_RTRG_CH(n)              (1 << 6 << n)

#define DPDMA_ALC0_CNTL                   (0x0108 >> 2)
#define DPDMA_ALC0_STATUS                 (0x010C >> 2)
#define DPDMA_ALC0_MAX                    (0x0110 >> 2)
#define DPDMA_ALC0_MIN                    (0x0114 >> 2)
#define DPDMA_ALC0_ACC                    (0x0118 >> 2)
#define DPDMA_ALC0_ACC_TRAN               (0x011C >> 2)
#define DPDMA_ALC1_CNTL                   (0x0120 >> 2)
#define DPDMA_ALC1_STATUS                 (0x0124 >> 2)
#define DPDMA_ALC1_MAX                    (0x0128 >> 2)
#define DPDMA_ALC1_MIN                    (0x012C >> 2)
#define DPDMA_ALC1_ACC                    (0x0130 >> 2)
#define DPDMA_ALC1_ACC_TRAN               (0x0134 >> 2)

#define DPDMA_DSCR_STRT_ADDRE_CH(n)       ((0x0200 + n * 0x100) >> 2)
#define DPDMA_DSCR_STRT_ADDR_CH(n)        ((0x0204 + n * 0x100) >> 2)
#define DPDMA_DSCR_NEXT_ADDRE_CH(n)       ((0x0208 + n * 0x100) >> 2)
#define DPDMA_DSCR_NEXT_ADDR_CH(n)        ((0x020C + n * 0x100) >> 2)
#define DPDMA_PYLD_CUR_ADDRE_CH(n)        ((0x0210 + n * 0x100) >> 2)
#define DPDMA_PYLD_CUR_ADDR_CH(n)         ((0x0214 + n * 0x100) >> 2)

#define DPDMA_CNTL_CH(n)                  ((0x0218 + n * 0x100) >> 2)
#define DPDMA_CNTL_CH_EN                  (1)
#define DPDMA_CNTL_CH_PAUSED              (1 << 1)

#define DPDMA_STATUS_CH(n)                ((0x021C + n * 0x100) >> 2)
#define DPDMA_STATUS_BURST_TYPE           (1 << 4)
#define DPDMA_STATUS_MODE                 (1 << 5)
#define DPDMA_STATUS_EN_CRC               (1 << 6)
#define DPDMA_STATUS_LAST_DSCR            (1 << 7)
#define DPDMA_STATUS_LDSCR_FRAME          (1 << 8)
#define DPDMA_STATUS_IGNR_DONE            (1 << 9)
#define DPDMA_STATUS_DSCR_DONE            (1 << 10)
#define DPDMA_STATUS_EN_DSCR_UP           (1 << 11)
#define DPDMA_STATUS_EN_DSCR_INTR         (1 << 12)
#define DPDMA_STATUS_PREAMBLE_OFF         (13)

#define DPDMA_VDO_CH(n)                   ((0x0220 + n * 0x100) >> 2)
#define DPDMA_PYLD_SZ_CH(n)               ((0x0224 + n * 0x100) >> 2)
#define DPDMA_DSCR_ID_CH(n)               ((0x0228 + n * 0x100) >> 2)

/*
 * Descriptor control field.
 */
#define CONTROL_PREAMBLE_VALUE            0xA5

#define DSCR_CTRL_PREAMBLE                0xFF
#define DSCR_CTRL_EN_DSCR_DONE_INTR       (1 << 8)
#define DSCR_CTRL_EN_DSCR_UPDATE          (1 << 9)
#define DSCR_CTRL_IGNORE_DONE             (1 << 10)
#define DSCR_CTRL_AXI_BURST_TYPE          (1 << 11)
#define DSCR_CTRL_AXCACHE                 (0x0F << 12)
#define DSCR_CTRL_AXPROT                  (0x2 << 16)
#define DSCR_CTRL_DESCRIPTOR_MODE         (1 << 18)
#define DSCR_CTRL_LAST_DESCRIPTOR         (1 << 19)
#define DSCR_CTRL_ENABLE_CRC              (1 << 20)
#define DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME (1 << 21)

/*
 * Descriptor timestamp field.
 */
#define STATUS_DONE                       (1 << 31)

#define DPDMA_FRAG_MAX_SZ                 (4096)

enum DPDMABurstType {
    DPDMA_INCR = 0,
    DPDMA_FIXED = 1
};

enum DPDMAMode {
    DPDMA_CONTIGOUS = 0,
    DPDMA_FRAGMENTED = 1
};

struct DPDMADescriptor {
    uint32_t control;
    uint32_t descriptor_id;
    /* transfer size in bytes. */
    uint32_t xfer_size;
    uint32_t line_size_stride;
    uint32_t timestamp_lsb;
    uint32_t timestamp_msb;
    /* contains extension for both descriptor and source. */
    uint32_t address_extension;
    uint32_t next_descriptor;
    uint32_t source_address;
    uint32_t address_extension_23;
    uint32_t address_extension_45;
    uint32_t source_address2;
    uint32_t source_address3;
    uint32_t source_address4;
    uint32_t source_address5;
    uint32_t crc;
};

typedef enum DPDMABurstType DPDMABurstType;
typedef enum DPDMAMode DPDMAMode;
typedef struct DPDMADescriptor DPDMADescriptor;

static bool xlnx_dpdma_desc_is_last(DPDMADescriptor *desc)
{
    return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0);
}

static bool xlnx_dpdma_desc_is_last_of_frame(DPDMADescriptor *desc)
{
    return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0);
}

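/*
 * A descriptor in fragmented mode can reference up to five payload
 * fragments.  Each 32-bit source_address field is widened with a 12-bit
 * extension taken from address_extension (fragment 0) or from
 * address_extension_23/_45 (fragments 1-4), combined with the shifts used
 * in each case below.
 */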
static uint64_t xlnx_dpdma_desc_get_source_address(DPDMADescriptor *desc,
                                                   uint8_t frag)
{
    uint64_t addr = 0;
    assert(frag < 5);

    switch (frag) {
    case 0:
        addr = desc->source_address
            + (extract32(desc->address_extension, 16, 12) << 20);
        break;
    case 1:
        addr = desc->source_address2
            + (extract32(desc->address_extension_23, 0, 12) << 8);
        break;
    case 2:
        addr = desc->source_address3
            + (extract32(desc->address_extension_23, 16, 12) << 20);
        break;
    case 3:
        addr = desc->source_address4
            + (extract32(desc->address_extension_45, 0, 12) << 8);
        break;
    case 4:
        addr = desc->source_address5
            + (extract32(desc->address_extension_45, 16, 12) << 20);
        break;
    default:
        addr = 0;
        break;
    }

    return addr;
}

static uint32_t xlnx_dpdma_desc_get_transfer_size(DPDMADescriptor *desc)
{
    return desc->xfer_size;
}

static uint32_t xlnx_dpdma_desc_get_line_size(DPDMADescriptor *desc)
{
    return extract32(desc->line_size_stride, 0, 18);
}

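/* The stride field of line_size_stride is expressed in units of 16 bytes. */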
static uint32_t xlnx_dpdma_desc_get_line_stride(DPDMADescriptor *desc)
{
    return extract32(desc->line_size_stride, 18, 14) * 16;
}

static inline bool xlnx_dpdma_desc_crc_enabled(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_ENABLE_CRC) != 0;
}

static inline bool xlnx_dpdma_desc_check_crc(DPDMADescriptor *desc)
{
    uint32_t *p = (uint32_t *)desc;
    uint32_t crc = 0;
    uint8_t i;

    /*
     * CRC is calculated on the whole descriptor except the last 32-bit word,
     * using 32-bit addition.
     */
    for (i = 0; i < 15; i++) {
        crc += p[i];
    }

    return crc == desc->crc;
}

static inline bool xlnx_dpdma_desc_completion_interrupt(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0;
}

static inline bool xlnx_dpdma_desc_is_valid(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_PREAMBLE) == CONTROL_PREAMBLE_VALUE;
}

static inline bool xlnx_dpdma_desc_is_contiguous(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_DESCRIPTOR_MODE) == 0;
}

static inline bool xlnx_dpdma_desc_update_enabled(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0;
}

static inline void xlnx_dpdma_desc_set_done(DPDMADescriptor *desc)
{
    desc->timestamp_msb |= STATUS_DONE;
}

static inline bool xlnx_dpdma_desc_is_already_done(DPDMADescriptor *desc)
{
    return (desc->timestamp_msb & STATUS_DONE) != 0;
}

static inline bool xlnx_dpdma_desc_ignore_done_bit(DPDMADescriptor *desc)
{
    return (desc->control & DSCR_CTRL_IGNORE_DONE) != 0;
}

static const VMStateDescription vmstate_xlnx_dpdma = {
    .name = TYPE_XLNX_DPDMA,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(registers, XlnxDPDMAState,
                             XLNX_DPDMA_REG_ARRAY_SIZE),
        VMSTATE_BOOL_ARRAY(operation_finished, XlnxDPDMAState, 6),
        VMSTATE_END_OF_LIST()
    }
};

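/*
 * The IRQ line is asserted whenever any unmasked bit is pending in either
 * the normal (ISR/IMR) or the error (EISR/EIMR) interrupt registers.
 */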
static void xlnx_dpdma_update_irq(XlnxDPDMAState *s)
{
    bool flags;

    flags = ((s->registers[DPDMA_ISR] & (~s->registers[DPDMA_IMR]))
          || (s->registers[DPDMA_EISR] & (~s->registers[DPDMA_EIMR])));
    qemu_set_irq(s->irq, flags);
}

static uint64_t xlnx_dpdma_descriptor_start_address(XlnxDPDMAState *s,
                                                    uint8_t channel)
{
    return (s->registers[DPDMA_DSCR_STRT_ADDRE_CH(channel)] << 16)
          + s->registers[DPDMA_DSCR_STRT_ADDR_CH(channel)];
}

static uint64_t xlnx_dpdma_descriptor_next_address(XlnxDPDMAState *s,
                                                   uint8_t channel)
{
    return ((uint64_t)s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] << 32)
           + s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)];
}

static bool xlnx_dpdma_is_channel_enabled(XlnxDPDMAState *s,
                                          uint8_t channel)
{
    return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_EN) != 0;
}

static bool xlnx_dpdma_is_channel_paused(XlnxDPDMAState *s,
                                         uint8_t channel)
{
    return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_PAUSED) != 0;
}

static inline bool xlnx_dpdma_is_channel_retriggered(XlnxDPDMAState *s,
                                                     uint8_t channel)
{
    /* Clear the retriggered bit after reading it. */
    bool channel_is_retriggered = s->registers[DPDMA_GBL]
                                & DPDMA_GBL_RTRG_CH(channel);
    s->registers[DPDMA_GBL] &= ~DPDMA_GBL_RTRG_CH(channel);
    return channel_is_retriggered;
}

static inline bool xlnx_dpdma_is_channel_triggered(XlnxDPDMAState *s,
                                                   uint8_t channel)
{
    return s->registers[DPDMA_GBL] & DPDMA_GBL_TRG_CH(channel);
}

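/*
 * Mirror the fields of the descriptor currently being processed into the
 * per-channel DSCR_NEXT, PYLD_CUR, VDO, PYLD_SZ, DSCR_ID and STATUS
 * registers, so the channel's progress can be observed through MMIO.
 */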
static void xlnx_dpdma_update_desc_info(XlnxDPDMAState *s, uint8_t channel,
                                        DPDMADescriptor *desc)
{
    s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] =
        extract32(desc->address_extension, 0, 16);
    s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)] = desc->next_descriptor;
    s->registers[DPDMA_PYLD_CUR_ADDRE_CH(channel)] =
        extract32(desc->address_extension, 16, 16);
    s->registers[DPDMA_PYLD_CUR_ADDR_CH(channel)] = desc->source_address;
    s->registers[DPDMA_VDO_CH(channel)] =
        extract32(desc->line_size_stride, 18, 14)
        + (extract32(desc->line_size_stride, 0, 18)
        << 14);
    s->registers[DPDMA_PYLD_SZ_CH(channel)] = desc->xfer_size;
    s->registers[DPDMA_DSCR_ID_CH(channel)] = desc->descriptor_id;

    /* Compute the status register with the descriptor information. */
    s->registers[DPDMA_STATUS_CH(channel)] =
        extract32(desc->control, 0, 8) << 13;
    if ((desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_INTR;
    }
    if ((desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_UP;
    }
    if ((desc->timestamp_msb & STATUS_DONE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_DSCR_DONE;
    }
    if ((desc->control & DSCR_CTRL_IGNORE_DONE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_IGNR_DONE;
    }
    if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LDSCR_FRAME;
    }
    if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LAST_DSCR;
    }
    if ((desc->control & DSCR_CTRL_ENABLE_CRC) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_CRC;
    }
    if ((desc->control & DSCR_CTRL_DESCRIPTOR_MODE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_MODE;
    }
    if ((desc->control & DSCR_CTRL_AXI_BURST_TYPE) != 0) {
        s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_BURST_TYPE;
    }
}

static void xlnx_dpdma_dump_descriptor(DPDMADescriptor *desc)
{
    if (DEBUG_DPDMA) {
        qemu_log("DUMP DESCRIPTOR:\n");
        qemu_hexdump((char *)desc, stdout, "", sizeof(DPDMADescriptor));
    }
}

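/*
 * All registers are 32 bits wide; the MMIO offset is converted to a word
 * index into s->registers.  DPDMA_GBL is write-only and reads back as zero.
 */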
static uint64_t xlnx_dpdma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    XlnxDPDMAState *s = XLNX_DPDMA(opaque);

    DPRINTF("read @%" HWADDR_PRIx "\n", offset);
    offset = offset >> 2;

    switch (offset) {
    /*
     * Trying to read a write only register.
     */
    case DPDMA_GBL:
        return 0;
    default:
        assert(offset <= (0xFFC >> 2));
        return s->registers[offset];
    }
    return 0;
}

static void xlnx_dpdma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    XlnxDPDMAState *s = XLNX_DPDMA(opaque);

    DPRINTF("write @%" HWADDR_PRIx " = %" PRIx64 "\n", offset, value);
    offset = offset >> 2;

    switch (offset) {
    case DPDMA_ISR:
        s->registers[DPDMA_ISR] &= ~value;
        xlnx_dpdma_update_irq(s);
        break;
    case DPDMA_IEN:
        s->registers[DPDMA_IMR] &= ~value;
        break;
    case DPDMA_IDS:
        s->registers[DPDMA_IMR] |= value;
        break;
    case DPDMA_EISR:
        s->registers[DPDMA_EISR] &= ~value;
        xlnx_dpdma_update_irq(s);
        break;
    case DPDMA_EIEN:
        s->registers[DPDMA_EIMR] &= ~value;
        break;
    case DPDMA_EIDS:
        s->registers[DPDMA_EIMR] |= value;
        break;
    case DPDMA_IMR:
    case DPDMA_EIMR:
    case DPDMA_DSCR_NEXT_ADDRE_CH(0):
    case DPDMA_DSCR_NEXT_ADDRE_CH(1):
    case DPDMA_DSCR_NEXT_ADDRE_CH(2):
    case DPDMA_DSCR_NEXT_ADDRE_CH(3):
    case DPDMA_DSCR_NEXT_ADDRE_CH(4):
    case DPDMA_DSCR_NEXT_ADDRE_CH(5):
    case DPDMA_DSCR_NEXT_ADDR_CH(0):
    case DPDMA_DSCR_NEXT_ADDR_CH(1):
    case DPDMA_DSCR_NEXT_ADDR_CH(2):
    case DPDMA_DSCR_NEXT_ADDR_CH(3):
    case DPDMA_DSCR_NEXT_ADDR_CH(4):
    case DPDMA_DSCR_NEXT_ADDR_CH(5):
    case DPDMA_PYLD_CUR_ADDRE_CH(0):
    case DPDMA_PYLD_CUR_ADDRE_CH(1):
    case DPDMA_PYLD_CUR_ADDRE_CH(2):
    case DPDMA_PYLD_CUR_ADDRE_CH(3):
    case DPDMA_PYLD_CUR_ADDRE_CH(4):
    case DPDMA_PYLD_CUR_ADDRE_CH(5):
    case DPDMA_PYLD_CUR_ADDR_CH(0):
    case DPDMA_PYLD_CUR_ADDR_CH(1):
    case DPDMA_PYLD_CUR_ADDR_CH(2):
    case DPDMA_PYLD_CUR_ADDR_CH(3):
    case DPDMA_PYLD_CUR_ADDR_CH(4):
    case DPDMA_PYLD_CUR_ADDR_CH(5):
    case DPDMA_STATUS_CH(0):
    case DPDMA_STATUS_CH(1):
    case DPDMA_STATUS_CH(2):
    case DPDMA_STATUS_CH(3):
    case DPDMA_STATUS_CH(4):
    case DPDMA_STATUS_CH(5):
    case DPDMA_VDO_CH(0):
    case DPDMA_VDO_CH(1):
    case DPDMA_VDO_CH(2):
    case DPDMA_VDO_CH(3):
    case DPDMA_VDO_CH(4):
    case DPDMA_VDO_CH(5):
    case DPDMA_PYLD_SZ_CH(0):
    case DPDMA_PYLD_SZ_CH(1):
    case DPDMA_PYLD_SZ_CH(2):
    case DPDMA_PYLD_SZ_CH(3):
    case DPDMA_PYLD_SZ_CH(4):
    case DPDMA_PYLD_SZ_CH(5):
    case DPDMA_DSCR_ID_CH(0):
    case DPDMA_DSCR_ID_CH(1):
    case DPDMA_DSCR_ID_CH(2):
    case DPDMA_DSCR_ID_CH(3):
    case DPDMA_DSCR_ID_CH(4):
    case DPDMA_DSCR_ID_CH(5):
        /*
         * Trying to write to a read only register.
         */
        break;
    case DPDMA_GBL:
        /*
         * This is a write only register so it's read as zero in the read
         * callback.
         * We store the value anyway so we can know if the channel is
         * enabled.
         */
        s->registers[offset] |= value & 0x00000FFF;
        break;
    case DPDMA_DSCR_STRT_ADDRE_CH(0):
    case DPDMA_DSCR_STRT_ADDRE_CH(1):
    case DPDMA_DSCR_STRT_ADDRE_CH(2):
    case DPDMA_DSCR_STRT_ADDRE_CH(3):
    case DPDMA_DSCR_STRT_ADDRE_CH(4):
    case DPDMA_DSCR_STRT_ADDRE_CH(5):
        value &= 0x0000FFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(0):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(0);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(1):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(1);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(2):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(2);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(3):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(3);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(4):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(4);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    case DPDMA_CNTL_CH(5):
        s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(5);
        value &= 0x3FFFFFFF;
        s->registers[offset] = value;
        break;
    default:
        assert(offset <= (0xFFC >> 2));
        s->registers[offset] = value;
        break;
    }
}

static const MemoryRegionOps dma_ops = {
    .read = xlnx_dpdma_read,
    .write = xlnx_dpdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void xlnx_dpdma_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    XlnxDPDMAState *s = XLNX_DPDMA(obj);

    memory_region_init_io(&s->iomem, obj, &dma_ops, s,
                          TYPE_XLNX_DPDMA, 0x1000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}

static void xlnx_dpdma_reset(DeviceState *dev)
{
    XlnxDPDMAState *s = XLNX_DPDMA(dev);
    size_t i;

    memset(s->registers, 0, sizeof(s->registers));
    s->registers[DPDMA_IMR] = 0x07FFFFFF;
    s->registers[DPDMA_EIMR] = 0xFFFFFFFF;
    s->registers[DPDMA_ALC0_MIN] = 0x0000FFFF;
    s->registers[DPDMA_ALC1_MIN] = 0x0000FFFF;

    for (i = 0; i < 6; i++) {
        s->data[i] = NULL;
        s->operation_finished[i] = true;
    }
}

static void xlnx_dpdma_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->vmsd = &vmstate_xlnx_dpdma;
    dc->reset = xlnx_dpdma_reset;
}

static const TypeInfo xlnx_dpdma_info = {
    .name          = TYPE_XLNX_DPDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxDPDMAState),
    .instance_init = xlnx_dpdma_init,
    .class_init    = xlnx_dpdma_class_init,
};

static void xlnx_dpdma_register_types(void)
{
    type_register_static(&xlnx_dpdma_info);
}

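/*
 * Walk the descriptor chain of @channel and copy the payload it describes
 * into the buffer registered with xlnx_dpdma_set_host_data_location().
 * Descriptor fetch, preamble, CRC and done-bit errors raise the
 * corresponding EISR bits; completed descriptors may be written back with
 * the done flag and may raise a completion interrupt.  When @one_desc is
 * true, at most one descriptor is processed.  Returns the number of bytes
 * copied.
 */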
size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
                                  bool one_desc)
{
    uint64_t desc_addr;
    uint64_t source_addr[6];
    DPDMADescriptor desc;
    bool done = false;
    size_t ptr = 0;

    assert(channel <= 5);

    DPRINTF("start dpdma channel 0x%" PRIX8 "\n", channel);

    if (!xlnx_dpdma_is_channel_triggered(s, channel)) {
        DPRINTF("Channel isn't triggered..\n");
        return 0;
    }

    if (!xlnx_dpdma_is_channel_enabled(s, channel)) {
        DPRINTF("Channel isn't enabled..\n");
        return 0;
    }

    if (xlnx_dpdma_is_channel_paused(s, channel)) {
        DPRINTF("Channel is paused..\n");
        return 0;
    }

    do {
        if ((s->operation_finished[channel])
          || xlnx_dpdma_is_channel_retriggered(s, channel)) {
            desc_addr = xlnx_dpdma_descriptor_start_address(s, channel);
            s->operation_finished[channel] = false;
        } else {
            desc_addr = xlnx_dpdma_descriptor_next_address(s, channel);
        }

        if (dma_memory_read(&address_space_memory, desc_addr, &desc,
                            sizeof(DPDMADescriptor))) {
            s->registers[DPDMA_EISR] |= ((1 << 1) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Can't get the descriptor.\n");
            break;
        }

        xlnx_dpdma_update_desc_info(s, channel, &desc);

#ifdef DEBUG_DPDMA
        xlnx_dpdma_dump_descriptor(&desc);
#endif

        DPRINTF("location of the descriptor: %" PRIx64 "\n", desc_addr);
        if (!xlnx_dpdma_desc_is_valid(&desc)) {
            s->registers[DPDMA_EISR] |= ((1 << 7) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Invalid descriptor..\n");
            break;
        }

        if (xlnx_dpdma_desc_crc_enabled(&desc)
          && !xlnx_dpdma_desc_check_crc(&desc)) {
            s->registers[DPDMA_EISR] |= ((1 << 13) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Bad CRC for descriptor..\n");
            break;
        }

        if (xlnx_dpdma_desc_is_already_done(&desc)
          && !xlnx_dpdma_desc_ignore_done_bit(&desc)) {
            /* We are trying to process an already processed descriptor. */
            s->registers[DPDMA_EISR] |= ((1 << 25) << channel);
            xlnx_dpdma_update_irq(s);
            s->operation_finished[channel] = true;
            DPRINTF("Already processed descriptor..\n");
            break;
        }

        done = xlnx_dpdma_desc_is_last(&desc)
             || xlnx_dpdma_desc_is_last_of_frame(&desc);

        s->operation_finished[channel] = done;
        if (s->data[channel]) {
            int64_t transfer_len = xlnx_dpdma_desc_get_transfer_size(&desc);
            uint32_t line_size = xlnx_dpdma_desc_get_line_size(&desc);
            uint32_t line_stride = xlnx_dpdma_desc_get_line_stride(&desc);
            if (xlnx_dpdma_desc_is_contiguous(&desc)) {
                source_addr[0] = xlnx_dpdma_desc_get_source_address(&desc, 0);
                while (transfer_len != 0) {
                    if (dma_memory_read(&address_space_memory,
                                        source_addr[0],
                                        &s->data[channel][ptr],
                                        line_size)) {
                        s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
                        xlnx_dpdma_update_irq(s);
                        DPRINTF("Can't get data.\n");
                        break;
                    }
                    ptr += line_size;
                    transfer_len -= line_size;
                    source_addr[0] += line_stride;
                }
            } else {
                DPRINTF("Source address:\n");
                int frag;
                for (frag = 0; frag < 5; frag++) {
                    source_addr[frag] =
                        xlnx_dpdma_desc_get_source_address(&desc, frag);
                    DPRINTF("Fragment %u: %" PRIx64 "\n", frag + 1,
                            source_addr[frag]);
                }

                frag = 0;
                while ((transfer_len < 0) && (frag < 5)) {
                    size_t fragment_len = DPDMA_FRAG_MAX_SZ
                                    - (source_addr[frag] % DPDMA_FRAG_MAX_SZ);

                    if (dma_memory_read(&address_space_memory,
                                        source_addr[frag],
                                        &(s->data[channel][ptr]),
                                        fragment_len)) {
                        s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
                        xlnx_dpdma_update_irq(s);
                        DPRINTF("Can't get data.\n");
                        break;
                    }
                    ptr += fragment_len;
                    transfer_len -= fragment_len;
                    frag += 1;
                }
            }
        }

        if (xlnx_dpdma_desc_update_enabled(&desc)) {
            /* The descriptor needs to be updated when it's completed. */
            DPRINTF("update the descriptor with the done flag set.\n");
            xlnx_dpdma_desc_set_done(&desc);
            dma_memory_write(&address_space_memory, desc_addr, &desc,
                             sizeof(DPDMADescriptor));
        }

        if (xlnx_dpdma_desc_completion_interrupt(&desc)) {
            DPRINTF("completion interrupt enabled!\n");
            s->registers[DPDMA_ISR] |= (1 << channel);
            xlnx_dpdma_update_irq(s);
        }

    } while (!done && !one_desc);

    return ptr;
}

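/*
 * Called by the DMA client (typically the DisplayPort device) to provide
 * the per-channel buffer into which xlnx_dpdma_start_operation() deposits
 * the transferred data.
 */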
void xlnx_dpdma_set_host_data_location(XlnxDPDMAState *s, uint8_t channel,
                                       void *p)
{
    if (!s) {
        qemu_log_mask(LOG_UNIMP, "DPDMA client not attached to valid DPDMA"
                      " instance\n");
        return;
    }

    assert(channel <= 5);
    s->data[channel] = p;
}

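/* Raise ISR bit 27, which this model uses as the VSYNC interrupt. */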
void xlnx_dpdma_trigger_vsync_irq(XlnxDPDMAState *s)
{
    s->registers[DPDMA_ISR] |= (1 << 27);
    xlnx_dpdma_update_irq(s);
}

type_init(xlnx_dpdma_register_types)