s390x: refine pci dependencies
[qemu/ar7.git] / hw / misc / macio / mac_dbdma.c
blob15452b9a28331e2abfea7f6c1cd7ead43f38ccf8
1 /*
2 * PowerMac descriptor-based DMA emulation
4 * Copyright (c) 2005-2007 Fabrice Bellard
5 * Copyright (c) 2007 Jocelyn Mayer
6 * Copyright (c) 2009 Laurent Vivier
8 * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h
10 * Definitions for using the Apple Descriptor-Based DMA controller
11 * in Power Macintosh computers.
13 * Copyright (C) 1996 Paul Mackerras.
15 * some parts from mol 0.9.71
17 * Descriptor based DMA emulation
19 * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
21 * Permission is hereby granted, free of charge, to any person obtaining a copy
22 * of this software and associated documentation files (the "Software"), to deal
23 * in the Software without restriction, including without limitation the rights
24 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
25 * copies of the Software, and to permit persons to whom the Software is
26 * furnished to do so, subject to the following conditions:
28 * The above copyright notice and this permission notice shall be included in
29 * all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
34 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
35 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
36 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
37 * THE SOFTWARE.
39 #include "qemu/osdep.h"
40 #include "hw/hw.h"
41 #include "hw/isa/isa.h"
42 #include "hw/ppc/mac_dbdma.h"
43 #include "qemu/main-loop.h"
44 #include "qemu/log.h"
45 #include "sysemu/dma.h"
/* debug DBDMA */
#define DEBUG_DBDMA 0
#define DEBUG_DBDMA_CHANMASK ((1ull << DBDMA_CHANNELS) - 1)

/*
 * Debug printf, compiled out when DEBUG_DBDMA is 0.
 * Note: no trailing semicolon after "while (0)" -- the semicolon comes
 * from the call site, so the macro expands safely inside un-braced
 * if/else branches (a trailing ';' would make the else an error).
 */
#define DBDMA_DPRINTF(fmt, ...) do { \
    if (DEBUG_DBDMA) { \
        printf("DBDMA: " fmt , ## __VA_ARGS__); \
    } \
} while (0)

/* Per-channel variant, additionally filtered by DEBUG_DBDMA_CHANMASK */
#define DBDMA_DPRINTFCH(ch, fmt, ...) do { \
    if (DEBUG_DBDMA) { \
        if ((1ul << (ch)->channel) & DEBUG_DBDMA_CHANMASK) { \
            printf("DBDMA[%02x]: " fmt , (ch)->channel, ## __VA_ARGS__); \
        } \
    } \
} while (0)
68 static DBDMAState *dbdma_from_ch(DBDMA_channel *ch)
70 return container_of(ch, DBDMAState, channels[ch->channel]);
73 #if DEBUG_DBDMA
74 static void dump_dbdma_cmd(dbdma_cmd *cmd)
76 printf("dbdma_cmd %p\n", cmd);
77 printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
78 printf(" command 0x%04x\n", le16_to_cpu(cmd->command));
79 printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
80 printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
81 printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
82 printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
84 #else
85 static void dump_dbdma_cmd(dbdma_cmd *cmd)
88 #endif
89 static void dbdma_cmdptr_load(DBDMA_channel *ch)
91 DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_load 0x%08x\n",
92 ch->regs[DBDMA_CMDPTR_LO]);
93 dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO],
94 &ch->current, sizeof(dbdma_cmd));
97 static void dbdma_cmdptr_save(DBDMA_channel *ch)
99 DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_save 0x%08x\n",
100 ch->regs[DBDMA_CMDPTR_LO]);
101 DBDMA_DPRINTFCH(ch, "xfer_status 0x%08x res_count 0x%04x\n",
102 le16_to_cpu(ch->current.xfer_status),
103 le16_to_cpu(ch->current.res_count));
104 dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO],
105 &ch->current, sizeof(dbdma_cmd));
108 static void kill_channel(DBDMA_channel *ch)
110 DBDMA_DPRINTFCH(ch, "kill_channel\n");
112 ch->regs[DBDMA_STATUS] |= DEAD;
113 ch->regs[DBDMA_STATUS] &= ~ACTIVE;
115 qemu_irq_raise(ch->irq);
118 static void conditional_interrupt(DBDMA_channel *ch)
120 dbdma_cmd *current = &ch->current;
121 uint16_t intr;
122 uint16_t sel_mask, sel_value;
123 uint32_t status;
124 int cond;
126 DBDMA_DPRINTFCH(ch, "%s\n", __func__);
128 intr = le16_to_cpu(current->command) & INTR_MASK;
130 switch(intr) {
131 case INTR_NEVER: /* don't interrupt */
132 return;
133 case INTR_ALWAYS: /* always interrupt */
134 qemu_irq_raise(ch->irq);
135 DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
136 return;
139 status = ch->regs[DBDMA_STATUS] & DEVSTAT;
141 sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f;
142 sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f;
144 cond = (status & sel_mask) == (sel_value & sel_mask);
146 switch(intr) {
147 case INTR_IFSET: /* intr if condition bit is 1 */
148 if (cond) {
149 qemu_irq_raise(ch->irq);
150 DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
152 return;
153 case INTR_IFCLR: /* intr if condition bit is 0 */
154 if (!cond) {
155 qemu_irq_raise(ch->irq);
156 DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
158 return;
162 static int conditional_wait(DBDMA_channel *ch)
164 dbdma_cmd *current = &ch->current;
165 uint16_t wait;
166 uint16_t sel_mask, sel_value;
167 uint32_t status;
168 int cond;
170 DBDMA_DPRINTFCH(ch, "conditional_wait\n");
172 wait = le16_to_cpu(current->command) & WAIT_MASK;
174 switch(wait) {
175 case WAIT_NEVER: /* don't wait */
176 return 0;
177 case WAIT_ALWAYS: /* always wait */
178 return 1;
181 status = ch->regs[DBDMA_STATUS] & DEVSTAT;
183 sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f;
184 sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f;
186 cond = (status & sel_mask) == (sel_value & sel_mask);
188 switch(wait) {
189 case WAIT_IFSET: /* wait if condition bit is 1 */
190 if (cond)
191 return 1;
192 return 0;
193 case WAIT_IFCLR: /* wait if condition bit is 0 */
194 if (!cond)
195 return 1;
196 return 0;
198 return 0;
201 static void next(DBDMA_channel *ch)
203 uint32_t cp;
205 ch->regs[DBDMA_STATUS] &= ~BT;
207 cp = ch->regs[DBDMA_CMDPTR_LO];
208 ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd);
209 dbdma_cmdptr_load(ch);
212 static void branch(DBDMA_channel *ch)
214 dbdma_cmd *current = &ch->current;
216 ch->regs[DBDMA_CMDPTR_LO] = le32_to_cpu(current->cmd_dep);
217 ch->regs[DBDMA_STATUS] |= BT;
218 dbdma_cmdptr_load(ch);
221 static void conditional_branch(DBDMA_channel *ch)
223 dbdma_cmd *current = &ch->current;
224 uint16_t br;
225 uint16_t sel_mask, sel_value;
226 uint32_t status;
227 int cond;
229 DBDMA_DPRINTFCH(ch, "conditional_branch\n");
231 /* check if we must branch */
233 br = le16_to_cpu(current->command) & BR_MASK;
235 switch(br) {
236 case BR_NEVER: /* don't branch */
237 next(ch);
238 return;
239 case BR_ALWAYS: /* always branch */
240 branch(ch);
241 return;
244 status = ch->regs[DBDMA_STATUS] & DEVSTAT;
246 sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f;
247 sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f;
249 cond = (status & sel_mask) == (sel_value & sel_mask);
251 switch(br) {
252 case BR_IFSET: /* branch if condition bit is 1 */
253 if (cond)
254 branch(ch);
255 else
256 next(ch);
257 return;
258 case BR_IFCLR: /* branch if condition bit is 0 */
259 if (!cond)
260 branch(ch);
261 else
262 next(ch);
263 return;
267 static void channel_run(DBDMA_channel *ch);
269 static void dbdma_end(DBDMA_io *io)
271 DBDMA_channel *ch = io->channel;
272 dbdma_cmd *current = &ch->current;
274 DBDMA_DPRINTFCH(ch, "%s\n", __func__);
276 if (conditional_wait(ch))
277 goto wait;
279 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
280 current->res_count = cpu_to_le16(io->len);
281 dbdma_cmdptr_save(ch);
282 if (io->is_last)
283 ch->regs[DBDMA_STATUS] &= ~FLUSH;
285 conditional_interrupt(ch);
286 conditional_branch(ch);
288 wait:
289 /* Indicate that we're ready for a new DMA round */
290 ch->io.processing = false;
292 if ((ch->regs[DBDMA_STATUS] & RUN) &&
293 (ch->regs[DBDMA_STATUS] & ACTIVE))
294 channel_run(ch);
297 static void start_output(DBDMA_channel *ch, int key, uint32_t addr,
298 uint16_t req_count, int is_last)
300 DBDMA_DPRINTFCH(ch, "start_output\n");
302 /* KEY_REGS, KEY_DEVICE and KEY_STREAM
303 * are not implemented in the mac-io chip
306 DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key);
307 if (!addr || key > KEY_STREAM3) {
308 kill_channel(ch);
309 return;
312 ch->io.addr = addr;
313 ch->io.len = req_count;
314 ch->io.is_last = is_last;
315 ch->io.dma_end = dbdma_end;
316 ch->io.is_dma_out = 1;
317 ch->io.processing = true;
318 if (ch->rw) {
319 ch->rw(&ch->io);
323 static void start_input(DBDMA_channel *ch, int key, uint32_t addr,
324 uint16_t req_count, int is_last)
326 DBDMA_DPRINTFCH(ch, "start_input\n");
328 /* KEY_REGS, KEY_DEVICE and KEY_STREAM
329 * are not implemented in the mac-io chip
332 DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key);
333 if (!addr || key > KEY_STREAM3) {
334 kill_channel(ch);
335 return;
338 ch->io.addr = addr;
339 ch->io.len = req_count;
340 ch->io.is_last = is_last;
341 ch->io.dma_end = dbdma_end;
342 ch->io.is_dma_out = 0;
343 ch->io.processing = true;
344 if (ch->rw) {
345 ch->rw(&ch->io);
349 static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
350 uint16_t len)
352 dbdma_cmd *current = &ch->current;
354 DBDMA_DPRINTFCH(ch, "load_word %d bytes, addr=%08x\n", len, addr);
356 /* only implements KEY_SYSTEM */
358 if (key != KEY_SYSTEM) {
359 printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key);
360 kill_channel(ch);
361 return;
364 dma_memory_read(&address_space_memory, addr, &current->cmd_dep, len);
366 if (conditional_wait(ch))
367 goto wait;
369 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
370 dbdma_cmdptr_save(ch);
371 ch->regs[DBDMA_STATUS] &= ~FLUSH;
373 conditional_interrupt(ch);
374 next(ch);
376 wait:
377 DBDMA_kick(dbdma_from_ch(ch));
380 static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
381 uint16_t len)
383 dbdma_cmd *current = &ch->current;
385 DBDMA_DPRINTFCH(ch, "store_word %d bytes, addr=%08x pa=%x\n",
386 len, addr, le32_to_cpu(current->cmd_dep));
388 /* only implements KEY_SYSTEM */
390 if (key != KEY_SYSTEM) {
391 printf("DBDMA: STORE_WORD, unimplemented key %x\n", key);
392 kill_channel(ch);
393 return;
396 dma_memory_write(&address_space_memory, addr, &current->cmd_dep, len);
398 if (conditional_wait(ch))
399 goto wait;
401 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
402 dbdma_cmdptr_save(ch);
403 ch->regs[DBDMA_STATUS] &= ~FLUSH;
405 conditional_interrupt(ch);
406 next(ch);
408 wait:
409 DBDMA_kick(dbdma_from_ch(ch));
412 static void nop(DBDMA_channel *ch)
414 dbdma_cmd *current = &ch->current;
416 if (conditional_wait(ch))
417 goto wait;
419 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
420 dbdma_cmdptr_save(ch);
422 conditional_interrupt(ch);
423 conditional_branch(ch);
425 wait:
426 DBDMA_kick(dbdma_from_ch(ch));
429 static void stop(DBDMA_channel *ch)
431 ch->regs[DBDMA_STATUS] &= ~(ACTIVE|DEAD|FLUSH);
433 /* the stop command does not increment command pointer */
436 static void channel_run(DBDMA_channel *ch)
438 dbdma_cmd *current = &ch->current;
439 uint16_t cmd, key;
440 uint16_t req_count;
441 uint32_t phy_addr;
443 DBDMA_DPRINTFCH(ch, "channel_run\n");
444 dump_dbdma_cmd(current);
446 /* clear WAKE flag at command fetch */
448 ch->regs[DBDMA_STATUS] &= ~WAKE;
450 cmd = le16_to_cpu(current->command) & COMMAND_MASK;
452 switch (cmd) {
453 case DBDMA_NOP:
454 nop(ch);
455 return;
457 case DBDMA_STOP:
458 stop(ch);
459 return;
462 key = le16_to_cpu(current->command) & 0x0700;
463 req_count = le16_to_cpu(current->req_count);
464 phy_addr = le32_to_cpu(current->phy_addr);
466 if (key == KEY_STREAM4) {
467 printf("command %x, invalid key 4\n", cmd);
468 kill_channel(ch);
469 return;
472 switch (cmd) {
473 case OUTPUT_MORE:
474 start_output(ch, key, phy_addr, req_count, 0);
475 return;
477 case OUTPUT_LAST:
478 start_output(ch, key, phy_addr, req_count, 1);
479 return;
481 case INPUT_MORE:
482 start_input(ch, key, phy_addr, req_count, 0);
483 return;
485 case INPUT_LAST:
486 start_input(ch, key, phy_addr, req_count, 1);
487 return;
490 if (key < KEY_REGS) {
491 printf("command %x, invalid key %x\n", cmd, key);
492 key = KEY_SYSTEM;
495 /* for LOAD_WORD and STORE_WORD, req_count is on 3 bits
496 * and BRANCH is invalid
499 req_count = req_count & 0x0007;
500 if (req_count & 0x4) {
501 req_count = 4;
502 phy_addr &= ~3;
503 } else if (req_count & 0x2) {
504 req_count = 2;
505 phy_addr &= ~1;
506 } else
507 req_count = 1;
509 switch (cmd) {
510 case LOAD_WORD:
511 load_word(ch, key, phy_addr, req_count);
512 return;
514 case STORE_WORD:
515 store_word(ch, key, phy_addr, req_count);
516 return;
520 static void DBDMA_run(DBDMAState *s)
522 int channel;
524 for (channel = 0; channel < DBDMA_CHANNELS; channel++) {
525 DBDMA_channel *ch = &s->channels[channel];
526 uint32_t status = ch->regs[DBDMA_STATUS];
527 if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) {
528 channel_run(ch);
533 static void DBDMA_run_bh(void *opaque)
535 DBDMAState *s = opaque;
537 DBDMA_DPRINTF("-> DBDMA_run_bh\n");
538 DBDMA_run(s);
539 DBDMA_DPRINTF("<- DBDMA_run_bh\n");
542 void DBDMA_kick(DBDMAState *dbdma)
544 qemu_bh_schedule(dbdma->bh);
547 void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
548 DBDMA_rw rw, DBDMA_flush flush,
549 void *opaque)
551 DBDMAState *s = dbdma;
552 DBDMA_channel *ch = &s->channels[nchan];
554 DBDMA_DPRINTFCH(ch, "DBDMA_register_channel 0x%x\n", nchan);
556 assert(rw);
557 assert(flush);
559 ch->irq = irq;
560 ch->rw = rw;
561 ch->flush = flush;
562 ch->io.opaque = opaque;
565 static void
566 dbdma_control_write(DBDMA_channel *ch)
568 uint16_t mask, value;
569 uint32_t status;
571 mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
572 value = ch->regs[DBDMA_CONTROL] & 0xffff;
574 value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT);
576 status = ch->regs[DBDMA_STATUS];
578 status = (value & mask) | (status & ~mask);
580 if (status & WAKE)
581 status |= ACTIVE;
582 if (status & RUN) {
583 status |= ACTIVE;
584 status &= ~DEAD;
586 if (status & PAUSE)
587 status &= ~ACTIVE;
588 if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) {
589 /* RUN is cleared */
590 status &= ~(ACTIVE|DEAD);
593 if ((status & FLUSH) && ch->flush) {
594 ch->flush(&ch->io);
595 status &= ~FLUSH;
598 DBDMA_DPRINTFCH(ch, " status 0x%08x\n", status);
600 ch->regs[DBDMA_STATUS] = status;
602 if (status & ACTIVE) {
603 DBDMA_kick(dbdma_from_ch(ch));
607 static void dbdma_write(void *opaque, hwaddr addr,
608 uint64_t value, unsigned size)
610 int channel = addr >> DBDMA_CHANNEL_SHIFT;
611 DBDMAState *s = opaque;
612 DBDMA_channel *ch = &s->channels[channel];
613 int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
615 DBDMA_DPRINTFCH(ch, "writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n",
616 addr, value);
617 DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n",
618 (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
620 /* cmdptr cannot be modified if channel is ACTIVE */
622 if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) {
623 return;
626 ch->regs[reg] = value;
628 switch(reg) {
629 case DBDMA_CONTROL:
630 dbdma_control_write(ch);
631 break;
632 case DBDMA_CMDPTR_LO:
633 /* 16-byte aligned */
634 ch->regs[DBDMA_CMDPTR_LO] &= ~0xf;
635 dbdma_cmdptr_load(ch);
636 break;
637 case DBDMA_STATUS:
638 case DBDMA_INTR_SEL:
639 case DBDMA_BRANCH_SEL:
640 case DBDMA_WAIT_SEL:
641 /* nothing to do */
642 break;
643 case DBDMA_XFER_MODE:
644 case DBDMA_CMDPTR_HI:
645 case DBDMA_DATA2PTR_HI:
646 case DBDMA_DATA2PTR_LO:
647 case DBDMA_ADDRESS_HI:
648 case DBDMA_BRANCH_ADDR_HI:
649 case DBDMA_RES1:
650 case DBDMA_RES2:
651 case DBDMA_RES3:
652 case DBDMA_RES4:
653 /* unused */
654 break;
658 static uint64_t dbdma_read(void *opaque, hwaddr addr,
659 unsigned size)
661 uint32_t value;
662 int channel = addr >> DBDMA_CHANNEL_SHIFT;
663 DBDMAState *s = opaque;
664 DBDMA_channel *ch = &s->channels[channel];
665 int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;
667 value = ch->regs[reg];
669 DBDMA_DPRINTFCH(ch, "readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
670 DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n",
671 (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);
673 switch(reg) {
674 case DBDMA_CONTROL:
675 value = 0;
676 break;
677 case DBDMA_STATUS:
678 case DBDMA_CMDPTR_LO:
679 case DBDMA_INTR_SEL:
680 case DBDMA_BRANCH_SEL:
681 case DBDMA_WAIT_SEL:
682 /* nothing to do */
683 break;
684 case DBDMA_XFER_MODE:
685 case DBDMA_CMDPTR_HI:
686 case DBDMA_DATA2PTR_HI:
687 case DBDMA_DATA2PTR_LO:
688 case DBDMA_ADDRESS_HI:
689 case DBDMA_BRANCH_ADDR_HI:
690 /* unused */
691 value = 0;
692 break;
693 case DBDMA_RES1:
694 case DBDMA_RES2:
695 case DBDMA_RES3:
696 case DBDMA_RES4:
697 /* reserved */
698 break;
701 return value;
704 static const MemoryRegionOps dbdma_ops = {
705 .read = dbdma_read,
706 .write = dbdma_write,
707 .endianness = DEVICE_LITTLE_ENDIAN,
708 .valid = {
709 .min_access_size = 4,
710 .max_access_size = 4,
714 static const VMStateDescription vmstate_dbdma_io = {
715 .name = "dbdma_io",
716 .version_id = 0,
717 .minimum_version_id = 0,
718 .fields = (VMStateField[]) {
719 VMSTATE_UINT64(addr, struct DBDMA_io),
720 VMSTATE_INT32(len, struct DBDMA_io),
721 VMSTATE_INT32(is_last, struct DBDMA_io),
722 VMSTATE_INT32(is_dma_out, struct DBDMA_io),
723 VMSTATE_BOOL(processing, struct DBDMA_io),
724 VMSTATE_END_OF_LIST()
728 static const VMStateDescription vmstate_dbdma_cmd = {
729 .name = "dbdma_cmd",
730 .version_id = 0,
731 .minimum_version_id = 0,
732 .fields = (VMStateField[]) {
733 VMSTATE_UINT16(req_count, dbdma_cmd),
734 VMSTATE_UINT16(command, dbdma_cmd),
735 VMSTATE_UINT32(phy_addr, dbdma_cmd),
736 VMSTATE_UINT32(cmd_dep, dbdma_cmd),
737 VMSTATE_UINT16(res_count, dbdma_cmd),
738 VMSTATE_UINT16(xfer_status, dbdma_cmd),
739 VMSTATE_END_OF_LIST()
743 static const VMStateDescription vmstate_dbdma_channel = {
744 .name = "dbdma_channel",
745 .version_id = 1,
746 .minimum_version_id = 1,
747 .fields = (VMStateField[]) {
748 VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
749 VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io),
750 VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd,
751 dbdma_cmd),
752 VMSTATE_END_OF_LIST()
756 static const VMStateDescription vmstate_dbdma = {
757 .name = "dbdma",
758 .version_id = 3,
759 .minimum_version_id = 3,
760 .fields = (VMStateField[]) {
761 VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
762 vmstate_dbdma_channel, DBDMA_channel),
763 VMSTATE_END_OF_LIST()
767 static void dbdma_reset(void *opaque)
769 DBDMAState *s = opaque;
770 int i;
772 for (i = 0; i < DBDMA_CHANNELS; i++)
773 memset(s->channels[i].regs, 0, DBDMA_SIZE);
776 static void dbdma_unassigned_rw(DBDMA_io *io)
778 DBDMA_channel *ch = io->channel;
779 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
780 __func__, ch->channel);
781 ch->io.processing = false;
784 static void dbdma_unassigned_flush(DBDMA_io *io)
786 DBDMA_channel *ch = io->channel;
787 dbdma_cmd *current = &ch->current;
788 uint16_t cmd;
789 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
790 __func__, ch->channel);
792 cmd = le16_to_cpu(current->command) & COMMAND_MASK;
793 if (cmd == OUTPUT_MORE || cmd == OUTPUT_LAST ||
794 cmd == INPUT_MORE || cmd == INPUT_LAST) {
795 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS] | FLUSH);
796 current->res_count = cpu_to_le16(io->len);
797 dbdma_cmdptr_save(ch);
801 void* DBDMA_init (MemoryRegion **dbdma_mem)
803 DBDMAState *s;
804 int i;
806 s = g_malloc0(sizeof(DBDMAState));
808 for (i = 0; i < DBDMA_CHANNELS; i++) {
809 DBDMA_io *io = &s->channels[i].io;
810 DBDMA_channel *ch = &s->channels[i];
811 qemu_iovec_init(&io->iov, 1);
813 ch->rw = dbdma_unassigned_rw;
814 ch->flush = dbdma_unassigned_flush;
815 ch->channel = i;
816 ch->io.channel = ch;
819 memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000);
820 *dbdma_mem = &s->mem;
821 vmstate_register(NULL, -1, &vmstate_dbdma, s);
822 qemu_register_reset(dbdma_reset, s);
824 s->bh = qemu_bh_new(DBDMA_run_bh, s);
826 return s;