/*
 * PowerMac descriptor-based DMA emulation
 *
 * Copyright (c) 2005-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2009 Laurent Vivier
 *
 * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h
 *
 *   Definitions for using the Apple Descriptor-Based DMA controller
 *   in Power Macintosh computers.
 *
 *   Copyright (C) 1996 Paul Mackerras.
 *
 * some parts from mol 0.9.71
 *
 *   Descriptor based DMA emulation
 *
 *   Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/isa/isa.h"
#include "hw/ppc/mac_dbdma.h"
#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "sysemu/dma.h"
/* debug DBDMA */
#define DEBUG_DBDMA 0
#define DEBUG_DBDMA_CHANMASK ((1ull << DBDMA_CHANNELS) - 1)

#define DBDMA_DPRINTF(fmt, ...) do { \
    if (DEBUG_DBDMA) { \
        printf("DBDMA: " fmt , ## __VA_ARGS__); \
    } \
} while (0)

#define DBDMA_DPRINTFCH(ch, fmt, ...) do { \
    if (DEBUG_DBDMA) { \
        if ((1ul << (ch)->channel) & DEBUG_DBDMA_CHANMASK) { \
            printf("DBDMA[%02x]: " fmt , (ch)->channel, ## __VA_ARGS__); \
        } \
    } \
} while (0)
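/* Each channel records its own index in ch->channel (set in mac_dbdma_init),
 * so the owning DBDMAState can be recovered from a channel pointer with
 * container_of().
 */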
static DBDMAState *dbdma_from_ch(DBDMA_channel *ch)
{
    return container_of(ch, DBDMAState, channels[ch->channel]);
}
#if DEBUG_DBDMA
static void dump_dbdma_cmd(dbdma_cmd *cmd)
{
    printf("dbdma_cmd %p\n", cmd);
    printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count));
    printf(" command 0x%04x\n", le16_to_cpu(cmd->command));
    printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr));
    printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep));
    printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count));
    printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status));
}
#else
static void dump_dbdma_cmd(dbdma_cmd *cmd)
{
}
#endif
static void dbdma_cmdptr_load(DBDMA_channel *ch)
{
    DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_load 0x%08x\n",
                    ch->regs[DBDMA_CMDPTR_LO]);
    dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO],
                    &ch->current, sizeof(dbdma_cmd));
}

static void dbdma_cmdptr_save(DBDMA_channel *ch)
{
    DBDMA_DPRINTFCH(ch, "-> update 0x%08x stat=0x%08x, res=0x%04x\n",
                    ch->regs[DBDMA_CMDPTR_LO],
                    le16_to_cpu(ch->current.xfer_status),
                    le16_to_cpu(ch->current.res_count));
    dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO],
                     &ch->current, sizeof(dbdma_cmd));
}
static void kill_channel(DBDMA_channel *ch)
{
    DBDMA_DPRINTFCH(ch, "kill_channel\n");

    ch->regs[DBDMA_STATUS] |= DEAD;
    ch->regs[DBDMA_STATUS] &= ~ACTIVE;

    qemu_irq_raise(ch->irq);
}
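/* The INTR_SEL, BRANCH_SEL and WAIT_SEL registers are all decoded the same
 * way below: bits 19..16 select which device status bits (DEVSTAT) to test
 * and bits 3..0 give the value they are compared against.
 */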
static void conditional_interrupt(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t intr;
    uint16_t sel_mask, sel_value;
    uint32_t status;
    int cond;

    DBDMA_DPRINTFCH(ch, "%s\n", __func__);

    intr = le16_to_cpu(current->command) & INTR_MASK;

    switch(intr) {
    case INTR_NEVER:  /* don't interrupt */
        return;
    case INTR_ALWAYS: /* always interrupt */
        qemu_irq_raise(ch->irq);
        DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
        return;
    }

    status = ch->regs[DBDMA_STATUS] & DEVSTAT;

    sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f;
    sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f;

    cond = (status & sel_mask) == (sel_value & sel_mask);

    switch(intr) {
    case INTR_IFSET:  /* intr if condition bit is 1 */
        if (cond) {
            qemu_irq_raise(ch->irq);
            DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
        }
        return;
    case INTR_IFCLR:  /* intr if condition bit is 0 */
        if (!cond) {
            qemu_irq_raise(ch->irq);
            DBDMA_DPRINTFCH(ch, "%s: raise\n", __func__);
        }
        return;
    }
}
static int conditional_wait(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t wait;
    uint16_t sel_mask, sel_value;
    uint32_t status;
    int cond;
    int res = 0;

    wait = le16_to_cpu(current->command) & WAIT_MASK;
    switch(wait) {
    case WAIT_NEVER:  /* don't wait */
        return 0;
    case WAIT_ALWAYS: /* always wait */
        DBDMA_DPRINTFCH(ch, " [WAIT_ALWAYS]\n");
        return 1;
    }

    status = ch->regs[DBDMA_STATUS] & DEVSTAT;

    sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f;
    sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f;

    cond = (status & sel_mask) == (sel_value & sel_mask);

    switch(wait) {
    case WAIT_IFSET:  /* wait if condition bit is 1 */
        if (cond) {
            res = 1;
        }
        DBDMA_DPRINTFCH(ch, " [WAIT_IFSET=%d]\n", res);
        break;
    case WAIT_IFCLR:  /* wait if condition bit is 0 */
        if (!cond) {
            res = 1;
        }
        DBDMA_DPRINTFCH(ch, " [WAIT_IFCLR=%d]\n", res);
        break;
    }
    return res;
}
static void next(DBDMA_channel *ch)
{
    uint32_t cp;

    ch->regs[DBDMA_STATUS] &= ~BT;

    cp = ch->regs[DBDMA_CMDPTR_LO];
    ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd);
    dbdma_cmdptr_load(ch);
}

static void branch(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;

    ch->regs[DBDMA_CMDPTR_LO] = le32_to_cpu(current->cmd_dep);
    ch->regs[DBDMA_STATUS] |= BT;
    dbdma_cmdptr_load(ch);
}
static void conditional_branch(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t br;
    uint16_t sel_mask, sel_value;
    uint32_t status;
    int cond;

    /* check if we must branch */

    br = le16_to_cpu(current->command) & BR_MASK;

    switch(br) {
    case BR_NEVER:  /* don't branch */
        next(ch);
        return;
    case BR_ALWAYS: /* always branch */
        DBDMA_DPRINTFCH(ch, " [BR_ALWAYS]\n");
        branch(ch);
        return;
    }

    status = ch->regs[DBDMA_STATUS] & DEVSTAT;

    sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f;
    sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f;

    cond = (status & sel_mask) == (sel_value & sel_mask);

    switch(br) {
    case BR_IFSET:  /* branch if condition bit is 1 */
        if (cond) {
            DBDMA_DPRINTFCH(ch, " [BR_IFSET = 1]\n");
            branch(ch);
        } else {
            DBDMA_DPRINTFCH(ch, " [BR_IFSET = 0]\n");
            next(ch);
        }
        return;
    case BR_IFCLR:  /* branch if condition bit is 0 */
        if (!cond) {
            DBDMA_DPRINTFCH(ch, " [BR_IFCLR = 1]\n");
            branch(ch);
        } else {
            DBDMA_DPRINTFCH(ch, " [BR_IFCLR = 0]\n");
            next(ch);
        }
        return;
    }
}
static void channel_run(DBDMA_channel *ch);
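/* Transfer completion path: start_output()/start_input() hand a DBDMA_io to
 * the device backend with dma_end set to dbdma_end(), and the backend is
 * expected to call io->dma_end() once the transfer is done. We then write
 * back the transfer status and residual count, evaluate the conditional
 * interrupt/branch fields and let the channel continue.
 */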
static void dbdma_end(DBDMA_io *io)
{
    DBDMA_channel *ch = io->channel;
    dbdma_cmd *current = &ch->current;

    DBDMA_DPRINTFCH(ch, "%s\n", __func__);

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    current->res_count = cpu_to_le16(io->len);
    dbdma_cmdptr_save(ch);
    if (io->is_last)
        ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    conditional_branch(ch);

wait:
    /* Indicate that we're ready for a new DMA round */
    ch->io.processing = false;

    if ((ch->regs[DBDMA_STATUS] & RUN) &&
        (ch->regs[DBDMA_STATUS] & ACTIVE))
        channel_run(ch);
}
static void start_output(DBDMA_channel *ch, int key, uint32_t addr,
                         uint16_t req_count, int is_last)
{
    DBDMA_DPRINTFCH(ch, "start_output\n");

    /* KEY_REGS, KEY_DEVICE and KEY_STREAM
     * are not implemented in the mac-io chip
     */

    DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key);
    if (!addr || key > KEY_STREAM3) {
        kill_channel(ch);
        return;
    }

    ch->io.addr = addr;
    ch->io.len = req_count;
    ch->io.is_last = is_last;
    ch->io.dma_end = dbdma_end;
    ch->io.is_dma_out = 1;
    ch->io.processing = true;
    if (ch->rw) {
        ch->rw(&ch->io);
    }
}

static void start_input(DBDMA_channel *ch, int key, uint32_t addr,
                        uint16_t req_count, int is_last)
{
    DBDMA_DPRINTFCH(ch, "start_input\n");

    /* KEY_REGS, KEY_DEVICE and KEY_STREAM
     * are not implemented in the mac-io chip
     */

    DBDMA_DPRINTFCH(ch, "addr 0x%x key 0x%x\n", addr, key);
    if (!addr || key > KEY_STREAM3) {
        kill_channel(ch);
        return;
    }

    ch->io.addr = addr;
    ch->io.len = req_count;
    ch->io.is_last = is_last;
    ch->io.dma_end = dbdma_end;
    ch->io.is_dma_out = 0;
    ch->io.processing = true;
    if (ch->rw) {
        ch->rw(&ch->io);
    }
}
static void load_word(DBDMA_channel *ch, int key, uint32_t addr,
                      uint16_t len)
{
    dbdma_cmd *current = &ch->current;

    DBDMA_DPRINTFCH(ch, "load_word %d bytes, addr=%08x\n", len, addr);

    /* only implements KEY_SYSTEM */

    if (key != KEY_SYSTEM) {
        printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key);
        kill_channel(ch);
        return;
    }

    dma_memory_read(&address_space_memory, addr, &current->cmd_dep, len);

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    dbdma_cmdptr_save(ch);
    ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    next(ch);

wait:
    DBDMA_kick(dbdma_from_ch(ch));
}

static void store_word(DBDMA_channel *ch, int key, uint32_t addr,
                       uint16_t len)
{
    dbdma_cmd *current = &ch->current;

    DBDMA_DPRINTFCH(ch, "store_word %d bytes, addr=%08x pa=%x\n",
                    len, addr, le32_to_cpu(current->cmd_dep));

    /* only implements KEY_SYSTEM */

    if (key != KEY_SYSTEM) {
        printf("DBDMA: STORE_WORD, unimplemented key %x\n", key);
        kill_channel(ch);
        return;
    }

    dma_memory_write(&address_space_memory, addr, &current->cmd_dep, len);

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    dbdma_cmdptr_save(ch);
    ch->regs[DBDMA_STATUS] &= ~FLUSH;

    conditional_interrupt(ch);
    next(ch);

wait:
    DBDMA_kick(dbdma_from_ch(ch));
}
static void nop(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;

    if (conditional_wait(ch))
        goto wait;

    current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
    dbdma_cmdptr_save(ch);

    conditional_interrupt(ch);
    conditional_branch(ch);

wait:
    DBDMA_kick(dbdma_from_ch(ch));
}

static void stop(DBDMA_channel *ch)
{
    ch->regs[DBDMA_STATUS] &= ~(ACTIVE);

    /* the stop command does not increment command pointer */
}
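/* Execute the descriptor currently loaded in ch->current: COMMAND_MASK
 * selects the operation, bits 10..8 (0x0700) carry the transfer key, and
 * the INTR/BR/WAIT fields are handled by the conditional_* helpers above.
 */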
static void channel_run(DBDMA_channel *ch)
{
    dbdma_cmd *current = &ch->current;
    uint16_t cmd, key;
    uint16_t req_count;
    uint32_t phy_addr;

    DBDMA_DPRINTFCH(ch, "channel_run\n");
    dump_dbdma_cmd(current);

    /* clear WAKE flag at command fetch */

    ch->regs[DBDMA_STATUS] &= ~WAKE;

    cmd = le16_to_cpu(current->command) & COMMAND_MASK;
    switch (cmd) {
    case DBDMA_NOP:
        nop(ch);
        return;

    case DBDMA_STOP:
        stop(ch);
        return;
    }

    key = le16_to_cpu(current->command) & 0x0700;
    req_count = le16_to_cpu(current->req_count);
    phy_addr = le32_to_cpu(current->phy_addr);

    if (key == KEY_STREAM4) {
        printf("command %x, invalid key 4\n", cmd);
        kill_channel(ch);
        return;
    }

    switch (cmd) {
    case OUTPUT_MORE:
        DBDMA_DPRINTFCH(ch, "* OUTPUT_MORE *\n");
        start_output(ch, key, phy_addr, req_count, 0);
        return;

    case OUTPUT_LAST:
        DBDMA_DPRINTFCH(ch, "* OUTPUT_LAST *\n");
        start_output(ch, key, phy_addr, req_count, 1);
        return;

    case INPUT_MORE:
        DBDMA_DPRINTFCH(ch, "* INPUT_MORE *\n");
        start_input(ch, key, phy_addr, req_count, 0);
        return;

    case INPUT_LAST:
        DBDMA_DPRINTFCH(ch, "* INPUT_LAST *\n");
        start_input(ch, key, phy_addr, req_count, 1);
        return;
    }

    if (key < KEY_REGS) {
        printf("command %x, invalid key %x\n", cmd, key);
        key = KEY_SYSTEM;
    }

    /* for LOAD_WORD and STORE_WORD, req_count is on 3 bits
     * and BRANCH is invalid
     */

    req_count = req_count & 0x0007;
    if (req_count & 0x4) {
        req_count = 4;
        phy_addr &= ~3;
    } else if (req_count & 0x2) {
        req_count = 2;
        phy_addr &= ~1;
    } else
        req_count = 1;

    switch (cmd) {
    case LOAD_WORD:
        DBDMA_DPRINTFCH(ch, "* LOAD_WORD *\n");
        load_word(ch, key, phy_addr, req_count);
        return;

    case STORE_WORD:
        DBDMA_DPRINTFCH(ch, "* STORE_WORD *\n");
        store_word(ch, key, phy_addr, req_count);
        return;
    }
}
static void DBDMA_run(DBDMAState *s)
{
    int channel;

    for (channel = 0; channel < DBDMA_CHANNELS; channel++) {
        DBDMA_channel *ch = &s->channels[channel];
        uint32_t status = ch->regs[DBDMA_STATUS];
        if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) {
            channel_run(ch);
        }
    }
}

static void DBDMA_run_bh(void *opaque)
{
    DBDMAState *s = opaque;

    DBDMA_DPRINTF("-> DBDMA_run_bh\n");
    DBDMA_run(s);
    DBDMA_DPRINTF("<- DBDMA_run_bh\n");
}

void DBDMA_kick(DBDMAState *dbdma)
{
    qemu_bh_schedule(dbdma->bh);
}
void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq,
                            DBDMA_rw rw, DBDMA_flush flush,
                            void *opaque)
{
    DBDMAState *s = dbdma;
    DBDMA_channel *ch = &s->channels[nchan];

    DBDMA_DPRINTFCH(ch, "DBDMA_register_channel 0x%x\n", nchan);

    assert(rw);
    assert(flush);

    ch->irq = irq;
    ch->rw = rw;
    ch->flush = flush;
    ch->io.opaque = opaque;
}
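/* A write to the control register uses the DBDMA convention of a 16-bit
 * mask in the high half and the new bit values in the low half: only the
 * status bits selected by the mask are affected.
 */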
static void dbdma_control_write(DBDMA_channel *ch)
{
    uint16_t mask, value;
    uint32_t status;
    bool do_flush = false;

    mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
    value = ch->regs[DBDMA_CONTROL] & 0xffff;

    /* This is the status register which we'll update
     * appropriately and store back
     */
    status = ch->regs[DBDMA_STATUS];

    /* RUN and PAUSE are bits under SW control only
     * FLUSH and WAKE are set by SW and cleared by HW
     * DEAD, ACTIVE and BT are only under HW control
     *
     * We handle ACTIVE separately at the end of the
     * logic to ensure all cases are covered.
     */

    /* Setting RUN will tentatively activate the channel
     */
    if ((mask & RUN) && (value & RUN)) {
        status |= RUN;
        DBDMA_DPRINTFCH(ch, " Setting RUN !\n");
    }

    /* Clearing RUN 1->0 will stop the channel */
    if ((mask & RUN) && !(value & RUN)) {
        /* This has the side effect of clearing the DEAD bit */
        status &= ~(DEAD | RUN);
        DBDMA_DPRINTFCH(ch, " Clearing RUN !\n");
    }

    /* Setting WAKE wakes up an idle channel if it's running
     *
     * Note: The doc doesn't say so but assume that only works
     * on a channel whose RUN bit is set.
     *
     * We set WAKE in status, it's not terribly useful as it will
     * be cleared on the next command fetch but it seems to mimic
     * the HW behaviour and is useful for the way we handle
     * ACTIVE further down.
     */
    if ((mask & WAKE) && (value & WAKE) && (status & RUN)) {
        status |= WAKE;
        DBDMA_DPRINTFCH(ch, " Setting WAKE !\n");
    }

    /* PAUSE being set will deactivate (or prevent activation)
     * of the channel. We just copy it over for now, ACTIVE will
     * be re-evaluated later.
     */
    if (mask & PAUSE) {
        status = (status & ~PAUSE) | (value & PAUSE);
        DBDMA_DPRINTFCH(ch, " %sing PAUSE !\n",
                        (value & PAUSE) ? "sett" : "clear");
    }

    /* FLUSH is its own thing */
    if ((mask & FLUSH) && (value & FLUSH)) {
        DBDMA_DPRINTFCH(ch, " Setting FLUSH !\n");
        /* We set flush directly in the status register, we do *NOT*
         * set it in "status" so that it gets naturally cleared when
         * we update the status register further down. That way it
         * will be set only during the HW flush operation so it is
         * visible to any completions happening during that time.
         */
        ch->regs[DBDMA_STATUS] |= FLUSH;
        do_flush = true;
    }

    /* If either RUN or PAUSE is clear, so should ACTIVE be,
     * otherwise, ACTIVE will be set if we modified RUN, PAUSE or
     * set WAKE. That means that PAUSE was just cleared, RUN was
     * just set or WAKE was just set.
     */
    if ((status & PAUSE) || !(status & RUN)) {
        status &= ~ACTIVE;
        DBDMA_DPRINTFCH(ch, " -> ACTIVE down !\n");

        /* We stopped processing, we want the underlying HW command
         * to complete *before* we clear the ACTIVE bit. Otherwise
         * we can get into a situation where the command status will
         * have RUN or ACTIVE not set which is going to confuse the
         * MacOS driver.
         */
        do_flush = true;
    } else if (mask & (RUN | PAUSE)) {
        status |= ACTIVE;
        DBDMA_DPRINTFCH(ch, " -> ACTIVE up !\n");
    } else if ((mask & WAKE) && (value & WAKE)) {
        status |= ACTIVE;
        DBDMA_DPRINTFCH(ch, " -> ACTIVE up !\n");
    }

    DBDMA_DPRINTFCH(ch, " new status=0x%08x\n", status);

    /* If we need to flush the underlying HW, do it now, this happens
     * both on FLUSH commands and when stopping the channel for safety.
     */
    if (do_flush && ch->flush) {
        ch->flush(&ch->io);
    }

    /* Finally update the status register image */
    ch->regs[DBDMA_STATUS] = status;

    /* If active, make sure the BH gets to run */
    if (status & ACTIVE) {
        DBDMA_kick(dbdma_from_ch(ch));
    }
}
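/* MMIO layout: each channel occupies a (1 << DBDMA_CHANNEL_SHIFT)-byte
 * window, and its 32-bit registers are indexed by the word offset within
 * that window; that is how "channel" and "reg" are derived below.
 */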
static void dbdma_write(void *opaque, hwaddr addr,
                        uint64_t value, unsigned size)
{
    int channel = addr >> DBDMA_CHANNEL_SHIFT;
    DBDMAState *s = opaque;
    DBDMA_channel *ch = &s->channels[channel];
    int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;

    DBDMA_DPRINTFCH(ch, "writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n",
                    addr, value);
    DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n",
                    (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);

    /* cmdptr cannot be modified if channel is ACTIVE */

    if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) {
        return;
    }

    ch->regs[reg] = value;

    switch(reg) {
    case DBDMA_CONTROL:
        dbdma_control_write(ch);
        break;
    case DBDMA_CMDPTR_LO:
        /* 16-byte aligned */
        ch->regs[DBDMA_CMDPTR_LO] &= ~0xf;
        dbdma_cmdptr_load(ch);
        break;
    case DBDMA_STATUS:
    case DBDMA_INTR_SEL:
    case DBDMA_BRANCH_SEL:
    case DBDMA_WAIT_SEL:
        /* nothing to do */
        break;
    case DBDMA_XFER_MODE:
    case DBDMA_CMDPTR_HI:
    case DBDMA_DATA2PTR_HI:
    case DBDMA_DATA2PTR_LO:
    case DBDMA_ADDRESS_HI:
    case DBDMA_BRANCH_ADDR_HI:
    case DBDMA_RES1:
    case DBDMA_RES2:
    case DBDMA_RES3:
    case DBDMA_RES4:
        /* unused */
        break;
    }
}
static uint64_t dbdma_read(void *opaque, hwaddr addr,
                           unsigned size)
{
    uint32_t value;
    int channel = addr >> DBDMA_CHANNEL_SHIFT;
    DBDMAState *s = opaque;
    DBDMA_channel *ch = &s->channels[channel];
    int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2;

    value = ch->regs[reg];

    switch(reg) {
    case DBDMA_CONTROL:
        value = ch->regs[DBDMA_STATUS];
        break;
    case DBDMA_STATUS:
    case DBDMA_CMDPTR_LO:
    case DBDMA_INTR_SEL:
    case DBDMA_BRANCH_SEL:
    case DBDMA_WAIT_SEL:
        /* nothing to do */
        break;
    case DBDMA_XFER_MODE:
    case DBDMA_CMDPTR_HI:
    case DBDMA_DATA2PTR_HI:
    case DBDMA_DATA2PTR_LO:
    case DBDMA_ADDRESS_HI:
    case DBDMA_BRANCH_ADDR_HI:
        /* unused */
        value = 0;
        break;
    case DBDMA_RES1:
    case DBDMA_RES2:
    case DBDMA_RES3:
    case DBDMA_RES4:
        /* reserved */
        break;
    }

    DBDMA_DPRINTFCH(ch, "readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value);
    DBDMA_DPRINTFCH(ch, "channel 0x%x reg 0x%x\n",
                    (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg);

    return value;
}
static const MemoryRegionOps dbdma_ops = {
    .read = dbdma_read,
    .write = dbdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
static const VMStateDescription vmstate_dbdma_io = {
    .name = "dbdma_io",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(addr, struct DBDMA_io),
        VMSTATE_INT32(len, struct DBDMA_io),
        VMSTATE_INT32(is_last, struct DBDMA_io),
        VMSTATE_INT32(is_dma_out, struct DBDMA_io),
        VMSTATE_BOOL(processing, struct DBDMA_io),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_dbdma_cmd = {
    .name = "dbdma_cmd",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(req_count, dbdma_cmd),
        VMSTATE_UINT16(command, dbdma_cmd),
        VMSTATE_UINT32(phy_addr, dbdma_cmd),
        VMSTATE_UINT32(cmd_dep, dbdma_cmd),
        VMSTATE_UINT16(res_count, dbdma_cmd),
        VMSTATE_UINT16(xfer_status, dbdma_cmd),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_dbdma_channel = {
    .name = "dbdma_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS),
        VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io),
        VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd,
                       dbdma_cmd),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_dbdma = {
    .name = "dbdma",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1,
                             vmstate_dbdma_channel, DBDMA_channel),
        VMSTATE_END_OF_LIST()
    }
};
static void mac_dbdma_reset(DeviceState *d)
{
    DBDMAState *s = MAC_DBDMA(d);
    int i;

    for (i = 0; i < DBDMA_CHANNELS; i++) {
        memset(s->channels[i].regs, 0, DBDMA_SIZE);
    }
}

static void dbdma_unassigned_rw(DBDMA_io *io)
{
    DBDMA_channel *ch = io->channel;
    dbdma_cmd *current = &ch->current;
    uint16_t cmd;
    qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
                  __func__, ch->channel);
    ch->io.processing = false;

    cmd = le16_to_cpu(current->command) & COMMAND_MASK;
    if (cmd == OUTPUT_MORE || cmd == OUTPUT_LAST ||
        cmd == INPUT_MORE || cmd == INPUT_LAST) {
        current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]);
        current->res_count = cpu_to_le16(io->len);
        dbdma_cmdptr_save(ch);
    }
}

static void dbdma_unassigned_flush(DBDMA_io *io)
{
    DBDMA_channel *ch = io->channel;
    qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n",
                  __func__, ch->channel);
}
static void mac_dbdma_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    DBDMAState *s = MAC_DBDMA(obj);
    int i;

    for (i = 0; i < DBDMA_CHANNELS; i++) {
        DBDMA_channel *ch = &s->channels[i];

        ch->rw = dbdma_unassigned_rw;
        ch->flush = dbdma_unassigned_flush;
        ch->channel = i;
        ch->io.channel = ch;
    }

    memory_region_init_io(&s->mem, obj, &dbdma_ops, s, "dbdma", 0x1000);
    sysbus_init_mmio(sbd, &s->mem);
}

static void mac_dbdma_realize(DeviceState *dev, Error **errp)
{
    DBDMAState *s = MAC_DBDMA(dev);

    s->bh = qemu_bh_new(DBDMA_run_bh, s);
}

static void mac_dbdma_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = mac_dbdma_realize;
    dc->reset = mac_dbdma_reset;
    dc->vmsd = &vmstate_dbdma;
}

static const TypeInfo mac_dbdma_type_info = {
    .name = TYPE_MAC_DBDMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(DBDMAState),
    .instance_init = mac_dbdma_init,
    .class_init = mac_dbdma_class_init,
};

static void mac_dbdma_register_types(void)
{
    type_register_static(&mac_dbdma_type_info);
}

type_init(mac_dbdma_register_types)
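/*
 * Usage sketch (illustrative only, kept in a comment so it does not affect
 * the build): a macio device model would typically hook a channel up along
 * the following lines. MyDevState, my_dev_do_transfer, my_dev_cancel_transfer,
 * MY_DEV_DBDMA_CHANNEL and my_dev_irq are hypothetical names; only the
 * DBDMA_register_channel() signature and the DBDMA_io fields used here come
 * from this file. The rw callback performs (or schedules) the transfer
 * described by DBDMA_io and must eventually call io->dma_end(io) so that
 * dbdma_end() can complete the descriptor.
 *
 *   static void my_dev_dbdma_rw(DBDMA_io *io)
 *   {
 *       MyDevState *s = io->opaque;
 *
 *       my_dev_do_transfer(s, io->addr, io->len, io->is_dma_out);
 *       io->dma_end(io);
 *   }
 *
 *   static void my_dev_dbdma_flush(DBDMA_io *io)
 *   {
 *       MyDevState *s = io->opaque;
 *
 *       my_dev_cancel_transfer(s);
 *   }
 *
 *   DBDMA_register_channel(dbdma, MY_DEV_DBDMA_CHANNEL, my_dev_irq,
 *                          my_dev_dbdma_rw, my_dev_dbdma_flush, s);
 */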