hw/dma/etraxfs_dma.c
/*
 * QEMU ETRAX DMA Controller.
 *
 * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "sysemu/runstate.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#include "hw/cris/etraxfs_dma.h"
/* Debug hook: D(x) expands to nothing, so the D(printf(...)) calls below
   are compiled out unless this macro is changed. */
#define D(x)
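/*
 * Per-channel register map. Each channel occupies a 0x2000-byte MMIO window
 * (see fs_channel() below); the offsets that follow are byte offsets within
 * that window divided by 4, so they can index the regs[] array directly.
 */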
#define RW_DATA             (0x0 / 4)
#define RW_SAVED_DATA       (0x58 / 4)
#define RW_SAVED_DATA_BUF   (0x5c / 4)
#define RW_GROUP            (0x60 / 4)
#define RW_GROUP_DOWN       (0x7c / 4)
#define RW_CMD              (0x80 / 4)
#define RW_CFG              (0x84 / 4)
#define RW_STAT             (0x88 / 4)
#define RW_INTR_MASK        (0x8c / 4)
#define RW_ACK_INTR         (0x90 / 4)
#define R_INTR              (0x94 / 4)
#define R_MASKED_INTR       (0x98 / 4)
#define RW_STREAM_CMD       (0x9c / 4)

#define DMA_REG_MAX         (0x100 / 4)
/* descriptors */

// ------------------------------------------------------------ dma_descr_group
typedef struct dma_descr_group {
    uint32_t                      next;
    unsigned                      eol        : 1;
    unsigned                      tol        : 1;
    unsigned                      bol        : 1;
    unsigned                                 : 1;
    unsigned                      intr       : 1;
    unsigned                                 : 2;
    unsigned                      en         : 1;
    unsigned                                 : 7;
    unsigned                      dis        : 1;
    unsigned                      md         : 16;
    struct dma_descr_group       *up;
    union {
        struct dma_descr_context *context;
        struct dma_descr_group   *group;
    }                             down;
} dma_descr_group;

// ---------------------------------------------------------- dma_descr_context
typedef struct dma_descr_context {
    uint32_t                      next;
    unsigned                      eol        : 1;
    unsigned                                 : 3;
    unsigned                      intr       : 1;
    unsigned                                 : 1;
    unsigned                      store_mode : 1;
    unsigned                      en         : 1;
    unsigned                                 : 7;
    unsigned                      dis        : 1;
    unsigned                      md0        : 16;
    unsigned                      md1;
    unsigned                      md2;
    unsigned                      md3;
    unsigned                      md4;
    uint32_t                      saved_data;
    uint32_t                      saved_data_buf;
} dma_descr_context;

// ------------------------------------------------------------- dma_descr_data
typedef struct dma_descr_data {
    uint32_t                      next;
    uint32_t                      buf;
    unsigned                      eol        : 1;
    unsigned                                 : 2;
    unsigned                      out_eop    : 1;
    unsigned                      intr       : 1;
    unsigned                      wait       : 1;
    unsigned                                 : 2;
    unsigned                                 : 3;
    unsigned                      in_eop     : 1;
    unsigned                                 : 4;
    unsigned                      md         : 16;
    uint32_t                      after;
} dma_descr_data;
/* Constants */
enum {
    regk_dma_ack_pkt                = 0x00000100,
    regk_dma_anytime                = 0x00000001,
    regk_dma_array                  = 0x00000008,
    regk_dma_burst                  = 0x00000020,
    regk_dma_client                 = 0x00000002,
    regk_dma_copy_next              = 0x00000010,
    regk_dma_copy_up                = 0x00000020,
    regk_dma_data_at_eol            = 0x00000001,
    regk_dma_dis_c                  = 0x00000010,
    regk_dma_dis_g                  = 0x00000020,
    regk_dma_idle                   = 0x00000001,
    regk_dma_intern                 = 0x00000004,
    regk_dma_load_c                 = 0x00000200,
    regk_dma_load_c_n               = 0x00000280,
    regk_dma_load_c_next            = 0x00000240,
    regk_dma_load_d                 = 0x00000140,
    regk_dma_load_g                 = 0x00000300,
    regk_dma_load_g_down            = 0x000003c0,
    regk_dma_load_g_next            = 0x00000340,
    regk_dma_load_g_up              = 0x00000380,
    regk_dma_next_en                = 0x00000010,
    regk_dma_next_pkt               = 0x00000010,
    regk_dma_no                     = 0x00000000,
    regk_dma_only_at_wait           = 0x00000000,
    regk_dma_restore                = 0x00000020,
    regk_dma_rst                    = 0x00000001,
    regk_dma_running                = 0x00000004,
    regk_dma_rw_cfg_default         = 0x00000000,
    regk_dma_rw_cmd_default         = 0x00000000,
    regk_dma_rw_intr_mask_default   = 0x00000000,
    regk_dma_rw_stat_default        = 0x00000101,
    regk_dma_rw_stream_cmd_default  = 0x00000000,
    regk_dma_save_down              = 0x00000020,
    regk_dma_save_up                = 0x00000020,
    regk_dma_set_reg                = 0x00000050,
    regk_dma_set_w_size1            = 0x00000190,
    regk_dma_set_w_size2            = 0x000001a0,
    regk_dma_set_w_size4            = 0x000001c0,
    regk_dma_stopped                = 0x00000002,
    regk_dma_store_c                = 0x00000002,
    regk_dma_store_descr            = 0x00000000,
    regk_dma_store_g                = 0x00000004,
    regk_dma_store_md               = 0x00000001,
    regk_dma_sw                     = 0x00000008,
    regk_dma_update_down            = 0x00000020,
    regk_dma_yes                    = 0x00000001
};
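/*
 * The regk_dma_* values follow the ETRAX FS hardware register field
 * encodings. Note that regk_dma_rst/stopped/running (1/2/4) line up with the
 * dma_ch_state enum below, which is what dma_read() reports in RW_STAT.
 */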
enum dma_ch_state
{
    RST = 1,
    STOPPED = 2,
    RUNNING = 4
};

struct fs_dma_channel
{
    qemu_irq irq;
    struct etraxfs_dma_client *client;

    /* Internal status.  */
    int stream_cmd_src;
    enum dma_ch_state state;

    unsigned int input : 1;
    unsigned int eol : 1;

    struct dma_descr_group current_g;
    struct dma_descr_context current_c;
    struct dma_descr_data current_d;

    /* Control registers.  */
    uint32_t regs[DMA_REG_MAX];
};

struct fs_dma_ctrl
{
    MemoryRegion mmio;
    int nr_channels;
    struct fs_dma_channel *channels;

    QEMUBH *bh;
};
static void DMA_run(void *opaque);
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c);

static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
{
    return ctrl->channels[c].regs[reg];
}

static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
{
    return channel_reg(ctrl, c, RW_CFG) & 2;
}

static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
{
    return (channel_reg(ctrl, c, RW_CFG) & 1)
           && ctrl->channels[c].client;
}

static inline int fs_channel(hwaddr addr)
{
    /* Every channel has a 0x2000 ctrl register map.  */
    return addr >> 13;
}
#ifdef USE_THIS_DEAD_CODE
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_GROUP);

    /* Load and decode. FIXME: handle endianness.  */
    cpu_physical_memory_read(addr, &ctrl->channels[c].current_g,
                             sizeof(ctrl->channels[c].current_g));
}

static void dump_c(int ch, struct dma_descr_context *c)
{
    printf("%s ch=%d\n", __func__, ch);
    printf("next=%x\n", c->next);
    printf("saved_data=%x\n", c->saved_data);
    printf("saved_data_buf=%x\n", c->saved_data_buf);
    printf("eol=%x\n", (uint32_t) c->eol);
}

static void dump_d(int ch, struct dma_descr_data *d)
{
    printf("%s ch=%d\n", __func__, ch);
    printf("next=%x\n", d->next);
    printf("buf=%x\n", d->buf);
    printf("after=%x\n", d->after);
    printf("intr=%x\n", (uint32_t) d->intr);
    printf("out_eop=%x\n", (uint32_t) d->out_eop);
    printf("in_eop=%x\n", (uint32_t) d->in_eop);
    printf("eol=%x\n", (uint32_t) d->eol);
}
#endif
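/*
 * Descriptor load/store helpers: copy descriptors between guest memory and
 * the per-channel shadow copies (current_c/current_d), keeping RW_SAVED_DATA
 * and RW_SAVED_DATA_BUF in sync. Endianness conversion is still a FIXME.
 */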
static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

    /* Load and decode. FIXME: handle endianness.  */
    cpu_physical_memory_read(addr, &ctrl->channels[c].current_c,
                             sizeof(ctrl->channels[c].current_c));

    D(dump_c(c, &ctrl->channels[c].current_c));
    /* I guess this should update the current pos.  */
    ctrl->channels[c].regs[RW_SAVED_DATA] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
}

static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);

    /* Load and decode. FIXME: handle endianness.  */
    D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
    cpu_physical_memory_read(addr, &ctrl->channels[c].current_d,
                             sizeof(ctrl->channels[c].current_d));

    D(dump_d(c, &ctrl->channels[c].current_d));
    ctrl->channels[c].regs[RW_DATA] = addr;
}

static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

    /* Encode and store. FIXME: handle endianness.  */
    D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
    D(dump_d(c, &ctrl->channels[c].current_d));
    cpu_physical_memory_write(addr, &ctrl->channels[c].current_c,
                              sizeof(ctrl->channels[c].current_c));
}

static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);

    /* Encode and store. FIXME: handle endianness.  */
    D(printf("%s ch=%d addr=" HWADDR_FMT_plx "\n", __func__, c, addr));
    cpu_physical_memory_write(addr, &ctrl->channels[c].current_d,
                              sizeof(ctrl->channels[c].current_d));
}
static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
    /* FIXME:  */
}

static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
{
    if (ctrl->channels[c].client)
    {
        ctrl->channels[c].eol = 0;
        ctrl->channels[c].state = RUNNING;
        if (!ctrl->channels[c].input)
            channel_out_run(ctrl, c);
    } else
        printf("WARNING: starting DMA ch %d with no client\n", c);

    qemu_bh_schedule_idle(ctrl->bh);
}
static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
{
    if (!channel_en(ctrl, c)
        || channel_stopped(ctrl, c)
        || ctrl->channels[c].state != RUNNING
        /* Only reload the current data descriptor if it has eol set.  */
        || !ctrl->channels[c].current_d.eol) {
        D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
                 c, ctrl->channels[c].state,
                 channel_stopped(ctrl, c),
                 channel_en(ctrl, c),
                 ctrl->channels[c].eol));
        D(dump_d(c, &ctrl->channels[c].current_d));
        return;
    }

    /* Reload the current descriptor.  */
    channel_load_d(ctrl, c);

    /* If the current descriptor cleared the eol flag and we had already
       reached eol state, do the continue.  */
    if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
        D(printf("continue %d ok %x\n", c,
                 ctrl->channels[c].current_d.next));
        ctrl->channels[c].regs[RW_SAVED_DATA] =
            (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
        channel_load_d(ctrl, c);
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
            (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;

        channel_start(ctrl, c);
    }
    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
}
static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
    unsigned int cmd = v & ((1 << 10) - 1);

    D(printf("%s ch=%d cmd=%x\n",
             __func__, c, cmd));
    if (cmd & regk_dma_load_d) {
        channel_load_d(ctrl, c);
        if (cmd & regk_dma_burst)
            channel_start(ctrl, c);
    }

    if (cmd & regk_dma_load_c) {
        channel_load_c(ctrl, c);
    }
}
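/*
 * Interrupt bookkeeping: bits written to RW_ACK_INTR clear the corresponding
 * bits in R_INTR, R_MASKED_INTR is recomputed as R_INTR & RW_INTR_MASK, and
 * the channel's qemu_irq line tracks whether any masked interrupt is pending.
 */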
static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
{
    D(printf("%s %d\n", __func__, c));
    ctrl->channels[c].regs[R_INTR] &=
        ~(ctrl->channels[c].regs[RW_ACK_INTR]);

    ctrl->channels[c].regs[R_MASKED_INTR] =
        ctrl->channels[c].regs[R_INTR]
        & ctrl->channels[c].regs[RW_INTR_MASK];

    D(printf("%s: chan=%d masked_intr=%x\n", __func__,
             c,
             ctrl->channels[c].regs[R_MASKED_INTR]));

    qemu_set_irq(ctrl->channels[c].irq,
                 !!ctrl->channels[c].regs[R_MASKED_INTR]);
}
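/*
 * Output (memory-to-client) path: walk the data descriptor chain, pushing up
 * to 2 KiB of guest memory per iteration to the attached client until a
 * descriptor marked eol is reached.
 */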
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
    uint32_t len;
    uint32_t saved_data_buf;
    unsigned char buf[2 * 1024];

    struct dma_context_metadata meta;
    bool send_context = true;

    if (ctrl->channels[c].eol)
        return 0;

    do {
        bool out_eop;
        D(printf("ch=%d buf=%x after=%x\n",
                 c,
                 (uint32_t)ctrl->channels[c].current_d.buf,
                 (uint32_t)ctrl->channels[c].current_d.after));

        if (send_context) {
            if (ctrl->channels[c].client->client.metadata_push) {
                meta.metadata = ctrl->channels[c].current_d.md;
                ctrl->channels[c].client->client.metadata_push(
                    ctrl->channels[c].client->client.opaque,
                    &meta);
            }
            send_context = false;
        }

        channel_load_d(ctrl, c);
        saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
        len = (uint32_t)(unsigned long)
            ctrl->channels[c].current_d.after;
        len -= saved_data_buf;

        if (len > sizeof buf)
            len = sizeof buf;
        cpu_physical_memory_read(saved_data_buf, buf, len);

        out_eop = ((saved_data_buf + len) ==
                   ctrl->channels[c].current_d.after) &&
                  ctrl->channels[c].current_d.out_eop;

        D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
                 saved_data_buf, len, out_eop));

        if (ctrl->channels[c].client->client.push) {
            if (len > 0) {
                ctrl->channels[c].client->client.push(
                    ctrl->channels[c].client->client.opaque,
                    buf, len, out_eop);
            }
        } else {
            printf("WARNING: DMA ch%d dataloss,"
                   " no attached client.\n", c);
        }

        saved_data_buf += len;

        if (saved_data_buf == (uint32_t)(unsigned long)
            ctrl->channels[c].current_d.after) {
            /* Done. Step to next.  */
            if (ctrl->channels[c].current_d.out_eop) {
                send_context = true;
            }
            if (ctrl->channels[c].current_d.intr) {
                /* data intr.  */
                D(printf("signal intr %d eol=%d\n",
                         len, ctrl->channels[c].current_d.eol));
                ctrl->channels[c].regs[R_INTR] |= (1 << 2);
                channel_update_irq(ctrl, c);
            }
            channel_store_d(ctrl, c);
            if (ctrl->channels[c].current_d.eol) {
                D(printf("channel %d EOL\n", c));
                ctrl->channels[c].eol = 1;

                /* Mark the context as disabled.  */
                ctrl->channels[c].current_c.dis = 1;
                channel_store_c(ctrl, c);

                channel_stop(ctrl, c);
            } else {
                ctrl->channels[c].regs[RW_SAVED_DATA] =
                    (uint32_t)(unsigned long)ctrl->
                        channels[c].current_d.next;
                /* Load new descriptor.  */
                channel_load_d(ctrl, c);
                saved_data_buf = (uint32_t)(unsigned long)
                    ctrl->channels[c].current_d.buf;
            }

            ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
                saved_data_buf;
            D(dump_d(c, &ctrl->channels[c].current_d));
        }
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
    } while (!ctrl->channels[c].eol);
    return 1;
}
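/*
 * Input (client-to-memory) path: copy data supplied by the client into the
 * current data descriptor's buffer, raise data/in_eop interrupts as flagged,
 * and step to the next descriptor when the buffer fills or eop is signalled.
 */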
static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
                              unsigned char *buf, int buflen, int eop)
{
    uint32_t len;
    uint32_t saved_data_buf;

    if (ctrl->channels[c].eol == 1)
        return 0;

    channel_load_d(ctrl, c);
    saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
    len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
    len -= saved_data_buf;

    if (len > buflen)
        len = buflen;

    cpu_physical_memory_write(saved_data_buf, buf, len);
    saved_data_buf += len;

    if (saved_data_buf ==
        (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
        || eop) {
        uint32_t r_intr = ctrl->channels[c].regs[R_INTR];

        D(printf("in dscr end len=%d\n",
                 ctrl->channels[c].current_d.after
                 - ctrl->channels[c].current_d.buf));
        ctrl->channels[c].current_d.after = saved_data_buf;

        /* Done. Step to next.  */
        if (ctrl->channels[c].current_d.intr) {
            /* TODO: signal eop to the client.  */
            /* data intr.  */
            ctrl->channels[c].regs[R_INTR] |= 3;
        }
        if (eop) {
            ctrl->channels[c].current_d.in_eop = 1;
            ctrl->channels[c].regs[R_INTR] |= 8;
        }
        if (r_intr != ctrl->channels[c].regs[R_INTR])
            channel_update_irq(ctrl, c);

        channel_store_d(ctrl, c);
        D(dump_d(c, &ctrl->channels[c].current_d));

        if (ctrl->channels[c].current_d.eol) {
            D(printf("channel %d EOL\n", c));
            ctrl->channels[c].eol = 1;

            /* Mark the context as disabled.  */
            ctrl->channels[c].current_c.dis = 1;
            channel_store_c(ctrl, c);

            channel_stop(ctrl, c);
        } else {
            ctrl->channels[c].regs[RW_SAVED_DATA] =
                (uint32_t)(unsigned long)ctrl->
                    channels[c].current_d.next;
            /* Load new descriptor.  */
            channel_load_d(ctrl, c);
            saved_data_buf = (uint32_t)(unsigned long)
                ctrl->channels[c].current_d.buf;
        }
    }

    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
    return len;
}
static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
{
    if (ctrl->channels[c].client->client.pull) {
        ctrl->channels[c].client->client.pull(
            ctrl->channels[c].client->client.opaque);
        return 1;
    } else
        return 0;
}
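/*
 * MMIO register interface. Only aligned 32-bit accesses are meaningful;
 * sub-word accesses are reported via dma_rinvalid()/dma_winvalid(), which
 * call hw_error(). Accesses are decoded per channel by fs_channel().
 */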
static uint32_t dma_rinvalid(void *opaque, hwaddr addr)
{
    hw_error("Unsupported short raccess. reg=" HWADDR_FMT_plx "\n", addr);
    return 0;
}

static uint64_t
dma_read(void *opaque, hwaddr addr, unsigned int size)
{
    struct fs_dma_ctrl *ctrl = opaque;
    int c;
    uint32_t r = 0;

    if (size != 4) {
        dma_rinvalid(opaque, addr);
    }

    /* Make addr relative to this channel and bounded to nr regs.  */
    c = fs_channel(addr);
    addr &= 0xff;
    addr >>= 2;
    switch (addr) {
    case RW_STAT:
        r = ctrl->channels[c].state & 7;
        r |= ctrl->channels[c].eol << 5;
        r |= ctrl->channels[c].stream_cmd_src << 8;
        break;

    default:
        r = ctrl->channels[c].regs[addr];
        D(printf("%s c=%d addr=" HWADDR_FMT_plx "\n",
                 __func__, c, addr));
        break;
    }
    return r;
}
static void
dma_winvalid(void *opaque, hwaddr addr, uint32_t value)
{
    hw_error("Unsupported short waccess. reg=" HWADDR_FMT_plx "\n", addr);
}

static void
dma_update_state(struct fs_dma_ctrl *ctrl, int c)
{
    if (ctrl->channels[c].regs[RW_CFG] & 2)
        ctrl->channels[c].state = STOPPED;
    if (!(ctrl->channels[c].regs[RW_CFG] & 1))
        ctrl->channels[c].state = RST;
}
static void
dma_write(void *opaque, hwaddr addr,
          uint64_t val64, unsigned int size)
{
    struct fs_dma_ctrl *ctrl = opaque;
    uint32_t value = val64;
    int c;

    if (size != 4) {
        dma_winvalid(opaque, addr, value);
    }

    /* Make addr relative to this channel and bounded to nr regs.  */
    c = fs_channel(addr);
    addr &= 0xff;
    addr >>= 2;
    switch (addr) {
    case RW_DATA:
        ctrl->channels[c].regs[addr] = value;
        break;

    case RW_CFG:
        ctrl->channels[c].regs[addr] = value;
        dma_update_state(ctrl, c);
        break;
    case RW_CMD:
        /* continue.  */
        if (value & ~1)
            printf("Invalid store to ch=%d RW_CMD %x\n",
                   c, value);
        ctrl->channels[c].regs[addr] = value;
        channel_continue(ctrl, c);
        break;

    case RW_SAVED_DATA:
    case RW_SAVED_DATA_BUF:
    case RW_GROUP:
    case RW_GROUP_DOWN:
        ctrl->channels[c].regs[addr] = value;
        break;

    case RW_ACK_INTR:
    case RW_INTR_MASK:
        ctrl->channels[c].regs[addr] = value;
        channel_update_irq(ctrl, c);
        if (addr == RW_ACK_INTR)
            ctrl->channels[c].regs[RW_ACK_INTR] = 0;
        break;

    case RW_STREAM_CMD:
        if (value & ~1023)
            printf("Invalid store to ch=%d "
                   "RW_STREAMCMD %x\n",
                   c, value);
        ctrl->channels[c].regs[addr] = value;
        D(printf("stream_cmd ch=%d\n", c));
        channel_stream_cmd(ctrl, c, value);
        break;

    default:
        D(printf("%s c=%d " HWADDR_FMT_plx "\n",
                 __func__, c, addr));
        break;
    }
}
static const MemoryRegionOps dma_ops = {
    .read = dma_read,
    .write = dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4
    }
};
static int etraxfs_dmac_run(void *opaque)
{
    struct fs_dma_ctrl *ctrl = opaque;
    int i;
    int p = 0;

    for (i = 0; i < ctrl->nr_channels; i++)
    {
        if (ctrl->channels[i].state == RUNNING)
        {
            if (ctrl->channels[i].input) {
                p += channel_in_run(ctrl, i);
            } else {
                p += channel_out_run(ctrl, i);
            }
        }
    }
    return p;
}
int etraxfs_dmac_input(struct etraxfs_dma_client *client,
                       void *buf, int len, int eop)
{
    return channel_in_process(client->ctrl, client->channel,
                              buf, len, eop);
}

/* Connect an IRQ line with a channel.  */
void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
{
    struct fs_dma_ctrl *ctrl = opaque;
    ctrl->channels[c].irq = *line;
    ctrl->channels[c].input = input;
}

void etraxfs_dmac_connect_client(void *opaque, int c,
                                 struct etraxfs_dma_client *cl)
{
    struct fs_dma_ctrl *ctrl = opaque;
    cl->ctrl = ctrl;
    cl->channel = c;
    ctrl->channels[c].client = cl;
}
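/*
 * All DMA work is driven from an idle bottom-half: DMA_run() polls the
 * running channels and reschedules itself while the VM is stopped or while
 * any channel reports progress.
 */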
static void DMA_run(void *opaque)
{
    struct fs_dma_ctrl *etraxfs_dmac = opaque;
    int p = 1;

    if (runstate_is_running())
        p = etraxfs_dmac_run(etraxfs_dmac);

    if (p)
        qemu_bh_schedule_idle(etraxfs_dmac->bh);
}
void *etraxfs_dmac_init(hwaddr base, int nr_channels)
{
    struct fs_dma_ctrl *ctrl = NULL;

    ctrl = g_malloc0(sizeof *ctrl);

    ctrl->bh = qemu_bh_new(DMA_run, ctrl);

    ctrl->nr_channels = nr_channels;
    ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);

    memory_region_init_io(&ctrl->mmio, NULL, &dma_ops, ctrl, "etraxfs-dma",
                          nr_channels * 0x2000);
    memory_region_add_subregion(get_system_memory(), base, &ctrl->mmio);

    return ctrl;
}
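/*
 * Usage sketch (illustrative only, not part of the original file): a CRIS
 * board model would typically map the controller and wire up one channel
 * roughly as below. The base address, channel count, interrupt array irq[]
 * and client structure eth_client are hypothetical names for the example.
 *
 *     void *dmac = etraxfs_dmac_init(0x30000000, 10);
 *     etraxfs_dmac_connect(dmac, 0, &irq[0], 0);           (output channel)
 *     etraxfs_dmac_connect_client(dmac, 0, &eth_client);
 */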