cuda: port SET_DEVICE_LIST command to new framework
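With this change SET_DEVICE_LIST is dispatched through the generic CudaCommand
handler table (handlers[]): cuda_cmd_set_device_list() validates the two-byte
payload and stores the 16-bit ADB device bitmap in adb_poll_mask, which
cuda_adb_poll() passes to adb_poll(); malformed requests get the common
ERROR_PACKET "bad parameters" reply.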
[qemu/ar7.git] hw/misc/macio/cuda.c
blob a2e31d0473f7bb948cee0735efee6a9879c1448a
/*
 * QEMU PowerMac CUDA device support
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/input/adb.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"

/* XXX: implement all timer modes */

/* debug CUDA */
//#define DEBUG_CUDA

/* debug CUDA packets */
//#define DEBUG_CUDA_PACKET

#ifdef DEBUG_CUDA
#define CUDA_DPRINTF(fmt, ...)                                  \
    do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0)
#else
#define CUDA_DPRINTF(fmt, ...)
#endif

/* Bits in B data register: all active low */
#define TREQ            0x08    /* Transfer request (input) */
#define TACK            0x10    /* Transfer acknowledge (output) */
#define TIP             0x20    /* Transfer in progress (output) */

/* Bits in ACR */
#define SR_CTRL         0x1c    /* Shift register control bits */
#define SR_EXT          0x0c    /* Shift on external clock */
#define SR_OUT          0x10    /* Shift out if 1 */

/* Bits in IFR and IER */
#define IER_SET         0x80    /* set bits in IER */
#define IER_CLR         0       /* clear bits in IER */
#define SR_INT          0x04    /* Shift register full/empty */
#define SR_DATA_INT     0x08
#define SR_CLOCK_INT    0x10
#define T1_INT          0x40    /* Timer 1 interrupt */
#define T2_INT          0x20    /* Timer 2 interrupt */

/* Bits in ACR */
#define T1MODE          0xc0    /* Timer 1 mode */
#define T1MODE_CONT     0x40    /* continuous interrupts */

/* commands (1st byte) */
#define ADB_PACKET      0
#define CUDA_PACKET     1
#define ERROR_PACKET    2
#define TIMER_PACKET    3
#define POWER_PACKET    4
#define MACIIC_PACKET   5
#define PMU_PACKET      6

/* CUDA commands (2nd byte) */
#define CUDA_WARM_START              0x0
#define CUDA_AUTOPOLL                0x1
#define CUDA_GET_6805_ADDR           0x2
#define CUDA_GET_TIME                0x3
#define CUDA_GET_PRAM                0x7
#define CUDA_SET_6805_ADDR           0x8
#define CUDA_SET_TIME                0x9
#define CUDA_POWERDOWN               0xa
#define CUDA_POWERUP_TIME            0xb
#define CUDA_SET_PRAM                0xc
#define CUDA_MS_RESET                0xd
#define CUDA_SEND_DFAC               0xe
#define CUDA_BATTERY_SWAP_SENSE      0x10
#define CUDA_RESET_SYSTEM            0x11
#define CUDA_SET_IPL                 0x12
#define CUDA_FILE_SERVER_FLAG        0x13
#define CUDA_SET_AUTO_RATE           0x14
#define CUDA_GET_AUTO_RATE           0x16
#define CUDA_SET_DEVICE_LIST         0x19
#define CUDA_GET_DEVICE_LIST         0x1a
#define CUDA_SET_ONE_SECOND_MODE     0x1b
#define CUDA_SET_POWER_MESSAGES      0x21
#define CUDA_GET_SET_IIC             0x22
#define CUDA_WAKEUP                  0x23
#define CUDA_TIMER_TICKLE            0x24
#define CUDA_COMBINED_FORMAT_IIC     0x25

#define CUDA_TIMER_FREQ (4700000 / 6)

/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
#define RTC_OFFSET                   2082844800
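/* 2082844800 = 24107 days * 86400 s: the 66 years from 1904-01-01 to
   1970-01-01 contain 17 leap days (66 * 365 + 17 = 24107). */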

/* CUDA registers */
#define CUDA_REG_B       0x00
#define CUDA_REG_A       0x01
#define CUDA_REG_DIRB    0x02
#define CUDA_REG_DIRA    0x03
#define CUDA_REG_T1CL    0x04
#define CUDA_REG_T1CH    0x05
#define CUDA_REG_T1LL    0x06
#define CUDA_REG_T1LH    0x07
#define CUDA_REG_T2CL    0x08
#define CUDA_REG_T2CH    0x09
#define CUDA_REG_SR      0x0a
#define CUDA_REG_ACR     0x0b
#define CUDA_REG_PCR     0x0c
#define CUDA_REG_IFR     0x0d
#define CUDA_REG_IER     0x0e
#define CUDA_REG_ANH     0x0f
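/* These are the registers of the MOS 6522 VIA through which the CUDA is
   attached.  cuda_readb()/cuda_writeb() take the register index from bits
   12:9 of the MMIO offset, so consecutive registers are spaced 0x200 bytes
   apart within the 0x2000 byte MMIO window. */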

static void cuda_update(CUDAState *s);
static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len);
static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
                              int64_t current_time);

static void cuda_update_irq(CUDAState *s)
{
    if (s->ifr & s->ier & (SR_INT | T1_INT | T2_INT)) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}

static uint64_t get_tb(uint64_t time, uint64_t freq)
{
    return muldiv64(time, freq, get_ticks_per_sec());
}

static unsigned int get_counter(CUDATimer *ti)
{
    int64_t d;
    unsigned int counter;
    uint64_t tb_diff;
    uint64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    /* Reverse of the tb calculation algorithm that Mac OS X uses on bootup. */
    tb_diff = get_tb(current_time, ti->frequency) - ti->load_time;
    d = (tb_diff * 0xBF401675E5DULL) / (ti->frequency << 24);

    if (ti->index == 0) {
        /* the timer goes down from latch to -1 (period of latch + 2) */
        if (d <= (ti->counter_value + 1)) {
            counter = (ti->counter_value - d) & 0xffff;
        } else {
            counter = (d - (ti->counter_value + 1)) % (ti->latch + 2);
            counter = (ti->latch - counter) & 0xffff;
        }
    } else {
        counter = (ti->counter_value - d) & 0xffff;
    }
    return counter;
}

static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
{
    CUDA_DPRINTF("T%d.counter=%d\n", 1 + ti->index, val);
    ti->load_time = get_tb(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           s->frequency);
    ti->counter_value = val;
    cuda_timer_update(s, ti, ti->load_time);
}

static int64_t get_next_irq_time(CUDATimer *s, int64_t current_time)
{
    int64_t d, next_time;
    unsigned int counter;

    /* current counter value */
    d = muldiv64(current_time - s->load_time,
                 CUDA_TIMER_FREQ, get_ticks_per_sec());
    /* the timer goes down from latch to -1 (period of latch + 2) */
    if (d <= (s->counter_value + 1)) {
        counter = (s->counter_value - d) & 0xffff;
    } else {
        counter = (d - (s->counter_value + 1)) % (s->latch + 2);
        counter = (s->latch - counter) & 0xffff;
    }

    /* Note: we consider the irq is raised on 0 */
    if (counter == 0xffff) {
        next_time = d + s->latch + 1;
    } else if (counter == 0) {
        next_time = d + s->latch + 2;
    } else {
        next_time = d + counter;
    }
    CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
                 s->latch, d, next_time - d);
    next_time = muldiv64(next_time, get_ticks_per_sec(), CUDA_TIMER_FREQ) +
        s->load_time;
    if (next_time <= current_time)
        next_time = current_time + 1;
    return next_time;
}

static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
                              int64_t current_time)
{
    if (!ti->timer)
        return;
    if (ti->index == 0 && (s->acr & T1MODE) != T1MODE_CONT) {
        timer_del(ti->timer);
    } else {
        ti->next_irq_time = get_next_irq_time(ti, current_time);
        timer_mod(ti->timer, ti->next_irq_time);
    }
}

static void cuda_timer1(void *opaque)
{
    CUDAState *s = opaque;
    CUDATimer *ti = &s->timers[0];

    cuda_timer_update(s, ti, ti->next_irq_time);
    s->ifr |= T1_INT;
    cuda_update_irq(s);
}

static void cuda_timer2(void *opaque)
{
    CUDAState *s = opaque;
    CUDATimer *ti = &s->timers[1];

    cuda_timer_update(s, ti, ti->next_irq_time);
    s->ifr |= T2_INT;
    cuda_update_irq(s);
}

static void cuda_set_sr_int(void *opaque)
{
    CUDAState *s = opaque;

    CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);
    s->ifr |= SR_INT;
    cuda_update_irq(s);
}

static void cuda_delay_set_sr_int(CUDAState *s)
{
    int64_t expire;

    if (s->dirb == 0xff) {
        /* Not in Mac OS, fire the IRQ directly */
        cuda_set_sr_int(s);
        return;
    }

    CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);

    expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 300 * SCALE_US;
    timer_mod(s->sr_delay_timer, expire);
}
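/* DIRB keeps its reset value of 0xff until the guest reprograms the port
   directions (the comment above treats that as the sign that classic Mac OS
   is running); in that case the shift-register interrupt is deferred by
   300us of virtual time via sr_delay_timer rather than raised immediately. */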

static uint32_t cuda_readb(void *opaque, hwaddr addr)
{
    CUDAState *s = opaque;
    uint32_t val;

    addr = (addr >> 9) & 0xf;
    switch(addr) {
    case CUDA_REG_B:
        val = s->b;
        break;
    case CUDA_REG_A:
        val = s->a;
        break;
    case CUDA_REG_DIRB:
        val = s->dirb;
        break;
    case CUDA_REG_DIRA:
        val = s->dira;
        break;
    case CUDA_REG_T1CL:
        val = get_counter(&s->timers[0]) & 0xff;
        s->ifr &= ~T1_INT;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T1CH:
        val = get_counter(&s->timers[0]) >> 8;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T1LL:
        val = s->timers[0].latch & 0xff;
        break;
    case CUDA_REG_T1LH:
        /* XXX: check this */
        val = (s->timers[0].latch >> 8) & 0xff;
        break;
    case CUDA_REG_T2CL:
        val = get_counter(&s->timers[1]) & 0xff;
        s->ifr &= ~T2_INT;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T2CH:
        val = get_counter(&s->timers[1]) >> 8;
        break;
    case CUDA_REG_SR:
        val = s->sr;
        s->ifr &= ~(SR_INT | SR_CLOCK_INT | SR_DATA_INT);
        cuda_update_irq(s);
        break;
    case CUDA_REG_ACR:
        val = s->acr;
        break;
    case CUDA_REG_PCR:
        val = s->pcr;
        break;
    case CUDA_REG_IFR:
        val = s->ifr;
        if (s->ifr & s->ier) {
            val |= 0x80;
        }
        break;
    case CUDA_REG_IER:
        val = s->ier | 0x80;
        break;
    default:
    case CUDA_REG_ANH:
        val = s->anh;
        break;
    }
    if (addr != CUDA_REG_IFR || val != 0) {
        CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
    }

    return val;
}

static void cuda_writeb(void *opaque, hwaddr addr, uint32_t val)
{
    CUDAState *s = opaque;

    addr = (addr >> 9) & 0xf;
    CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);

    switch(addr) {
    case CUDA_REG_B:
        s->b = val;
        cuda_update(s);
        break;
    case CUDA_REG_A:
        s->a = val;
        break;
    case CUDA_REG_DIRB:
        s->dirb = val;
        break;
    case CUDA_REG_DIRA:
        s->dira = val;
        break;
    case CUDA_REG_T1CL:
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T1CH:
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        set_counter(s, &s->timers[0], s->timers[0].latch);
        break;
    case CUDA_REG_T1LL:
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T1LH:
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T2CL:
        s->timers[1].latch = (s->timers[1].latch & 0xff00) | val;
        break;
    case CUDA_REG_T2CH:
        /* To ensure T2 generates an interrupt on zero crossing with the
           common timer code, write the value directly from the latch to
           the counter */
        s->timers[1].latch = (s->timers[1].latch & 0xff) | (val << 8);
        s->ifr &= ~T2_INT;
        set_counter(s, &s->timers[1], s->timers[1].latch);
        break;
    case CUDA_REG_SR:
        s->sr = val;
        break;
    case CUDA_REG_ACR:
        s->acr = val;
        cuda_timer_update(s, &s->timers[0], qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        cuda_update(s);
        break;
    case CUDA_REG_PCR:
        s->pcr = val;
        break;
    case CUDA_REG_IFR:
        /* reset bits */
        s->ifr &= ~val;
        cuda_update_irq(s);
        break;
    case CUDA_REG_IER:
        if (val & IER_SET) {
            /* set bits */
            s->ier |= val & 0x7f;
        } else {
            /* reset bits */
            s->ier &= ~val;
        }
        cuda_update_irq(s);
        break;
    default:
    case CUDA_REG_ANH:
        s->anh = val;
        break;
    }
}
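
/* Byte-at-a-time handshake with the host: the guest toggles TIP/TACK in
   register B around each byte, the byte itself travels through the shift
   register (SR), SR_OUT in ACR selects the transfer direction, and every
   completed byte is acknowledged with a (possibly delayed) SR_INT.  TREQ
   is driven by the CUDA to tell the host that response data is waiting. */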
/* NOTE: TIP and TREQ are negated */
static void cuda_update(CUDAState *s)
{
    int packet_received, len;

    packet_received = 0;
    if (!(s->b & TIP)) {
        /* transfer requested from host */

        if (s->acr & SR_OUT) {
            /* data output */
            if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                if (s->data_out_index < sizeof(s->data_out)) {
                    CUDA_DPRINTF("send: %02x\n", s->sr);
                    s->data_out[s->data_out_index++] = s->sr;
                    cuda_delay_set_sr_int(s);
                }
            }
        } else {
            if (s->data_in_index < s->data_in_size) {
                /* data input */
                if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                    s->sr = s->data_in[s->data_in_index++];
                    CUDA_DPRINTF("recv: %02x\n", s->sr);
                    /* indicate end of transfer */
                    if (s->data_in_index >= s->data_in_size) {
                        s->b = (s->b | TREQ);
                    }
                    cuda_delay_set_sr_int(s);
                }
            }
        }
    } else {
        /* no transfer requested: handle sync case */
        if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
            /* update TREQ state each time TACK change state */
            if (s->b & TACK)
                s->b = (s->b | TREQ);
            else
                s->b = (s->b & ~TREQ);
            cuda_delay_set_sr_int(s);
        } else {
            if (!(s->last_b & TIP)) {
                /* handle end of host to cuda transfer */
                packet_received = (s->data_out_index > 0);
                /* always an IRQ at the end of transfer */
                cuda_delay_set_sr_int(s);
            }
            /* signal if there is data to read */
            if (s->data_in_index < s->data_in_size) {
                s->b = (s->b & ~TREQ);
            }
        }
    }

    s->last_acr = s->acr;
    s->last_b = s->b;

    /* NOTE: cuda_receive_packet_from_host() can call cuda_update()
       recursively */
    if (packet_received) {
        len = s->data_out_index;
        s->data_out_index = 0;
        cuda_receive_packet_from_host(s, s->data_out, len);
    }
}

static void cuda_send_packet_to_host(CUDAState *s,
                                     const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_send_packet_to_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    memcpy(s->data_in, data, len);
    s->data_in_size = len;
    s->data_in_index = 0;
    cuda_update(s);
    cuda_delay_set_sr_int(s);
}
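
/* Autopoll: while enabled, cuda_adb_poll() below asks the ADB bus for
   pending events every autopoll_rate_ms milliseconds (20 ms by default,
   i.e. roughly 50 polls per second) and forwards any data to the host as
   an ADB_PACKET carrying the 0x40 "polled data" flag. */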
static void cuda_adb_poll(void *opaque)
{
    CUDAState *s = opaque;
    uint8_t obuf[ADB_MAX_OUT_LEN + 2];
    int olen;

    olen = adb_poll(&s->adb_bus, obuf + 2, s->adb_poll_mask);
    if (olen > 0) {
        obuf[0] = ADB_PACKET;
        obuf[1] = 0x40; /* polled data */
        cuda_send_packet_to_host(s, obuf, olen + 2);
    }
    timer_mod(s->adb_poll_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              (get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
}

/* description of commands */
typedef struct CudaCommand {
    uint8_t command;
    const char *name;
    bool (*handler)(CUDAState *s,
                    const uint8_t *in_args, int in_len,
                    uint8_t *out_args, int *out_len);
} CudaCommand;
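/* A handler receives the command payload (the bytes after the command code)
   and may append up to 13 reply bytes to out_args, setting *out_len
   accordingly.  Returning true makes cuda_receive_packet() send the normal
   CUDA_PACKET reply; returning false makes it send an ERROR_PACKET with
   status 5 ("bad parameters"). */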

static bool cuda_cmd_autopoll(CUDAState *s,
                              const uint8_t *in_data, int in_len,
                              uint8_t *out_data, int *out_len)
{
    int autopoll;

    if (in_len != 1) {
        return false;
    }

    autopoll = (in_data[0] != 0);
    if (autopoll != s->autopoll) {
        s->autopoll = autopoll;
        if (autopoll) {
            timer_mod(s->adb_poll_timer,
                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                      (get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
        } else {
            timer_del(s->adb_poll_timer);
        }
    }
    return true;
}

static bool cuda_cmd_set_autorate(CUDAState *s,
                                  const uint8_t *in_data, int in_len,
                                  uint8_t *out_data, int *out_len)
{
    if (in_len != 1) {
        return false;
    }

    /* we don't want a period of 0 ms */
    /* FIXME: check what real hardware does */
    if (in_data[0] == 0) {
        return false;
    }

    s->autopoll_rate_ms = in_data[0];
    if (s->autopoll) {
        timer_mod(s->adb_poll_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (get_ticks_per_sec() / (1000 / s->autopoll_rate_ms)));
    }
    return true;
}
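
/* SET_DEVICE_LIST: the two payload bytes form a big-endian 16-bit bitmap
   of ADB device addresses that autopoll should watch; it is stored in
   adb_poll_mask and handed to adb_poll() on every poll. */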
static bool cuda_cmd_set_device_list(CUDAState *s,
                                     const uint8_t *in_data, int in_len,
                                     uint8_t *out_data, int *out_len)
{
    if (in_len != 2) {
        return false;
    }

    s->adb_poll_mask = (((uint16_t)in_data[0]) << 8) | in_data[1];
    return true;
}

static const CudaCommand handlers[] = {
    { CUDA_AUTOPOLL, "AUTOPOLL", cuda_cmd_autopoll },
    { CUDA_SET_AUTO_RATE, "SET_AUTO_RATE", cuda_cmd_set_autorate },
    { CUDA_SET_DEVICE_LIST, "SET_DEVICE_LIST", cuda_cmd_set_device_list },
};
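
/* Porting another command to this table only needs a handler plus a table
 * entry.  As an illustrative sketch only (not part of this change, and not
 * checked against real CUDA behaviour), a handler for CUDA_GET_DEVICE_LIST
 * reporting the current mask could look like:
 *
 *     static bool cuda_cmd_get_device_list(CUDAState *s,
 *                                          const uint8_t *in_data, int in_len,
 *                                          uint8_t *out_data, int *out_len)
 *     {
 *         if (in_len != 0) {
 *             return false;
 *         }
 *
 *         out_data[0] = s->adb_poll_mask >> 8;
 *         out_data[1] = s->adb_poll_mask & 0xff;
 *         *out_len = 2;
 *         return true;
 *     }
 *
 * together with { CUDA_GET_DEVICE_LIST, "GET_DEVICE_LIST",
 * cuda_cmd_get_device_list } added to handlers[] above. */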

static void cuda_receive_packet(CUDAState *s,
                                const uint8_t *data, int len)
{
    uint8_t obuf[16] = { CUDA_PACKET, 0, data[0] };
    int i, out_len = 0;
    uint32_t ti;

    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
        const CudaCommand *desc = &handlers[i];
        if (desc->command == data[0]) {
            CUDA_DPRINTF("handling command %s\n", desc->name);
            out_len = 0;
            if (desc->handler(s, data + 1, len - 1, obuf + 3, &out_len)) {
                cuda_send_packet_to_host(s, obuf, 3 + out_len);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "CUDA: %s: wrong parameters %d\n",
                              desc->name, len);
                obuf[0] = ERROR_PACKET;
                obuf[1] = 0x5; /* bad parameters */
                obuf[2] = CUDA_PACKET;
                obuf[3] = data[0];
                cuda_send_packet_to_host(s, obuf, 4);
            }
            return;
        }
    }
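
    /* Commands that have not yet been converted to the handler table above
       are still handled ad hoc below. */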
    switch(data[0]) {
    case CUDA_GET_6805_ADDR:
        cuda_send_packet_to_host(s, obuf, 3);
        return;
    case CUDA_SET_TIME:
        ti = (((uint32_t)data[1]) << 24) + (((uint32_t)data[2]) << 16) + (((uint32_t)data[3]) << 8) + data[4];
        s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
        cuda_send_packet_to_host(s, obuf, 3);
        return;
    case CUDA_GET_TIME:
        ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / get_ticks_per_sec());
        obuf[3] = ti >> 24;
        obuf[4] = ti >> 16;
        obuf[5] = ti >> 8;
        obuf[6] = ti;
        cuda_send_packet_to_host(s, obuf, 7);
        return;
    case CUDA_FILE_SERVER_FLAG:
    case CUDA_SET_POWER_MESSAGES:
        cuda_send_packet_to_host(s, obuf, 3);
        return;
    case CUDA_POWERDOWN:
        cuda_send_packet_to_host(s, obuf, 3);
        qemu_system_shutdown_request();
        return;
    case CUDA_RESET_SYSTEM:
        cuda_send_packet_to_host(s, obuf, 3);
        qemu_system_reset_request();
        return;
    case CUDA_COMBINED_FORMAT_IIC:
        obuf[0] = ERROR_PACKET;
        obuf[1] = 0x5;
        obuf[2] = CUDA_PACKET;
        obuf[3] = data[0];
        cuda_send_packet_to_host(s, obuf, 4);
        return;
    case CUDA_GET_SET_IIC:
        if (len == 4) {
            cuda_send_packet_to_host(s, obuf, 3);
        } else {
            obuf[0] = ERROR_PACKET;
            obuf[1] = 0x2;
            obuf[2] = CUDA_PACKET;
            obuf[3] = data[0];
            cuda_send_packet_to_host(s, obuf, 4);
        }
        return;
    default:
        break;
    }

    qemu_log_mask(LOG_GUEST_ERROR, "CUDA: unknown command 0x%02x\n", data[0]);
    obuf[0] = ERROR_PACKET;
    obuf[1] = 0x2; /* unknown command */
    obuf[2] = CUDA_PACKET;
    obuf[3] = data[0];
    cuda_send_packet_to_host(s, obuf, 4);
}

static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_receive_packet_from_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    switch(data[0]) {
    case ADB_PACKET:
        {
            uint8_t obuf[ADB_MAX_OUT_LEN + 3];
            int olen;
            olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1);
            if (olen > 0) {
                obuf[0] = ADB_PACKET;
                obuf[1] = 0x00;
                cuda_send_packet_to_host(s, obuf, olen + 2);
            } else {
                /* error */
                obuf[0] = ADB_PACKET;
                obuf[1] = -olen;
                obuf[2] = data[1];
                olen = 0;
                cuda_send_packet_to_host(s, obuf, olen + 3);
            }
        }
        break;
    case CUDA_PACKET:
        cuda_receive_packet(s, data + 1, len - 1);
        break;
    }
}

static void cuda_writew (void *opaque, hwaddr addr, uint32_t value)
{
}

static void cuda_writel (void *opaque, hwaddr addr, uint32_t value)
{
}

static uint32_t cuda_readw (void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t cuda_readl (void *opaque, hwaddr addr)
{
    return 0;
}

static const MemoryRegionOps cuda_ops = {
    .old_mmio = {
        .write = {
            cuda_writeb,
            cuda_writew,
            cuda_writel,
        },
        .read = {
            cuda_readb,
            cuda_readw,
            cuda_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
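
/* The legacy .old_mmio dispatch table selects a callback by access size
   (byte, word, long); only the byte accessors do real work, the 16- and
   32-bit stubs above ignore writes and read back as 0. */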

static bool cuda_timer_exist(void *opaque, int version_id)
{
    CUDATimer *s = opaque;

    return s->timer != NULL;
}

static const VMStateDescription vmstate_cuda_timer = {
    .name = "cuda_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(latch, CUDATimer),
        VMSTATE_UINT16(counter_value, CUDATimer),
        VMSTATE_INT64(load_time, CUDATimer),
        VMSTATE_INT64(next_irq_time, CUDATimer),
        VMSTATE_TIMER_PTR_TEST(timer, CUDATimer, cuda_timer_exist),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_cuda = {
    .name = "cuda",
    .version_id = 4,
    .minimum_version_id = 4,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(a, CUDAState),
        VMSTATE_UINT8(b, CUDAState),
        VMSTATE_UINT8(last_b, CUDAState),
        VMSTATE_UINT8(dira, CUDAState),
        VMSTATE_UINT8(dirb, CUDAState),
        VMSTATE_UINT8(sr, CUDAState),
        VMSTATE_UINT8(acr, CUDAState),
        VMSTATE_UINT8(last_acr, CUDAState),
        VMSTATE_UINT8(pcr, CUDAState),
        VMSTATE_UINT8(ifr, CUDAState),
        VMSTATE_UINT8(ier, CUDAState),
        VMSTATE_UINT8(anh, CUDAState),
        VMSTATE_INT32(data_in_size, CUDAState),
        VMSTATE_INT32(data_in_index, CUDAState),
        VMSTATE_INT32(data_out_index, CUDAState),
        VMSTATE_UINT8(autopoll, CUDAState),
        VMSTATE_UINT8(autopoll_rate_ms, CUDAState),
        VMSTATE_UINT16(adb_poll_mask, CUDAState),
        VMSTATE_BUFFER(data_in, CUDAState),
        VMSTATE_BUFFER(data_out, CUDAState),
        VMSTATE_UINT32(tick_offset, CUDAState),
        VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
                             vmstate_cuda_timer, CUDATimer),
        VMSTATE_TIMER_PTR(adb_poll_timer, CUDAState),
        VMSTATE_TIMER_PTR(sr_delay_timer, CUDAState),
        VMSTATE_END_OF_LIST()
    },
};

static void cuda_reset(DeviceState *dev)
{
    CUDAState *s = CUDA(dev);

    s->b = 0;
    s->a = 0;
    s->dirb = 0xff;
    s->dira = 0;
    s->sr = 0;
    s->acr = 0;
    s->pcr = 0;
    s->ifr = 0;
    s->ier = 0;
    //    s->ier = T1_INT | SR_INT;
    s->anh = 0;
    s->data_in_size = 0;
    s->data_in_index = 0;
    s->data_out_index = 0;
    s->autopoll = 0;

    s->timers[0].latch = 0xffff;
    set_counter(s, &s->timers[0], 0xffff);

    s->timers[1].latch = 0xffff;

    s->sr_delay_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_set_sr_int, s);
}

static void cuda_realizefn(DeviceState *dev, Error **errp)
{
    CUDAState *s = CUDA(dev);
    struct tm tm;

    s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
    s->timers[0].frequency = s->frequency;
    s->timers[1].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer2, s);
    s->timers[1].frequency = (SCALE_US * 6000) / 4700;

    qemu_get_timedate(&tm, 0);
    s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;

    s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s);
    s->autopoll_rate_ms = 20;
    s->adb_poll_mask = 0xffff;
}

static void cuda_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    CUDAState *s = CUDA(obj);
    int i;

    memory_region_init_io(&s->mem, obj, &cuda_ops, s, "cuda", 0x2000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);

    for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
        s->timers[i].index = i;
    }

    qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
                        DEVICE(obj), "adb.0");
}

static Property cuda_properties[] = {
    DEFINE_PROP_UINT64("frequency", CUDAState, frequency, 0),
    DEFINE_PROP_END_OF_LIST()
};

static void cuda_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = cuda_realizefn;
    dc->reset = cuda_reset;
    dc->vmsd = &vmstate_cuda;
    dc->props = cuda_properties;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}

static const TypeInfo cuda_type_info = {
    .name = TYPE_CUDA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(CUDAState),
    .instance_init = cuda_initfn,
    .class_init = cuda_class_init,
};

static void cuda_register_types(void)
{
    type_register_static(&cuda_type_info);
}

type_init(cuda_register_types)