/*
 * QEMU generic PowerPC hardware System Emulator
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/ppc_e500.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "trace.h"

//#define PPC_DEBUG_IRQ
//#define PPC_DEBUG_TB

#ifdef PPC_DEBUG_IRQ
#  define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
#else
#  define LOG_IRQ(...) do { } while (0)
#endif

#ifdef PPC_DEBUG_TB
#  define LOG_TB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_TB(...) do { } while (0)
#endif

static void cpu_ppc_tb_stop (CPUPPCState *env);
static void cpu_ppc_tb_start (CPUPPCState *env);

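/*
 * Update one bit of env->pending_interrupts and propagate the result to the
 * CPU: CPU_INTERRUPT_HARD is raised while any source is pending and cleared
 * once none remain; KVM is notified when the pending mask actually changes.
 */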
void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    unsigned int old_pending;
    bool locked = false;

    /* We may already have the BQL if coming from the reset path */
    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    old_pending = env->pending_interrupts;

    if (level) {
        env->pending_interrupts |= 1 << n_IRQ;
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        env->pending_interrupts &= ~(1 << n_IRQ);
        if (env->pending_interrupts == 0) {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }

    if (old_pending != env->pending_interrupts) {
#ifdef CONFIG_KVM
        kvmppc_set_interrupt(cpu, n_IRQ, level);
#endif
    }

    LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
                "req %08x\n", __func__, env, n_IRQ, level,
                env->pending_interrupts, CPU(cpu)->interrupt_request);

    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* PowerPC 6xx / 7xx internal IRQ controller */
static void ppc6xx_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC6xx_INPUT_TBEN:
            /* Level sensitive - active high */
            LOG_IRQ("%s: %s the time base\n",
                        __func__, level ? "start" : "stop");
            if (level) {
                cpu_ppc_tb_start(env);
            } else {
                cpu_ppc_tb_stop(env);
            }
        case PPC6xx_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC6xx_INPUT_SMI:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the SMI IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
            break;
        case PPC6xx_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: actual reaction may depend on HID0 status
             *            603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                LOG_IRQ("%s: raise machine check state\n",
                            __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC6xx_INPUT_CKSTP_IN:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            /* XXX: Note that the only way to restart the CPU is to reset it */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            }
            break;
        case PPC6xx_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                LOG_IRQ("%s: reset the CPU\n", __func__);
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC6xx_INPUT_SRESET:
            LOG_IRQ("%s: set the RESET IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

void ppc6xx_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
                                                  PPC6xx_INPUT_NB);
}

#if defined(TARGET_PPC64)
/* PowerPC 970 internal IRQ controller */
static void ppc970_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC970_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC970_INPUT_THINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__,
                        level);
            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
            break;
        case PPC970_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: actual reaction may depend on HID0 status
             *            603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                LOG_IRQ("%s: raise machine check state\n",
                            __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC970_INPUT_CKSTP:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            } else {
                LOG_IRQ("%s: restart the CPU\n", __func__);
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC970_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC970_INPUT_SRESET:
            LOG_IRQ("%s: set the RESET IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        case PPC970_INPUT_TBEN:
            LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
                        level);
            /* XXX: TODO */
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

void ppc970_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
                                                  PPC970_INPUT_NB);
}

/* POWER7 internal IRQ controller */
static void power7_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);

    switch (pin) {
    case POWER7_INPUT_INT:
        /* Level sensitive - active high */
        LOG_IRQ("%s: set the external IRQ state to %d\n",
                    __func__, level);
        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
        break;
    default:
        /* Unknown pin - do nothing */
        LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
        return;
    }
    if (level) {
        env->irq_input_state |= 1 << pin;
    } else {
        env->irq_input_state &= ~(1 << pin);
    }
}

void ppcPOWER7_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
                                                  POWER7_INPUT_NB);
}
#endif /* defined(TARGET_PPC64) */

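/*
 * 40x reset helpers: the core and chip variants record the reset cause in
 * DBSR (the field masked by 0x300 below) before raising the reset; the
 * system variant requests a full machine reset instead.
 */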
void ppc40x_core_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong dbsr;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    dbsr = env->spr[SPR_40x_DBSR];
    dbsr &= ~0x00000300;
    dbsr |= 0x00000100;
    env->spr[SPR_40x_DBSR] = dbsr;
}

void ppc40x_chip_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong dbsr;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    /* XXX: TODO reset all internal peripherals */
    dbsr = env->spr[SPR_40x_DBSR];
    dbsr &= ~0x00000300;
    dbsr |= 0x00000200;
    env->spr[SPR_40x_DBSR] = dbsr;
}

void ppc40x_system_reset(PowerPCCPU *cpu)
{
    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}

void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    switch ((val >> 28) & 0x3) {
    case 0x0:
        /* No action */
        break;
    case 0x1:
        /* Core reset */
        ppc40x_core_reset(cpu);
        break;
    case 0x2:
        /* Chip reset */
        ppc40x_chip_reset(cpu);
        break;
    case 0x3:
        /* System reset */
        ppc40x_system_reset(cpu);
        break;
    }
}

/* PowerPC 40x internal IRQ controller */
static void ppc40x_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC40x_INPUT_RESET_SYS:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC system\n",
                            __func__);
                ppc40x_system_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CHIP:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
                ppc40x_chip_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CORE:
            /* XXX: TODO: update DBSR[MRR] */
            if (level) {
                LOG_IRQ("%s: reset the PowerPC core\n", __func__);
                ppc40x_core_reset(cpu);
            }
            break;
        case PPC40x_INPUT_CINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the critical IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPC40x_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC40x_INPUT_HALT:
            /* Level sensitive - active low */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            } else {
                LOG_IRQ("%s: restart the CPU\n", __func__);
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC40x_INPUT_DEBUG:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the debug pin state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

void ppc40x_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
                                                  cpu, PPC40x_INPUT_NB);
}

/* PowerPC E500 internal IRQ controller */
static void ppce500_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        switch (pin) {
        case PPCE500_INPUT_MCK:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC system\n",
                            __func__);
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            }
            break;
        case PPCE500_INPUT_RESET_CORE:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC core\n", __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
            }
            break;
        case PPCE500_INPUT_CINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the critical IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPCE500_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the core IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPCE500_INPUT_DEBUG:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the debug pin state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        if (level)
            env->irq_input_state |= 1 << pin;
        else
            env->irq_input_state &= ~(1 << pin);
    }
}

void ppce500_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
                                                  cpu, PPCE500_INPUT_NB);
}

/* Enable or Disable the E500 EPR capability */
void ppce500_set_mpic_proxy(bool enabled)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        cpu->env.mpic_proxy = enabled;
        if (kvm_enabled()) {
            kvmppc_set_mpic_proxy(cpu, enabled);
        }
    }
}

/*****************************************************************************/
/* PowerPC time base and decrementer emulation */

uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
{
    /* TB time in tb periods */
    return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
}

uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    if (kvm_enabled()) {
        return env->spr[SPR_TBL];
    }

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb;
}

static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb >> 32;
}

uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
{
    if (kvm_enabled()) {
        return env->spr[SPR_TBU];
    }

    return _cpu_ppc_load_tbu(env);
}

static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
                                    int64_t *tb_offsetp, uint64_t value)
{
    *tb_offsetp = value -
        muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);

    LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
           __func__, value, *tb_offsetp);
}

void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, tb | (uint64_t)value);
}

static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
}

void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb;
}

uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb >> 32;
}

void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, tb | (uint64_t)value);
}

void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
}

static void cpu_ppc_tb_stop (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is already frozen, do nothing */
    if (tb_env->tb_freq != 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base */
        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
        /* Get the alternate time base */
        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
        /* Store the time base value (ie compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value (compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
        /* Set the time base frequency to zero */
        tb_env->tb_freq = 0;
        /* Now, the time bases are frozen to tb_offset / atb_offset value */
    }
}

static void cpu_ppc_tb_start (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is not frozen, do nothing */
    if (tb_env->tb_freq == 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base from tb_offset */
        tb = tb_env->tb_offset;
        /* Get the alternate time base from atb_offset */
        atb = tb_env->atb_offset;
        /* Restore the tb frequency from the decrementer frequency */
        tb_env->tb_freq = tb_env->decr_freq;
        /* Store the time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    }
}

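/*
 * True when decrementer underflow is edge-triggered only (the
 * PPC_DECR_UNDERFLOW_TRIGGERED flag is set without the LEVEL flag), so the
 * exception must be cleared explicitly once it has been delivered.
 */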
bool ppc_decr_clear_on_delivery(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
}

static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint32_t decr;
    int64_t diff;

    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (diff >= 0) {
        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
        decr = 0;
    } else {
        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    }
    LOG_TB("%s: %08" PRIx32 "\n", __func__, decr);

    return decr;
}

uint32_t cpu_ppc_load_decr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    if (kvm_enabled()) {
        return env->spr[SPR_DECR];
    }

    return _cpu_ppc_load_decr(env, tb_env->decr_next);
}

uint32_t cpu_ppc_load_hdecr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return _cpu_ppc_load_decr(env, tb_env->hdecr_next);
}

uint64_t cpu_ppc_load_purr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t diff;

    diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;

    return tb_env->purr_load +
        muldiv64(diff, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
}

/* When the decrementer expires,
 * all we need to do is generate or queue a CPU exception
 */
static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
{
    /* Raise it */
    LOG_TB("raise decrementer exception\n");
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}

static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
}

static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* Raise it */
    LOG_TB("raise hv decrementer exception\n");

    /* The architecture specifies that we don't deliver HDEC
     * interrupts in a PM state. Not only do they not cause a
     * wakeup, they also get effectively discarded.
     */
    if (!env->in_pm_state) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
    }
}

static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}

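/*
 * Shared helper for DECR and HDECR stores: for values that should fire
 * immediately it raises (or lowers) the corresponding exception, otherwise
 * it re-arms the QEMU virtual-clock timer to expire when the register
 * would underflow.
 */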
static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
                                 QEMUTimer *timer,
                                 void (*raise_excp)(void *),
                                 void (*lower_excp)(PowerPCCPU *),
                                 uint32_t decr, uint32_t value)
{
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t now, next;

    LOG_TB("%s: %08" PRIx32 " => %08" PRIx32 "\n", __func__,
           decr, value);

    if (kvm_enabled()) {
        /* KVM handles decrementer exceptions, we don't need our own timer */
        return;
    }

    /*
     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
     * interrupt.
     *
     * If we get a really small DEC value, we can assume that by the time we
     * handled it we should inject an interrupt already.
     *
     * On MSB level based DEC implementations the MSB always means the interrupt
     * is pending, so raise it on those.
     *
     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
     * an edge interrupt, so raise it here too.
     */
    if ((value < 3) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && (value & 0x80000000)) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000)
          && !(decr & 0x80000000))) {
        (*raise_excp)(cpu);
        return;
    }

    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
    if (!(value & 0x80000000) && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
        (*lower_excp)(cpu);
    }

    /* Calculate the next timer event */
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    *nextp = next;

    /* Adjust timer */
    timer_mod(timer, next);
}

static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
                                       uint32_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
                         value);
}

void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value);
}

static void cpu_ppc_decr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_decr_excp(cpu);
}

static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
                                        uint32_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    if (tb_env->hdecr_timer != NULL) {
        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
                             hdecr, value);
    }
}

void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value);
}

static void cpu_ppc_hdecr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_hdecr_excp(cpu);
}

static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    tb_env->purr_load = value;
    tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}

static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_tb_t *tb_env = env->tb_env;

    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /* There is a bug in Linux 2.4 kernels:
     * if a decrementer exception is pending when it enables msr_ee at startup,
     * it's not ready to handle it...
     */
    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
    cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
}

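/*
 * Save the guest-visible timebase (host ticks plus tb_offset) so it can be
 * migrated and re-applied on the destination by timebase_load() below.
 */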
static void timebase_save(PPCTimebase *tb)
{
    uint64_t ticks = cpu_get_host_ticks();
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    /* not used anymore, we keep it for compatibility */
    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
    /*
     * tb_offset is only expected to be changed by QEMU so
     * there is no need to update it from KVM here
     */
    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
}

static void timebase_load(PPCTimebase *tb)
{
    CPUState *cpu;
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    int64_t tb_off_adj, tb_off;
    unsigned long freq;

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    freq = first_ppc_cpu->env.tb_env->tb_freq;

    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();

    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
                        (tb_off_adj - tb_off) / freq);

    /* Set new offset to all CPUs */
    CPU_FOREACH(cpu) {
        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
        pcpu->env.tb_env->tb_offset = tb_off_adj;
#if defined(CONFIG_KVM)
        kvm_set_one_reg(cpu, KVM_REG_PPC_TB_OFFSET,
                        &pcpu->env.tb_env->tb_offset);
#endif
    }
}

void cpu_ppc_clock_vm_state_change(void *opaque, int running,
                                   RunState state)
{
    PPCTimebase *tb = opaque;

    if (running) {
        timebase_load(tb);
    } else {
        timebase_save(tb);
    }
}

/*
 * When migrating, read the clock just before migration,
 * so that the guest clock counts during the events
 * between:
 *
 *  * vm_stop()
 *  *
 *  * pre_save()
 *
 *  This reduces clock difference on migration from 5s
 *  to 0.1s (when max_downtime == 5s), because sending the
 *  final pages of memory (which happens between vm_stop()
 *  and pre_save()) takes max_downtime.
 */
static int timebase_pre_save(void *opaque)
{
    PPCTimebase *tb = opaque;

    timebase_save(tb);

    return 0;
}

const VMStateDescription vmstate_ppc_timebase = {
    .name = "timebase",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = timebase_pre_save,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(guest_timebase, PPCTimebase),
        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
        VMSTATE_END_OF_LIST()
    },
};

/* Set up (once) timebase frequency (in Hz) */
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_tb_t *tb_env;

    tb_env = g_malloc0(sizeof(ppc_tb_t));
    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    if (env->insns_flags & PPC_SEGMENT_64B) {
        /* All Book3S 64bit CPUs implement level based DEC logic */
        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
    }
    /* Create new timer */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
    if (env->has_hv_mode) {
        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
                                           cpu);
    } else {
        tb_env->hdecr_timer = NULL;
    }
    cpu_ppc_set_tb_clk(env, freq);

    return &cpu_ppc_set_tb_clk;
}

/* Specific helpers for POWER & PowerPC 601 RTC */
void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
{
    return _cpu_ppc_load_tbu(env);
}

void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
{
    cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
}

uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/*****************************************************************************/
/* PowerPC 40x timers */

/* PIT, FIT & WDT */
typedef struct ppc40x_timer_t ppc40x_timer_t;
struct ppc40x_timer_t {
    uint64_t pit_reload;  /* PIT auto-reload value        */
    uint64_t fit_next;    /* Tick for next FIT interrupt  */
    QEMUTimer *fit_timer;
    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
    QEMUTimer *wdt_timer;

    /* 405 have the PIT, 440 have a DECR.  */
    unsigned int decr_excp;
};

/* Fixed interval timer */
static void cpu_4xx_fit_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    env = opaque;
    cpu = ppc_env_get_cpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
    case 0:
        next = 1 << 9;
        break;
    case 1:
        next = 1 << 13;
        break;
    case 2:
        next = 1 << 17;
        break;
    case 3:
        next = 1 << 21;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
    if (next == now)
        next++;
    timer_mod(ppc40x_timer->fit_timer, next);
    env->spr[SPR_40x_TSR] |= 1 << 26;
    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
    }
    LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
           (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
}

/* Programmable interval timer */
static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
{
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    ppc40x_timer = tb_env->opaque;
    if (ppc40x_timer->pit_reload <= 1 ||
        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
        /* Stop PIT */
        LOG_TB("%s: stop PIT\n", __func__);
        timer_del(tb_env->decr_timer);
    } else {
        LOG_TB("%s: start PIT %016" PRIx64 "\n",
               __func__, ppc40x_timer->pit_reload);
        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        next = now + muldiv64(ppc40x_timer->pit_reload,
                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
        if (is_excp)
            next += tb_env->decr_next - now;
        if (next == now)
            next++;
        timer_mod(tb_env->decr_timer, next);
        tb_env->decr_next = next;
    }
}

static void cpu_4xx_pit_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    env = opaque;
    cpu = ppc_env_get_cpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    env->spr[SPR_40x_TSR] |= 1 << 27;
    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
    }
    start_stop_pit(env, tb_env, 1);
    LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
           "%016" PRIx64 "\n", __func__,
           (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
           ppc40x_timer->pit_reload);
}

/* Watchdog timer */
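/*
 * Each expiry advances the watchdog state kept in TSR[31:30]: the first two
 * states only set TSR bit 31 and re-arm the timer, the next sets bit 30 and
 * raises PPC_INTERRUPT_WDT when TCR bit 27 is set, and the last performs the
 * reset selected by TCR bits 29:28.
 */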
static void cpu_4xx_wdt_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    env = opaque;
    cpu = ppc_env_get_cpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
    case 0:
        next = 1 << 17;
        break;
    case 1:
        next = 1 << 21;
        break;
    case 2:
        next = 1 << 25;
        break;
    case 3:
        next = 1 << 29;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    if (next == now)
        next++;
    LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
    case 0x0:
    case 0x1:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1U << 31;
        break;
    case 0x2:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1 << 30;
        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
        }
        break;
    case 0x3:
        env->spr[SPR_40x_TSR] &= ~0x30000000;
        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
        case 0x0:
            /* No reset */
            break;
        case 0x1: /* Core reset */
            ppc40x_core_reset(cpu);
            break;
        case 0x2: /* Chip reset */
            ppc40x_chip_reset(cpu);
            break;
        case 0x3: /* System reset */
            ppc40x_system_reset(cpu);
            break;
        }
    }
}

void store_40x_pit (CPUPPCState *env, target_ulong val)
{
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val);
    ppc40x_timer->pit_reload = val;
    start_stop_pit(env, tb_env, 0);
}

target_ulong load_40x_pit (CPUPPCState *env)
{
    return cpu_ppc_load_decr(env);
}

static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    ppc_tb_t *tb_env = env->tb_env;

    LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__,
           freq);
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /* XXX: we should also update all timers */
}

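/*
 * Set up the 40x PIT, FIT and WDT: allocate the timer state, hook the three
 * callbacks to QEMU virtual-clock timers (the decrementer timer doubles as
 * the PIT), and return the clock-setup callback for the board code.
 */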
clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
                                  unsigned int decr_excp)
{
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    tb_env = g_malloc0(sizeof(ppc_tb_t));
    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    tb_env->opaque = ppc40x_timer;
    LOG_TB("%s freq %" PRIu32 "\n", __func__, freq);
    if (ppc40x_timer != NULL) {
        /* We use decr timer for PIT */
        tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
        ppc40x_timer->fit_timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
        ppc40x_timer->wdt_timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
        ppc40x_timer->decr_excp = decr_excp;
    }

    return &ppc_40x_set_tb_clk;
}

/*****************************************************************************/
/* Embedded PowerPC Device Control Registers */
typedef struct ppc_dcrn_t ppc_dcrn_t;
struct ppc_dcrn_t {
    dcr_read_cb dcr_read;
    dcr_write_cb dcr_write;
    void *opaque;
};

/* XXX: on 460, DCR addresses are 32 bits wide,
 *      using DCRIPR to get the 22 upper bits of the DCR address
 */
#define DCRN_NB 1024
struct ppc_dcr_t {
    ppc_dcrn_t dcrn[DCRN_NB];
    int (*read_error)(int dcrn);
    int (*write_error)(int dcrn);
};

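/*
 * DCR accesses are dispatched to the read/write callbacks registered for the
 * given DCR number; out-of-range or unregistered numbers fall back to the
 * per-CPU read_error/write_error handlers when those are set.
 */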
int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    ppc_dcrn_t *dcr;

    if (dcrn < 0 || dcrn >= DCRN_NB)
        goto error;
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->dcr_read == NULL)
        goto error;
    *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);

    return 0;

 error:
    if (dcr_env->read_error != NULL)
        return (*dcr_env->read_error)(dcrn);

    return -1;
}

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    ppc_dcrn_t *dcr;

    if (dcrn < 0 || dcrn >= DCRN_NB)
        goto error;
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->dcr_write == NULL)
        goto error;
    (*dcr->dcr_write)(dcr->opaque, dcrn, val);

    return 0;

 error:
    if (dcr_env->write_error != NULL)
        return (*dcr_env->write_error)(dcrn);

    return -1;
}

int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
                      dcr_read_cb dcr_read, dcr_write_cb dcr_write)
{
    ppc_dcr_t *dcr_env;
    ppc_dcrn_t *dcr;

    dcr_env = env->dcr_env;
    if (dcr_env == NULL)
        return -1;
    if (dcrn < 0 || dcrn >= DCRN_NB)
        return -1;
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->opaque != NULL ||
        dcr->dcr_read != NULL ||
        dcr->dcr_write != NULL)
        return -1;
    dcr->opaque = opaque;
    dcr->dcr_read = dcr_read;
    dcr->dcr_write = dcr_write;

    return 0;
}

int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
                  int (*write_error)(int dcrn))
{
    ppc_dcr_t *dcr_env;

    dcr_env = g_malloc0(sizeof(ppc_dcr_t));
    dcr_env->read_error = read_error;
    dcr_env->write_error = write_error;
    env->dcr_env = dcr_env;

    return 0;
}

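/*
 * Illustrative sketch (not part of this file): a 4xx device model would
 * typically call ppc_dcr_register() once per DCR it implements, e.g.
 *
 *   ppc_dcr_register(env, MY_DCRN, dev, my_dcr_read, my_dcr_write);
 *
 * where my_dcr_read/my_dcr_write follow the dcr_read_cb/dcr_write_cb
 * signatures used above; MY_DCRN and the callback names are hypothetical.
 */
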
/*****************************************************************************/
/* Debug port */
void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val)
{
    addr &= 0xF;
    switch (addr) {
    case 0:
        printf("%c", val);
        break;
    case 1:
        printf("\n");
        fflush(stdout);
        break;
    case 2:
        printf("Set loglevel to %04" PRIx32 "\n", val);
        qemu_set_log(val | 0x100);