2 * PowerPC exception emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
26 #include "helper_regs.h"
/* Compile-time debug switches; all disabled by default. */
/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */

#ifdef DEBUG_EXCEPTIONS
#  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_EXCP(...) do { } while (0)
#endif
38 /*****************************************************************************/
39 /* Exception processing */
40 #if defined(CONFIG_USER_ONLY)
41 void ppc_cpu_do_interrupt(CPUState
*cs
)
43 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
44 CPUPPCState
*env
= &cpu
->env
;
46 cs
->exception_index
= POWERPC_EXCP_NONE
;
50 static void ppc_hw_interrupt(CPUPPCState
*env
)
52 CPUState
*cs
= env_cpu(env
);
54 cs
->exception_index
= POWERPC_EXCP_NONE
;
57 #else /* defined(CONFIG_USER_ONLY) */
58 static inline void dump_syscall(CPUPPCState
*env
)
60 qemu_log_mask(CPU_LOG_INT
, "syscall r0=%016" PRIx64
" r3=%016" PRIx64
61 " r4=%016" PRIx64
" r5=%016" PRIx64
" r6=%016" PRIx64
62 " nip=" TARGET_FMT_lx
"\n",
63 ppc_dump_gpr(env
, 0), ppc_dump_gpr(env
, 3),
64 ppc_dump_gpr(env
, 4), ppc_dump_gpr(env
, 5),
65 ppc_dump_gpr(env
, 6), env
->nip
);
68 static int powerpc_reset_wakeup(CPUState
*cs
, CPUPPCState
*env
, int excp
,
71 /* We no longer are in a PM state */
72 env
->resume_as_sreset
= false;
74 /* Pretend to be returning from doze always as we don't lose state */
75 *msr
|= (0x1ull
<< (63 - 47));
77 /* Machine checks are sent normally */
78 if (excp
== POWERPC_EXCP_MCHECK
) {
82 case POWERPC_EXCP_RESET
:
83 *msr
|= 0x4ull
<< (63 - 45);
85 case POWERPC_EXCP_EXTERNAL
:
86 *msr
|= 0x8ull
<< (63 - 45);
88 case POWERPC_EXCP_DECR
:
89 *msr
|= 0x6ull
<< (63 - 45);
91 case POWERPC_EXCP_SDOOR
:
92 *msr
|= 0x5ull
<< (63 - 45);
94 case POWERPC_EXCP_SDOOR_HV
:
95 *msr
|= 0x3ull
<< (63 - 45);
97 case POWERPC_EXCP_HV_MAINT
:
98 *msr
|= 0xaull
<< (63 - 45);
100 case POWERPC_EXCP_HVIRT
:
101 *msr
|= 0x9ull
<< (63 - 45);
104 cpu_abort(cs
, "Unsupported exception %d in Power Save mode\n",
107 return POWERPC_EXCP_RESET
;
110 static uint64_t ppc_excp_vector_offset(CPUState
*cs
, int ail
)
120 case AIL_C000_0000_0000_4000
:
121 offset
= 0xc000000000004000ull
;
124 cpu_abort(cs
, "Invalid AIL combination %d\n", ail
);
132 * Note that this function should be greatly optimized when called
133 * with a constant excp, from ppc_hw_interrupt
135 static inline void powerpc_excp(PowerPCCPU
*cpu
, int excp_model
, int excp
)
137 CPUState
*cs
= CPU(cpu
);
138 CPUPPCState
*env
= &cpu
->env
;
139 target_ulong msr
, new_msr
, vector
;
140 int srr0
, srr1
, asrr0
, asrr1
, lev
, ail
;
143 qemu_log_mask(CPU_LOG_INT
, "Raise exception at " TARGET_FMT_lx
144 " => %08x (%02x)\n", env
->nip
, excp
, env
->error_code
);
146 /* new srr1 value excluding must-be-zero bits */
147 if (excp_model
== POWERPC_EXCP_BOOKE
) {
150 msr
= env
->msr
& ~0x783f0000ULL
;
154 * new interrupt handler msr preserves existing HV and ME unless
155 * explicitly overriden
157 new_msr
= env
->msr
& (((target_ulong
)1 << MSR_ME
) | MSR_HVB
);
159 /* target registers */
166 * check for special resume at 0x100 from doze/nap/sleep/winkle on
169 if (env
->resume_as_sreset
) {
170 excp
= powerpc_reset_wakeup(cs
, env
, excp
, &msr
);
174 * Exception targetting modifiers
176 * LPES0 is supported on POWER7/8/9
177 * LPES1 is not supported (old iSeries mode)
179 * On anything else, we behave as if LPES0 is 1
180 * (externals don't alter MSR:HV)
182 * AIL is initialized here but can be cleared by
183 * selected exceptions
185 #if defined(TARGET_PPC64)
186 if (excp_model
== POWERPC_EXCP_POWER7
||
187 excp_model
== POWERPC_EXCP_POWER8
||
188 excp_model
== POWERPC_EXCP_POWER9
) {
189 lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
190 if (excp_model
!= POWERPC_EXCP_POWER7
) {
191 ail
= (env
->spr
[SPR_LPCR
] & LPCR_AIL
) >> LPCR_AIL_SHIFT
;
196 #endif /* defined(TARGET_PPC64) */
203 * Hypervisor emulation assistance interrupt only exists on server
204 * arch 2.05 server or later. We also don't want to generate it if
205 * we don't have HVB in msr_mask (PAPR mode).
207 if (excp
== POWERPC_EXCP_HV_EMU
208 #if defined(TARGET_PPC64)
209 && !((env
->mmu_model
& POWERPC_MMU_64
) && (env
->msr_mask
& MSR_HVB
))
210 #endif /* defined(TARGET_PPC64) */
213 excp
= POWERPC_EXCP_PROGRAM
;
217 case POWERPC_EXCP_NONE
:
218 /* Should never happen */
220 case POWERPC_EXCP_CRITICAL
: /* Critical input */
221 switch (excp_model
) {
222 case POWERPC_EXCP_40x
:
226 case POWERPC_EXCP_BOOKE
:
227 srr0
= SPR_BOOKE_CSRR0
;
228 srr1
= SPR_BOOKE_CSRR1
;
230 case POWERPC_EXCP_G2
:
236 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
239 * Machine check exception is not enabled. Enter
242 fprintf(stderr
, "Machine check while not allowed. "
243 "Entering checkstop state\n");
244 if (qemu_log_separate()) {
245 qemu_log("Machine check while not allowed. "
246 "Entering checkstop state\n");
249 cpu_interrupt_exittb(cs
);
251 if (env
->msr_mask
& MSR_HVB
) {
253 * ISA specifies HV, but can be delivered to guest with HV
254 * clear (e.g., see FWNMI in PAPR).
256 new_msr
|= (target_ulong
)MSR_HVB
;
260 /* machine check exceptions don't have ME set */
261 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
263 /* XXX: should also have something loaded in DAR / DSISR */
264 switch (excp_model
) {
265 case POWERPC_EXCP_40x
:
269 case POWERPC_EXCP_BOOKE
:
270 /* FIXME: choose one or the other based on CPU type */
271 srr0
= SPR_BOOKE_MCSRR0
;
272 srr1
= SPR_BOOKE_MCSRR1
;
273 asrr0
= SPR_BOOKE_CSRR0
;
274 asrr1
= SPR_BOOKE_CSRR1
;
280 case POWERPC_EXCP_DSI
: /* Data storage exception */
281 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx
" DAR=" TARGET_FMT_lx
282 "\n", env
->spr
[SPR_DSISR
], env
->spr
[SPR_DAR
]);
284 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
285 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx
", nip=" TARGET_FMT_lx
286 "\n", msr
, env
->nip
);
287 msr
|= env
->error_code
;
289 case POWERPC_EXCP_EXTERNAL
: /* External input */
293 new_msr
|= (target_ulong
)MSR_HVB
;
294 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
298 if (env
->mpic_proxy
) {
299 /* IACK the IRQ on delivery */
300 env
->spr
[SPR_BOOKE_EPR
] = ldl_phys(cs
->as
, env
->mpic_iack
);
303 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
304 /* Get rS/rD and rA from faulting opcode */
306 * Note: the opcode fields will not be set properly for a
307 * direct store load/store, but nobody cares as nobody
308 * actually uses direct store segments.
310 env
->spr
[SPR_DSISR
] |= (env
->error_code
& 0x03FF0000) >> 16;
312 case POWERPC_EXCP_PROGRAM
: /* Program exception */
313 switch (env
->error_code
& ~0xF) {
314 case POWERPC_EXCP_FP
:
315 if ((msr_fe0
== 0 && msr_fe1
== 0) || msr_fp
== 0) {
316 LOG_EXCP("Ignore floating point exception\n");
317 cs
->exception_index
= POWERPC_EXCP_NONE
;
323 * FP exceptions always have NIP pointing to the faulting
324 * instruction, so always use store_next and claim we are
325 * precise in the MSR.
328 env
->spr
[SPR_BOOKE_ESR
] = ESR_FP
;
330 case POWERPC_EXCP_INVAL
:
331 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx
"\n", env
->nip
);
333 env
->spr
[SPR_BOOKE_ESR
] = ESR_PIL
;
335 case POWERPC_EXCP_PRIV
:
337 env
->spr
[SPR_BOOKE_ESR
] = ESR_PPR
;
339 case POWERPC_EXCP_TRAP
:
341 env
->spr
[SPR_BOOKE_ESR
] = ESR_PTR
;
344 /* Should never occur */
345 cpu_abort(cs
, "Invalid program exception %d. Aborting\n",
350 case POWERPC_EXCP_SYSCALL
: /* System call exception */
352 lev
= env
->error_code
;
355 * We need to correct the NIP which in this case is supposed
356 * to point to the next instruction
360 /* "PAPR mode" built-in hypercall emulation */
361 if ((lev
== 1) && cpu
->vhyp
) {
362 PPCVirtualHypervisorClass
*vhc
=
363 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu
->vhyp
);
364 vhc
->hypercall(cpu
->vhyp
, cpu
);
368 new_msr
|= (target_ulong
)MSR_HVB
;
371 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
372 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
373 case POWERPC_EXCP_DECR
: /* Decrementer exception */
375 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
377 LOG_EXCP("FIT exception\n");
379 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
380 LOG_EXCP("WDT exception\n");
381 switch (excp_model
) {
382 case POWERPC_EXCP_BOOKE
:
383 srr0
= SPR_BOOKE_CSRR0
;
384 srr1
= SPR_BOOKE_CSRR1
;
390 case POWERPC_EXCP_DTLB
: /* Data TLB error */
391 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
393 case POWERPC_EXCP_DEBUG
: /* Debug interrupt */
394 if (env
->flags
& POWERPC_FLAG_DE
) {
395 /* FIXME: choose one or the other based on CPU type */
396 srr0
= SPR_BOOKE_DSRR0
;
397 srr1
= SPR_BOOKE_DSRR1
;
398 asrr0
= SPR_BOOKE_CSRR0
;
399 asrr1
= SPR_BOOKE_CSRR1
;
400 /* DBSR already modified by caller */
402 cpu_abort(cs
, "Debug exception triggered on unsupported model\n");
405 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavailable */
406 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
408 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data interrupt */
410 cpu_abort(cs
, "Embedded floating point data exception "
411 "is not implemented yet !\n");
412 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
414 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round interrupt */
416 cpu_abort(cs
, "Embedded floating point round exception "
417 "is not implemented yet !\n");
418 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
420 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor interrupt */
423 "Performance counter exception is not implemented yet !\n");
425 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
427 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
428 srr0
= SPR_BOOKE_CSRR0
;
429 srr1
= SPR_BOOKE_CSRR1
;
431 case POWERPC_EXCP_RESET
: /* System reset exception */
432 /* A power-saving exception sets ME, otherwise it is unchanged */
434 /* indicate that we resumed from power save mode */
436 new_msr
|= ((target_ulong
)1 << MSR_ME
);
438 if (env
->msr_mask
& MSR_HVB
) {
440 * ISA specifies HV, but can be delivered to guest with HV
441 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
443 new_msr
|= (target_ulong
)MSR_HVB
;
446 cpu_abort(cs
, "Trying to deliver power-saving system reset "
447 "exception %d with no HV support\n", excp
);
452 case POWERPC_EXCP_DSEG
: /* Data segment exception */
453 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
454 case POWERPC_EXCP_TRACE
: /* Trace exception */
456 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
457 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
458 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage exception */
459 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
460 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment exception */
461 case POWERPC_EXCP_SDOOR_HV
: /* Hypervisor Doorbell interrupt */
462 case POWERPC_EXCP_HV_EMU
:
463 case POWERPC_EXCP_HVIRT
: /* Hypervisor virtualization */
466 new_msr
|= (target_ulong
)MSR_HVB
;
467 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
469 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
470 case POWERPC_EXCP_VSXU
: /* VSX unavailable exception */
471 case POWERPC_EXCP_FU
: /* Facility unavailable exception */
473 env
->spr
[SPR_FSCR
] |= ((target_ulong
)env
->error_code
<< 56);
476 case POWERPC_EXCP_HV_FU
: /* Hypervisor Facility Unavailable Exception */
478 env
->spr
[SPR_HFSCR
] |= ((target_ulong
)env
->error_code
<< FSCR_IC_POS
);
481 new_msr
|= (target_ulong
)MSR_HVB
;
482 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
485 case POWERPC_EXCP_PIT
: /* Programmable interval timer interrupt */
486 LOG_EXCP("PIT exception\n");
488 case POWERPC_EXCP_IO
: /* IO error exception */
490 cpu_abort(cs
, "601 IO error exception is not implemented yet !\n");
492 case POWERPC_EXCP_RUNM
: /* Run mode exception */
494 cpu_abort(cs
, "601 run mode exception is not implemented yet !\n");
496 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
498 cpu_abort(cs
, "602 emulation trap exception "
499 "is not implemented yet !\n");
501 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
502 switch (excp_model
) {
503 case POWERPC_EXCP_602
:
504 case POWERPC_EXCP_603
:
505 case POWERPC_EXCP_603E
:
506 case POWERPC_EXCP_G2
:
508 case POWERPC_EXCP_7x5
:
510 case POWERPC_EXCP_74xx
:
513 cpu_abort(cs
, "Invalid instruction TLB miss exception\n");
517 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
518 switch (excp_model
) {
519 case POWERPC_EXCP_602
:
520 case POWERPC_EXCP_603
:
521 case POWERPC_EXCP_603E
:
522 case POWERPC_EXCP_G2
:
524 case POWERPC_EXCP_7x5
:
526 case POWERPC_EXCP_74xx
:
529 cpu_abort(cs
, "Invalid data load TLB miss exception\n");
533 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
534 switch (excp_model
) {
535 case POWERPC_EXCP_602
:
536 case POWERPC_EXCP_603
:
537 case POWERPC_EXCP_603E
:
538 case POWERPC_EXCP_G2
:
540 /* Swap temporary saved registers with GPRs */
541 if (!(new_msr
& ((target_ulong
)1 << MSR_TGPR
))) {
542 new_msr
|= (target_ulong
)1 << MSR_TGPR
;
543 hreg_swap_gpr_tgpr(env
);
546 case POWERPC_EXCP_7x5
:
548 #if defined(DEBUG_SOFTWARE_TLB)
549 if (qemu_log_enabled()) {
551 target_ulong
*miss
, *cmp
;
554 if (excp
== POWERPC_EXCP_IFTLB
) {
557 miss
= &env
->spr
[SPR_IMISS
];
558 cmp
= &env
->spr
[SPR_ICMP
];
560 if (excp
== POWERPC_EXCP_DLTLB
) {
566 miss
= &env
->spr
[SPR_DMISS
];
567 cmp
= &env
->spr
[SPR_DCMP
];
569 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx
" %cC "
570 TARGET_FMT_lx
" H1 " TARGET_FMT_lx
" H2 "
571 TARGET_FMT_lx
" %08x\n", es
, en
, *miss
, en
, *cmp
,
572 env
->spr
[SPR_HASH1
], env
->spr
[SPR_HASH2
],
576 msr
|= env
->crf
[0] << 28;
577 msr
|= env
->error_code
; /* key, D/I, S/L bits */
578 /* Set way using a LRU mechanism */
579 msr
|= ((env
->last_way
+ 1) & (env
->nb_ways
- 1)) << 17;
581 case POWERPC_EXCP_74xx
:
583 #if defined(DEBUG_SOFTWARE_TLB)
584 if (qemu_log_enabled()) {
586 target_ulong
*miss
, *cmp
;
589 if (excp
== POWERPC_EXCP_IFTLB
) {
592 miss
= &env
->spr
[SPR_TLBMISS
];
593 cmp
= &env
->spr
[SPR_PTEHI
];
595 if (excp
== POWERPC_EXCP_DLTLB
) {
601 miss
= &env
->spr
[SPR_TLBMISS
];
602 cmp
= &env
->spr
[SPR_PTEHI
];
604 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx
" %cC "
605 TARGET_FMT_lx
" %08x\n", es
, en
, *miss
, en
, *cmp
,
609 msr
|= env
->error_code
; /* key bit */
612 cpu_abort(cs
, "Invalid data store TLB miss exception\n");
616 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
618 cpu_abort(cs
, "Floating point assist exception "
619 "is not implemented yet !\n");
621 case POWERPC_EXCP_DABR
: /* Data address breakpoint */
623 cpu_abort(cs
, "DABR exception is not implemented yet !\n");
625 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
627 cpu_abort(cs
, "IABR exception is not implemented yet !\n");
629 case POWERPC_EXCP_SMI
: /* System management interrupt */
631 cpu_abort(cs
, "SMI exception is not implemented yet !\n");
633 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
635 cpu_abort(cs
, "Thermal management exception "
636 "is not implemented yet !\n");
638 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor interrupt */
641 "Performance counter exception is not implemented yet !\n");
643 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
645 cpu_abort(cs
, "VPU assist exception is not implemented yet !\n");
647 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
650 "970 soft-patch exception is not implemented yet !\n");
652 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
655 "970 maintenance exception is not implemented yet !\n");
657 case POWERPC_EXCP_MEXTBR
: /* Maskable external breakpoint */
659 cpu_abort(cs
, "Maskable external exception "
660 "is not implemented yet !\n");
662 case POWERPC_EXCP_NMEXTBR
: /* Non maskable external breakpoint */
664 cpu_abort(cs
, "Non maskable external exception "
665 "is not implemented yet !\n");
669 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
674 env
->spr
[srr0
] = env
->nip
;
677 env
->spr
[srr1
] = msr
;
680 if (!(env
->msr_mask
& MSR_HVB
)) {
681 if (new_msr
& MSR_HVB
) {
682 cpu_abort(cs
, "Trying to deliver HV exception (MSR) %d with "
683 "no HV support\n", excp
);
685 if (srr0
== SPR_HSRR0
) {
686 cpu_abort(cs
, "Trying to deliver HV exception (HSRR) %d with "
687 "no HV support\n", excp
);
691 /* If any alternate SRR register are defined, duplicate saved values */
693 env
->spr
[asrr0
] = env
->spr
[srr0
];
696 env
->spr
[asrr1
] = env
->spr
[srr1
];
700 * Sort out endianness of interrupt, this differs depending on the
701 * CPU, the HV mode, etc...
704 if (excp_model
== POWERPC_EXCP_POWER7
) {
705 if (!(new_msr
& MSR_HVB
) && (env
->spr
[SPR_LPCR
] & LPCR_ILE
)) {
706 new_msr
|= (target_ulong
)1 << MSR_LE
;
708 } else if (excp_model
== POWERPC_EXCP_POWER8
) {
709 if (new_msr
& MSR_HVB
) {
710 if (env
->spr
[SPR_HID0
] & HID0_HILE
) {
711 new_msr
|= (target_ulong
)1 << MSR_LE
;
713 } else if (env
->spr
[SPR_LPCR
] & LPCR_ILE
) {
714 new_msr
|= (target_ulong
)1 << MSR_LE
;
716 } else if (excp_model
== POWERPC_EXCP_POWER9
) {
717 if (new_msr
& MSR_HVB
) {
718 if (env
->spr
[SPR_HID0
] & HID0_POWER9_HILE
) {
719 new_msr
|= (target_ulong
)1 << MSR_LE
;
721 } else if (env
->spr
[SPR_LPCR
] & LPCR_ILE
) {
722 new_msr
|= (target_ulong
)1 << MSR_LE
;
724 } else if (msr_ile
) {
725 new_msr
|= (target_ulong
)1 << MSR_LE
;
729 new_msr
|= (target_ulong
)1 << MSR_LE
;
733 /* Jump to handler */
734 vector
= env
->excp_vectors
[excp
];
735 if (vector
== (target_ulong
)-1ULL) {
736 cpu_abort(cs
, "Raised an exception without defined vector %d\n",
739 vector
|= env
->excp_prefix
;
742 * AIL only works if there is no HV transition and we are running
743 * with translations enabled
745 if (!((msr
>> MSR_IR
) & 1) || !((msr
>> MSR_DR
) & 1) ||
746 ((new_msr
& MSR_HVB
) && !(msr
& MSR_HVB
))) {
751 new_msr
|= (1 << MSR_IR
) | (1 << MSR_DR
);
752 vector
|= ppc_excp_vector_offset(cs
, ail
);
755 #if defined(TARGET_PPC64)
756 if (excp_model
== POWERPC_EXCP_BOOKE
) {
757 if (env
->spr
[SPR_BOOKE_EPCR
] & EPCR_ICM
) {
758 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
759 new_msr
|= (target_ulong
)1 << MSR_CM
;
761 vector
= (uint32_t)vector
;
764 if (!msr_isf
&& !(env
->mmu_model
& POWERPC_MMU_64
)) {
765 vector
= (uint32_t)vector
;
767 new_msr
|= (target_ulong
)1 << MSR_SF
;
772 * We don't use hreg_store_msr here as already have treated any
773 * special case that could occur. Just store MSR and update hflags
775 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
776 * will prevent setting of the HV bit which some exceptions might need
779 env
->msr
= new_msr
& env
->msr_mask
;
780 hreg_compute_hflags(env
);
782 /* Reset exception state */
783 cs
->exception_index
= POWERPC_EXCP_NONE
;
786 /* Reset the reservation */
787 env
->reserve_addr
= -1;
790 * Any interrupt is context synchronizing, check if TCG TLB needs
791 * a delayed flush on ppc64
793 check_tlb_flush(env
, false);
796 void ppc_cpu_do_interrupt(CPUState
*cs
)
798 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
799 CPUPPCState
*env
= &cpu
->env
;
801 powerpc_excp(cpu
, env
->excp_model
, cs
->exception_index
);
804 static void ppc_hw_interrupt(CPUPPCState
*env
)
806 PowerPCCPU
*cpu
= env_archcpu(env
);
810 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_RESET
)) {
811 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_RESET
);
812 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_RESET
);
815 /* Machine check exception */
816 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_MCK
)) {
817 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_MCK
);
818 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_MCHECK
);
822 /* External debug exception */
823 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DEBUG
)) {
824 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DEBUG
);
825 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_DEBUG
);
831 * For interrupts that gate on MSR:EE, we need to do something a
832 * bit more subtle, as we need to let them through even when EE is
833 * clear when coming out of some power management states (in order
834 * for them to become a 0x100).
836 async_deliver
= (msr_ee
!= 0) || env
->resume_as_sreset
;
838 /* Hypervisor decrementer exception */
839 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_HDECR
)) {
840 /* LPCR will be clear when not supported so this will work */
841 bool hdice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HDICE
);
842 if ((async_deliver
|| msr_hv
== 0) && hdice
) {
843 /* HDEC clears on delivery */
844 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_HDECR
);
845 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_HDECR
);
850 /* Hypervisor virtualization interrupt */
851 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_HVIRT
)) {
852 /* LPCR will be clear when not supported so this will work */
853 bool hvice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HVICE
);
854 if ((async_deliver
|| msr_hv
== 0) && hvice
) {
855 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_HVIRT
);
860 /* External interrupt can ignore MSR:EE under some circumstances */
861 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_EXT
)) {
862 bool lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
863 bool heic
= !!(env
->spr
[SPR_LPCR
] & LPCR_HEIC
);
864 /* HEIC blocks delivery to the hypervisor */
865 if ((async_deliver
&& !(heic
&& msr_hv
&& !msr_pr
)) ||
866 (env
->has_hv_mode
&& msr_hv
== 0 && !lpes0
)) {
867 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_EXTERNAL
);
872 /* External critical interrupt */
873 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_CEXT
)) {
874 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_CRITICAL
);
878 if (async_deliver
!= 0) {
879 /* Watchdog timer on embedded PowerPC */
880 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_WDT
)) {
881 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_WDT
);
882 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_WDT
);
885 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_CDOORBELL
)) {
886 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_CDOORBELL
);
887 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_DOORCI
);
890 /* Fixed interval timer on embedded PowerPC */
891 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_FIT
)) {
892 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_FIT
);
893 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_FIT
);
896 /* Programmable interval timer on embedded PowerPC */
897 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_PIT
)) {
898 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_PIT
);
899 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_PIT
);
902 /* Decrementer exception */
903 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DECR
)) {
904 if (ppc_decr_clear_on_delivery(env
)) {
905 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DECR
);
907 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_DECR
);
910 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DOORBELL
)) {
911 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DOORBELL
);
912 if (is_book3s_arch2x(env
)) {
913 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_SDOOR
);
915 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_DOORI
);
919 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_HDOORBELL
)) {
920 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_HDOORBELL
);
921 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_SDOOR_HV
);
924 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_PERFM
)) {
925 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_PERFM
);
926 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_PERFM
);
929 /* Thermal interrupt */
930 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_THERM
)) {
931 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_THERM
);
932 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_THERM
);
937 if (env
->resume_as_sreset
) {
939 * This is a bug ! It means that has_work took us out of halt without
940 * anything to deliver while in a PM state that requires getting
943 * This means we will incorrectly execute past the power management
944 * instruction instead of triggering a reset.
946 * It generally means a discrepancy between the wakup conditions in the
947 * processor has_work implementation and the logic in this function.
949 cpu_abort(env_cpu(env
),
950 "Wakeup from PM state but interrupt Undelivered");
954 void ppc_cpu_do_system_reset(CPUState
*cs
)
956 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
957 CPUPPCState
*env
= &cpu
->env
;
959 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_RESET
);
961 #endif /* !CONFIG_USER_ONLY */
963 bool ppc_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
965 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
966 CPUPPCState
*env
= &cpu
->env
;
968 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
969 ppc_hw_interrupt(env
);
970 if (env
->pending_interrupts
== 0) {
971 cs
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
978 #if defined(DEBUG_OP)
979 static void cpu_dump_rfi(target_ulong RA
, target_ulong msr
)
981 qemu_log("Return from exception at " TARGET_FMT_lx
" with flags "
982 TARGET_FMT_lx
"\n", RA
, msr
);
986 /*****************************************************************************/
987 /* Exceptions processing helpers */
989 void raise_exception_err_ra(CPUPPCState
*env
, uint32_t exception
,
990 uint32_t error_code
, uintptr_t raddr
)
992 CPUState
*cs
= env_cpu(env
);
994 cs
->exception_index
= exception
;
995 env
->error_code
= error_code
;
996 cpu_loop_exit_restore(cs
, raddr
);
999 void raise_exception_err(CPUPPCState
*env
, uint32_t exception
,
1000 uint32_t error_code
)
1002 raise_exception_err_ra(env
, exception
, error_code
, 0);
1005 void raise_exception(CPUPPCState
*env
, uint32_t exception
)
1007 raise_exception_err_ra(env
, exception
, 0, 0);
1010 void raise_exception_ra(CPUPPCState
*env
, uint32_t exception
,
1013 raise_exception_err_ra(env
, exception
, 0, raddr
);
1016 void helper_raise_exception_err(CPUPPCState
*env
, uint32_t exception
,
1017 uint32_t error_code
)
1019 raise_exception_err_ra(env
, exception
, error_code
, 0);
1022 void helper_raise_exception(CPUPPCState
*env
, uint32_t exception
)
1024 raise_exception_err_ra(env
, exception
, 0, 0);
1027 #if !defined(CONFIG_USER_ONLY)
1028 void helper_store_msr(CPUPPCState
*env
, target_ulong val
)
1030 uint32_t excp
= hreg_store_msr(env
, val
, 0);
1033 CPUState
*cs
= env_cpu(env
);
1034 cpu_interrupt_exittb(cs
);
1035 raise_exception(env
, excp
);
1039 #if defined(TARGET_PPC64)
1040 void helper_pminsn(CPUPPCState
*env
, powerpc_pm_insn_t insn
)
1048 * The architecture specifies that HDEC interrupts are discarded
1051 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_HDECR
);
1053 /* Condition for waking up at 0x100 */
1054 env
->resume_as_sreset
= (insn
!= PPC_PM_STOP
) ||
1055 (env
->spr
[SPR_PSSCR
] & PSSCR_EC
);
1057 #endif /* defined(TARGET_PPC64) */
1059 static inline void do_rfi(CPUPPCState
*env
, target_ulong nip
, target_ulong msr
)
1061 CPUState
*cs
= env_cpu(env
);
1063 /* MSR:POW cannot be set by any form of rfi */
1064 msr
&= ~(1ULL << MSR_POW
);
1066 #if defined(TARGET_PPC64)
1067 /* Switching to 32-bit ? Crop the nip */
1068 if (!msr_is_64bit(env
, msr
)) {
1069 nip
= (uint32_t)nip
;
1072 nip
= (uint32_t)nip
;
1074 /* XXX: beware: this is false if VLE is supported */
1075 env
->nip
= nip
& ~((target_ulong
)0x00000003);
1076 hreg_store_msr(env
, msr
, 1);
1077 #if defined(DEBUG_OP)
1078 cpu_dump_rfi(env
->nip
, env
->msr
);
1081 * No need to raise an exception here, as rfi is always the last
1084 cpu_interrupt_exittb(cs
);
1085 /* Reset the reservation */
1086 env
->reserve_addr
= -1;
1088 /* Context synchronizing: check if TCG TLB needs flush */
1089 check_tlb_flush(env
, false);
1092 void helper_rfi(CPUPPCState
*env
)
1094 do_rfi(env
, env
->spr
[SPR_SRR0
], env
->spr
[SPR_SRR1
] & 0xfffffffful
);
1097 #define MSR_BOOK3S_MASK
1098 #if defined(TARGET_PPC64)
1099 void helper_rfid(CPUPPCState
*env
)
1102 * The architeture defines a number of rules for which bits can
1103 * change but in practice, we handle this in hreg_store_msr()
1104 * which will be called by do_rfi(), so there is no need to filter
1107 do_rfi(env
, env
->spr
[SPR_SRR0
], env
->spr
[SPR_SRR1
]);
1110 void helper_hrfid(CPUPPCState
*env
)
1112 do_rfi(env
, env
->spr
[SPR_HSRR0
], env
->spr
[SPR_HSRR1
]);
1116 /*****************************************************************************/
1117 /* Embedded PowerPC specific helpers */
1118 void helper_40x_rfci(CPUPPCState
*env
)
1120 do_rfi(env
, env
->spr
[SPR_40x_SRR2
], env
->spr
[SPR_40x_SRR3
]);
1123 void helper_rfci(CPUPPCState
*env
)
1125 do_rfi(env
, env
->spr
[SPR_BOOKE_CSRR0
], env
->spr
[SPR_BOOKE_CSRR1
]);
1128 void helper_rfdi(CPUPPCState
*env
)
1130 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
1131 do_rfi(env
, env
->spr
[SPR_BOOKE_DSRR0
], env
->spr
[SPR_BOOKE_DSRR1
]);
1134 void helper_rfmci(CPUPPCState
*env
)
1136 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
1137 do_rfi(env
, env
->spr
[SPR_BOOKE_MCSRR0
], env
->spr
[SPR_BOOKE_MCSRR1
]);
1141 void helper_tw(CPUPPCState
*env
, target_ulong arg1
, target_ulong arg2
,
1144 if (!likely(!(((int32_t)arg1
< (int32_t)arg2
&& (flags
& 0x10)) ||
1145 ((int32_t)arg1
> (int32_t)arg2
&& (flags
& 0x08)) ||
1146 ((int32_t)arg1
== (int32_t)arg2
&& (flags
& 0x04)) ||
1147 ((uint32_t)arg1
< (uint32_t)arg2
&& (flags
& 0x02)) ||
1148 ((uint32_t)arg1
> (uint32_t)arg2
&& (flags
& 0x01))))) {
1149 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
1150 POWERPC_EXCP_TRAP
, GETPC());
1154 #if defined(TARGET_PPC64)
1155 void helper_td(CPUPPCState
*env
, target_ulong arg1
, target_ulong arg2
,
1158 if (!likely(!(((int64_t)arg1
< (int64_t)arg2
&& (flags
& 0x10)) ||
1159 ((int64_t)arg1
> (int64_t)arg2
&& (flags
& 0x08)) ||
1160 ((int64_t)arg1
== (int64_t)arg2
&& (flags
& 0x04)) ||
1161 ((uint64_t)arg1
< (uint64_t)arg2
&& (flags
& 0x02)) ||
1162 ((uint64_t)arg1
> (uint64_t)arg2
&& (flags
& 0x01))))) {
1163 raise_exception_err_ra(env
, POWERPC_EXCP_PROGRAM
,
1164 POWERPC_EXCP_TRAP
, GETPC());
1169 #if !defined(CONFIG_USER_ONLY)
1170 /*****************************************************************************/
1171 /* PowerPC 601 specific instructions (POWER bridge) */
1173 void helper_rfsvc(CPUPPCState
*env
)
1175 do_rfi(env
, env
->lr
, env
->ctr
& 0x0000FFFF);
1178 /* Embedded.Processor Control */
1179 static int dbell2irq(target_ulong rb
)
1181 int msg
= rb
& DBELL_TYPE_MASK
;
1185 case DBELL_TYPE_DBELL
:
1186 irq
= PPC_INTERRUPT_DOORBELL
;
1188 case DBELL_TYPE_DBELL_CRIT
:
1189 irq
= PPC_INTERRUPT_CDOORBELL
;
1191 case DBELL_TYPE_G_DBELL
:
1192 case DBELL_TYPE_G_DBELL_CRIT
:
1193 case DBELL_TYPE_G_DBELL_MC
:
1202 void helper_msgclr(CPUPPCState
*env
, target_ulong rb
)
1204 int irq
= dbell2irq(rb
);
1210 env
->pending_interrupts
&= ~(1 << irq
);
1213 void helper_msgsnd(target_ulong rb
)
1215 int irq
= dbell2irq(rb
);
1216 int pir
= rb
& DBELL_PIRTAG_MASK
;
1223 qemu_mutex_lock_iothread();
1225 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1226 CPUPPCState
*cenv
= &cpu
->env
;
1228 if ((rb
& DBELL_BRDCAST
) || (cenv
->spr
[SPR_BOOKE_PIR
] == pir
)) {
1229 cenv
->pending_interrupts
|= 1 << irq
;
1230 cpu_interrupt(cs
, CPU_INTERRUPT_HARD
);
1233 qemu_mutex_unlock_iothread();
1236 /* Server Processor Control */
1238 static bool dbell_type_server(target_ulong rb
)
1241 * A Directed Hypervisor Doorbell message is sent only if the
1242 * message type is 5. All other types are reserved and the
1243 * instruction is a no-op
1245 return (rb
& DBELL_TYPE_MASK
) == DBELL_TYPE_DBELL_SERVER
;
1248 void helper_book3s_msgclr(CPUPPCState
*env
, target_ulong rb
)
1250 if (!dbell_type_server(rb
)) {
1254 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_HDOORBELL
);
1257 static void book3s_msgsnd_common(int pir
, int irq
)
1261 qemu_mutex_lock_iothread();
1263 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1264 CPUPPCState
*cenv
= &cpu
->env
;
1266 /* TODO: broadcast message to all threads of the same processor */
1267 if (cenv
->spr_cb
[SPR_PIR
].default_value
== pir
) {
1268 cenv
->pending_interrupts
|= 1 << irq
;
1269 cpu_interrupt(cs
, CPU_INTERRUPT_HARD
);
1272 qemu_mutex_unlock_iothread();
1275 void helper_book3s_msgsnd(target_ulong rb
)
1277 int pir
= rb
& DBELL_PROCIDTAG_MASK
;
1279 if (!dbell_type_server(rb
)) {
1283 book3s_msgsnd_common(pir
, PPC_INTERRUPT_HDOORBELL
);
1286 #if defined(TARGET_PPC64)
1287 void helper_book3s_msgclrp(CPUPPCState
*env
, target_ulong rb
)
1289 helper_hfscr_facility_check(env
, HFSCR_MSGP
, "msgclrp", HFSCR_IC_MSGP
);
1291 if (!dbell_type_server(rb
)) {
1295 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DOORBELL
);
1299 * sends a message to other threads that are on the same
1300 * multi-threaded processor
1302 void helper_book3s_msgsndp(CPUPPCState
*env
, target_ulong rb
)
1304 int pir
= env
->spr_cb
[SPR_PIR
].default_value
;
1306 helper_hfscr_facility_check(env
, HFSCR_MSGP
, "msgsndp", HFSCR_IC_MSGP
);
1308 if (!dbell_type_server(rb
)) {
1312 /* TODO: TCG supports only one thread */
1314 book3s_msgsnd_common(pir
, PPC_INTERRUPT_DOORBELL
);
1319 void ppc_cpu_do_unaligned_access(CPUState
*cs
, vaddr vaddr
,
1320 MMUAccessType access_type
,
1321 int mmu_idx
, uintptr_t retaddr
)
1323 CPUPPCState
*env
= cs
->env_ptr
;
1326 /* Restore state and reload the insn we executed, for filling in DSISR. */
1327 cpu_restore_state(cs
, retaddr
, true);
1328 insn
= cpu_ldl_code(env
, env
->nip
);
1330 cs
->exception_index
= POWERPC_EXCP_ALIGN
;
1331 env
->error_code
= insn
& 0x03FF0000;