2 * PowerPC exception emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
26 #include "helper_regs.h"
28 /* #define DEBUG_OP */
29 /* #define DEBUG_SOFTWARE_TLB */
30 /* #define DEBUG_EXCEPTIONS */
32 #ifdef DEBUG_EXCEPTIONS
33 # define LOG_EXCP(...) qemu_log(__VA_ARGS__)
35 # define LOG_EXCP(...) do { } while (0)
38 /*****************************************************************************/
39 /* Exception processing */
40 #if defined(CONFIG_USER_ONLY)
41 void ppc_cpu_do_interrupt(CPUState
*cs
)
43 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
44 CPUPPCState
*env
= &cpu
->env
;
46 cs
->exception_index
= POWERPC_EXCP_NONE
;
50 static void ppc_hw_interrupt(CPUPPCState
*env
)
52 CPUState
*cs
= env_cpu(env
);
54 cs
->exception_index
= POWERPC_EXCP_NONE
;
57 #else /* defined(CONFIG_USER_ONLY) */
58 static inline void dump_syscall(CPUPPCState
*env
)
60 qemu_log_mask(CPU_LOG_INT
, "syscall r0=%016" PRIx64
61 " r3=%016" PRIx64
" r4=%016" PRIx64
" r5=%016" PRIx64
62 " r6=%016" PRIx64
" r7=%016" PRIx64
" r8=%016" PRIx64
63 " nip=" TARGET_FMT_lx
"\n",
64 ppc_dump_gpr(env
, 0), ppc_dump_gpr(env
, 3),
65 ppc_dump_gpr(env
, 4), ppc_dump_gpr(env
, 5),
66 ppc_dump_gpr(env
, 6), ppc_dump_gpr(env
, 7),
67 ppc_dump_gpr(env
, 8), env
->nip
);
70 static inline void dump_syscall_vectored(CPUPPCState
*env
)
72 qemu_log_mask(CPU_LOG_INT
, "syscall r0=%016" PRIx64
73 " r3=%016" PRIx64
" r4=%016" PRIx64
" r5=%016" PRIx64
74 " r6=%016" PRIx64
" r7=%016" PRIx64
" r8=%016" PRIx64
75 " nip=" TARGET_FMT_lx
"\n",
76 ppc_dump_gpr(env
, 0), ppc_dump_gpr(env
, 3),
77 ppc_dump_gpr(env
, 4), ppc_dump_gpr(env
, 5),
78 ppc_dump_gpr(env
, 6), ppc_dump_gpr(env
, 7),
79 ppc_dump_gpr(env
, 8), env
->nip
);
82 static inline void dump_hcall(CPUPPCState
*env
)
84 qemu_log_mask(CPU_LOG_INT
, "hypercall r3=%016" PRIx64
85 " r4=%016" PRIx64
" r5=%016" PRIx64
" r6=%016" PRIx64
86 " r7=%016" PRIx64
" r8=%016" PRIx64
" r9=%016" PRIx64
87 " r10=%016" PRIx64
" r11=%016" PRIx64
" r12=%016" PRIx64
88 " nip=" TARGET_FMT_lx
"\n",
89 ppc_dump_gpr(env
, 3), ppc_dump_gpr(env
, 4),
90 ppc_dump_gpr(env
, 5), ppc_dump_gpr(env
, 6),
91 ppc_dump_gpr(env
, 7), ppc_dump_gpr(env
, 8),
92 ppc_dump_gpr(env
, 9), ppc_dump_gpr(env
, 10),
93 ppc_dump_gpr(env
, 11), ppc_dump_gpr(env
, 12),
97 static int powerpc_reset_wakeup(CPUState
*cs
, CPUPPCState
*env
, int excp
,
100 /* We no longer are in a PM state */
101 env
->resume_as_sreset
= false;
103 /* Pretend to be returning from doze always as we don't lose state */
104 *msr
|= SRR1_WS_NOLOSS
;
106 /* Machine checks are sent normally */
107 if (excp
== POWERPC_EXCP_MCHECK
) {
111 case POWERPC_EXCP_RESET
:
112 *msr
|= SRR1_WAKERESET
;
114 case POWERPC_EXCP_EXTERNAL
:
117 case POWERPC_EXCP_DECR
:
118 *msr
|= SRR1_WAKEDEC
;
120 case POWERPC_EXCP_SDOOR
:
121 *msr
|= SRR1_WAKEDBELL
;
123 case POWERPC_EXCP_SDOOR_HV
:
124 *msr
|= SRR1_WAKEHDBELL
;
126 case POWERPC_EXCP_HV_MAINT
:
127 *msr
|= SRR1_WAKEHMI
;
129 case POWERPC_EXCP_HVIRT
:
130 *msr
|= SRR1_WAKEHVI
;
133 cpu_abort(cs
, "Unsupported exception %d in Power Save mode\n",
136 return POWERPC_EXCP_RESET
;
140 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
141 * taken with the MMU on, and which uses an alternate location (e.g., so the
142 * kernel/hv can map the vectors there with an effective address).
144 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they
145 * are delivered in this way. AIL requires the LPCR to be set to enable this
146 * mode, and then a number of conditions have to be true for AIL to apply.
148 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
149 * they specifically want to be in real mode (e.g., the MCE might be signaling
150 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
152 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
153 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
154 * radix mode (LPCR[HR]).
156 * POWER8, POWER9 with LPCR[HR]=0
157 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
158 * +-----------+-------------+---------+-------------+-----+
159 * | a | 00/01/10 | x | x | 0 |
160 * | a | 11 | 0 | 1 | 0 |
161 * | a | 11 | 1 | 1 | a |
162 * | a | 11 | 0 | 0 | a |
163 * +-------------------------------------------------------+
165 * POWER9 with LPCR[HR]=1
166 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
167 * +-----------+-------------+---------+-------------+-----+
168 * | a | 00/01/10 | x | x | 0 |
169 * | a | 11 | x | x | a |
170 * +-------------------------------------------------------+
172 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
173 * the hypervisor in AIL mode if the guest is radix. This is good for
174 * performance but allows the guest to influence the AIL of hypervisor
175 * interrupts using its MSR, and also the hypervisor must disallow guest
176 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
177 * use AIL for its MSR[HV] 0->1 interrupts.
179 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
180 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
183 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
185 * POWER10 behaviour is
186 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
187 * +-----------+------------+-------------+---------+-------------+-----+
188 * | a | h | 00/01/10 | 0 | 0 | 0 |
189 * | a | h | 11 | 0 | 0 | a |
190 * | a | h | x | 0 | 1 | h |
191 * | a | h | 00/01/10 | 1 | 1 | 0 |
192 * | a | h | 11 | 1 | 1 | h |
193 * +--------------------------------------------------------------------+
195 static inline void ppc_excp_apply_ail(PowerPCCPU
*cpu
, int excp_model
, int excp
,
197 target_ulong
*new_msr
,
198 target_ulong
*vector
)
200 #if defined(TARGET_PPC64)
201 CPUPPCState
*env
= &cpu
->env
;
202 bool mmu_all_on
= ((msr
>> MSR_IR
) & 1) && ((msr
>> MSR_DR
) & 1);
203 bool hv_escalation
= !(msr
& MSR_HVB
) && (*new_msr
& MSR_HVB
);
206 if (excp
== POWERPC_EXCP_MCHECK
||
207 excp
== POWERPC_EXCP_RESET
||
208 excp
== POWERPC_EXCP_HV_MAINT
) {
209 /* SRESET, MCE, HMI never apply AIL */
213 if (excp_model
== POWERPC_EXCP_POWER8
||
214 excp_model
== POWERPC_EXCP_POWER9
) {
216 /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
219 if (hv_escalation
&& !(env
->spr
[SPR_LPCR
] & LPCR_HR
)) {
221 * AIL does not work if there is a MSR[HV] 0->1 transition and the
222 * partition is in HPT mode. For radix guests, such interrupts are
223 * allowed to be delivered to the hypervisor in ail mode.
228 ail
= (env
->spr
[SPR_LPCR
] & LPCR_AIL
) >> LPCR_AIL_SHIFT
;
233 /* AIL=1 is reserved, treat it like AIL=0 */
237 } else if (excp_model
== POWERPC_EXCP_POWER10
) {
238 if (!mmu_all_on
&& !hv_escalation
) {
240 * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
241 * Guest->guest and HV->HV interrupts do require MMU on.
246 if (*new_msr
& MSR_HVB
) {
247 if (!(env
->spr
[SPR_LPCR
] & LPCR_HAIL
)) {
248 /* HV interrupts depend on LPCR[HAIL] */
251 ail
= 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
253 ail
= (env
->spr
[SPR_LPCR
] & LPCR_AIL
) >> LPCR_AIL_SHIFT
;
258 if (ail
== 1 || ail
== 2) {
259 /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
263 /* Other processors do not support AIL */
268 * AIL applies, so the new MSR gets IR and DR set, and an offset applied
271 *new_msr
|= (1 << MSR_IR
) | (1 << MSR_DR
);
273 if (excp
!= POWERPC_EXCP_SYSCALL_VECTORED
) {
275 *vector
|= 0x0000000000018000ull
;
276 } else if (ail
== 3) {
277 *vector
|= 0xc000000000004000ull
;
281 * scv AIL is a little different. AIL=2 does not change the address,
282 * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
285 *vector
&= ~0x0000000000017000ull
; /* Un-apply the base offset */
286 *vector
|= 0xc000000000003000ull
; /* Apply scv's AIL=3 offset */
292 static inline void powerpc_set_excp_state(PowerPCCPU
*cpu
,
293 target_ulong vector
, target_ulong msr
)
295 CPUState
*cs
= CPU(cpu
);
296 CPUPPCState
*env
= &cpu
->env
;
299 * We don't use hreg_store_msr here as already have treated any
300 * special case that could occur. Just store MSR and update hflags
302 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
303 * will prevent setting of the HV bit which some exceptions might need
306 env
->msr
= msr
& env
->msr_mask
;
307 hreg_compute_hflags(env
);
309 /* Reset exception state */
310 cs
->exception_index
= POWERPC_EXCP_NONE
;
313 /* Reset the reservation */
314 env
->reserve_addr
= -1;
317 * Any interrupt is context synchronizing, check if TCG TLB needs
318 * a delayed flush on ppc64
320 check_tlb_flush(env
, false);
324 * Note that this function should be greatly optimized when called
325 * with a constant excp, from ppc_hw_interrupt
327 static inline void powerpc_excp(PowerPCCPU
*cpu
, int excp_model
, int excp
)
329 CPUState
*cs
= CPU(cpu
);
330 CPUPPCState
*env
= &cpu
->env
;
331 target_ulong msr
, new_msr
, vector
;
332 int srr0
, srr1
, asrr0
, asrr1
, lev
= -1;
335 qemu_log_mask(CPU_LOG_INT
, "Raise exception at " TARGET_FMT_lx
336 " => %08x (%02x)\n", env
->nip
, excp
, env
->error_code
);
338 /* new srr1 value excluding must-be-zero bits */
339 if (excp_model
== POWERPC_EXCP_BOOKE
) {
342 msr
= env
->msr
& ~0x783f0000ULL
;
346 * new interrupt handler msr preserves existing HV and ME unless
347 * explicitly overriden
349 new_msr
= env
->msr
& (((target_ulong
)1 << MSR_ME
) | MSR_HVB
);
351 /* target registers */
358 * check for special resume at 0x100 from doze/nap/sleep/winkle on
361 if (env
->resume_as_sreset
) {
362 excp
= powerpc_reset_wakeup(cs
, env
, excp
, &msr
);
366 * Exception targeting modifiers
368 * LPES0 is supported on POWER7/8/9
369 * LPES1 is not supported (old iSeries mode)
371 * On anything else, we behave as if LPES0 is 1
372 * (externals don't alter MSR:HV)
374 #if defined(TARGET_PPC64)
375 if (excp_model
== POWERPC_EXCP_POWER7
||
376 excp_model
== POWERPC_EXCP_POWER8
||
377 excp_model
== POWERPC_EXCP_POWER9
||
378 excp_model
== POWERPC_EXCP_POWER10
) {
379 lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
381 #endif /* defined(TARGET_PPC64) */
387 * Hypervisor emulation assistance interrupt only exists on server
388 * arch 2.05 server or later. We also don't want to generate it if
389 * we don't have HVB in msr_mask (PAPR mode).
391 if (excp
== POWERPC_EXCP_HV_EMU
392 #if defined(TARGET_PPC64)
393 && !(mmu_is_64bit(env
->mmu_model
) && (env
->msr_mask
& MSR_HVB
))
394 #endif /* defined(TARGET_PPC64) */
397 excp
= POWERPC_EXCP_PROGRAM
;
401 case POWERPC_EXCP_NONE
:
402 /* Should never happen */
404 case POWERPC_EXCP_CRITICAL
: /* Critical input */
405 switch (excp_model
) {
406 case POWERPC_EXCP_40x
:
410 case POWERPC_EXCP_BOOKE
:
411 srr0
= SPR_BOOKE_CSRR0
;
412 srr1
= SPR_BOOKE_CSRR1
;
414 case POWERPC_EXCP_G2
:
420 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
423 * Machine check exception is not enabled. Enter
426 fprintf(stderr
, "Machine check while not allowed. "
427 "Entering checkstop state\n");
428 if (qemu_log_separate()) {
429 qemu_log("Machine check while not allowed. "
430 "Entering checkstop state\n");
433 cpu_interrupt_exittb(cs
);
435 if (env
->msr_mask
& MSR_HVB
) {
437 * ISA specifies HV, but can be delivered to guest with HV
438 * clear (e.g., see FWNMI in PAPR).
440 new_msr
|= (target_ulong
)MSR_HVB
;
443 /* machine check exceptions don't have ME set */
444 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
446 /* XXX: should also have something loaded in DAR / DSISR */
447 switch (excp_model
) {
448 case POWERPC_EXCP_40x
:
452 case POWERPC_EXCP_BOOKE
:
453 /* FIXME: choose one or the other based on CPU type */
454 srr0
= SPR_BOOKE_MCSRR0
;
455 srr1
= SPR_BOOKE_MCSRR1
;
456 asrr0
= SPR_BOOKE_CSRR0
;
457 asrr1
= SPR_BOOKE_CSRR1
;
463 case POWERPC_EXCP_DSI
: /* Data storage exception */
464 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx
" DAR=" TARGET_FMT_lx
465 "\n", env
->spr
[SPR_DSISR
], env
->spr
[SPR_DAR
]);
467 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
468 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx
", nip=" TARGET_FMT_lx
469 "\n", msr
, env
->nip
);
470 msr
|= env
->error_code
;
472 case POWERPC_EXCP_EXTERNAL
: /* External input */
476 new_msr
|= (target_ulong
)MSR_HVB
;
477 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
481 if (env
->mpic_proxy
) {
482 /* IACK the IRQ on delivery */
483 env
->spr
[SPR_BOOKE_EPR
] = ldl_phys(cs
->as
, env
->mpic_iack
);
486 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
487 /* Get rS/rD and rA from faulting opcode */
489 * Note: the opcode fields will not be set properly for a
490 * direct store load/store, but nobody cares as nobody
491 * actually uses direct store segments.
493 env
->spr
[SPR_DSISR
] |= (env
->error_code
& 0x03FF0000) >> 16;
495 case POWERPC_EXCP_PROGRAM
: /* Program exception */
496 switch (env
->error_code
& ~0xF) {
497 case POWERPC_EXCP_FP
:
498 if ((msr_fe0
== 0 && msr_fe1
== 0) || msr_fp
== 0) {
499 LOG_EXCP("Ignore floating point exception\n");
500 cs
->exception_index
= POWERPC_EXCP_NONE
;
506 * FP exceptions always have NIP pointing to the faulting
507 * instruction, so always use store_next and claim we are
508 * precise in the MSR.
511 env
->spr
[SPR_BOOKE_ESR
] = ESR_FP
;
513 case POWERPC_EXCP_INVAL
:
514 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx
"\n", env
->nip
);
516 env
->spr
[SPR_BOOKE_ESR
] = ESR_PIL
;
518 case POWERPC_EXCP_PRIV
:
520 env
->spr
[SPR_BOOKE_ESR
] = ESR_PPR
;
522 case POWERPC_EXCP_TRAP
:
524 env
->spr
[SPR_BOOKE_ESR
] = ESR_PTR
;
527 /* Should never occur */
528 cpu_abort(cs
, "Invalid program exception %d. Aborting\n",
533 case POWERPC_EXCP_SYSCALL
: /* System call exception */
534 lev
= env
->error_code
;
536 if ((lev
== 1) && cpu
->vhyp
) {
543 * We need to correct the NIP which in this case is supposed
544 * to point to the next instruction
548 /* "PAPR mode" built-in hypercall emulation */
549 if ((lev
== 1) && cpu
->vhyp
) {
550 PPCVirtualHypervisorClass
*vhc
=
551 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu
->vhyp
);
552 vhc
->hypercall(cpu
->vhyp
, cpu
);
556 new_msr
|= (target_ulong
)MSR_HVB
;
559 case POWERPC_EXCP_SYSCALL_VECTORED
: /* scv exception */
560 lev
= env
->error_code
;
561 dump_syscall_vectored(env
);
563 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_EE
);
564 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
566 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
567 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
568 case POWERPC_EXCP_DECR
: /* Decrementer exception */
570 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
572 LOG_EXCP("FIT exception\n");
574 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
575 LOG_EXCP("WDT exception\n");
576 switch (excp_model
) {
577 case POWERPC_EXCP_BOOKE
:
578 srr0
= SPR_BOOKE_CSRR0
;
579 srr1
= SPR_BOOKE_CSRR1
;
585 case POWERPC_EXCP_DTLB
: /* Data TLB error */
586 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
588 case POWERPC_EXCP_DEBUG
: /* Debug interrupt */
589 if (env
->flags
& POWERPC_FLAG_DE
) {
590 /* FIXME: choose one or the other based on CPU type */
591 srr0
= SPR_BOOKE_DSRR0
;
592 srr1
= SPR_BOOKE_DSRR1
;
593 asrr0
= SPR_BOOKE_CSRR0
;
594 asrr1
= SPR_BOOKE_CSRR1
;
595 /* DBSR already modified by caller */
597 cpu_abort(cs
, "Debug exception triggered on unsupported model\n");
600 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavailable */
601 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
603 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data interrupt */
605 cpu_abort(cs
, "Embedded floating point data exception "
606 "is not implemented yet !\n");
607 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
609 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round interrupt */
611 cpu_abort(cs
, "Embedded floating point round exception "
612 "is not implemented yet !\n");
613 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
615 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor interrupt */
618 "Performance counter exception is not implemented yet !\n");
620 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
622 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
623 srr0
= SPR_BOOKE_CSRR0
;
624 srr1
= SPR_BOOKE_CSRR1
;
626 case POWERPC_EXCP_RESET
: /* System reset exception */
627 /* A power-saving exception sets ME, otherwise it is unchanged */
629 /* indicate that we resumed from power save mode */
631 new_msr
|= ((target_ulong
)1 << MSR_ME
);
633 if (env
->msr_mask
& MSR_HVB
) {
635 * ISA specifies HV, but can be delivered to guest with HV
636 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
638 new_msr
|= (target_ulong
)MSR_HVB
;
641 cpu_abort(cs
, "Trying to deliver power-saving system reset "
642 "exception %d with no HV support\n", excp
);
646 case POWERPC_EXCP_DSEG
: /* Data segment exception */
647 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
648 case POWERPC_EXCP_TRACE
: /* Trace exception */
650 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage exception */
651 msr
|= env
->error_code
;
653 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
654 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
655 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
656 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment exception */
657 case POWERPC_EXCP_SDOOR_HV
: /* Hypervisor Doorbell interrupt */
658 case POWERPC_EXCP_HV_EMU
:
659 case POWERPC_EXCP_HVIRT
: /* Hypervisor virtualization */
662 new_msr
|= (target_ulong
)MSR_HVB
;
663 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
665 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
666 case POWERPC_EXCP_VSXU
: /* VSX unavailable exception */
667 case POWERPC_EXCP_FU
: /* Facility unavailable exception */
669 env
->spr
[SPR_FSCR
] |= ((target_ulong
)env
->error_code
<< 56);
672 case POWERPC_EXCP_HV_FU
: /* Hypervisor Facility Unavailable Exception */
674 env
->spr
[SPR_HFSCR
] |= ((target_ulong
)env
->error_code
<< FSCR_IC_POS
);
677 new_msr
|= (target_ulong
)MSR_HVB
;
678 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
681 case POWERPC_EXCP_PIT
: /* Programmable interval timer interrupt */
682 LOG_EXCP("PIT exception\n");
684 case POWERPC_EXCP_IO
: /* IO error exception */
686 cpu_abort(cs
, "601 IO error exception is not implemented yet !\n");
688 case POWERPC_EXCP_RUNM
: /* Run mode exception */
690 cpu_abort(cs
, "601 run mode exception is not implemented yet !\n");
692 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
694 cpu_abort(cs
, "602 emulation trap exception "
695 "is not implemented yet !\n");
697 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
698 switch (excp_model
) {
699 case POWERPC_EXCP_602
:
700 case POWERPC_EXCP_603
:
701 case POWERPC_EXCP_603E
:
702 case POWERPC_EXCP_G2
:
704 case POWERPC_EXCP_7x5
:
706 case POWERPC_EXCP_74xx
:
709 cpu_abort(cs
, "Invalid instruction TLB miss exception\n");
713 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
714 switch (excp_model
) {
715 case POWERPC_EXCP_602
:
716 case POWERPC_EXCP_603
:
717 case POWERPC_EXCP_603E
:
718 case POWERPC_EXCP_G2
:
720 case POWERPC_EXCP_7x5
:
722 case POWERPC_EXCP_74xx
:
725 cpu_abort(cs
, "Invalid data load TLB miss exception\n");
729 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
730 switch (excp_model
) {
731 case POWERPC_EXCP_602
:
732 case POWERPC_EXCP_603
:
733 case POWERPC_EXCP_603E
:
734 case POWERPC_EXCP_G2
:
736 /* Swap temporary saved registers with GPRs */
737 if (!(new_msr
& ((target_ulong
)1 << MSR_TGPR
))) {
738 new_msr
|= (target_ulong
)1 << MSR_TGPR
;
739 hreg_swap_gpr_tgpr(env
);
742 case POWERPC_EXCP_7x5
:
744 #if defined(DEBUG_SOFTWARE_TLB)
745 if (qemu_log_enabled()) {
747 target_ulong
*miss
, *cmp
;
750 if (excp
== POWERPC_EXCP_IFTLB
) {
753 miss
= &env
->spr
[SPR_IMISS
];
754 cmp
= &env
->spr
[SPR_ICMP
];
756 if (excp
== POWERPC_EXCP_DLTLB
) {
762 miss
= &env
->spr
[SPR_DMISS
];
763 cmp
= &env
->spr
[SPR_DCMP
];
765 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx
" %cC "
766 TARGET_FMT_lx
" H1 " TARGET_FMT_lx
" H2 "
767 TARGET_FMT_lx
" %08x\n", es
, en
, *miss
, en
, *cmp
,
768 env
->spr
[SPR_HASH1
], env
->spr
[SPR_HASH2
],
772 msr
|= env
->crf
[0] << 28;
773 msr
|= env
->error_code
; /* key, D/I, S/L bits */
774 /* Set way using a LRU mechanism */
775 msr
|= ((env
->last_way
+ 1) & (env
->nb_ways
- 1)) << 17;
777 case POWERPC_EXCP_74xx
:
779 #if defined(DEBUG_SOFTWARE_TLB)
780 if (qemu_log_enabled()) {
782 target_ulong
*miss
, *cmp
;
785 if (excp
== POWERPC_EXCP_IFTLB
) {
788 miss
= &env
->spr
[SPR_TLBMISS
];
789 cmp
= &env
->spr
[SPR_PTEHI
];
791 if (excp
== POWERPC_EXCP_DLTLB
) {
797 miss
= &env
->spr
[SPR_TLBMISS
];
798 cmp
= &env
->spr
[SPR_PTEHI
];
800 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx
" %cC "
801 TARGET_FMT_lx
" %08x\n", es
, en
, *miss
, en
, *cmp
,
805 msr
|= env
->error_code
; /* key bit */
808 cpu_abort(cs
, "Invalid data store TLB miss exception\n");
812 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
814 cpu_abort(cs
, "Floating point assist exception "
815 "is not implemented yet !\n");
817 case POWERPC_EXCP_DABR
: /* Data address breakpoint */
819 cpu_abort(cs
, "DABR exception is not implemented yet !\n");
821 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
823 cpu_abort(cs
, "IABR exception is not implemented yet !\n");
825 case POWERPC_EXCP_SMI
: /* System management interrupt */
827 cpu_abort(cs
, "SMI exception is not implemented yet !\n");
829 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
831 cpu_abort(cs
, "Thermal management exception "
832 "is not implemented yet !\n");
834 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor interrupt */
837 "Performance counter exception is not implemented yet !\n");
839 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
841 cpu_abort(cs
, "VPU assist exception is not implemented yet !\n");
843 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
846 "970 soft-patch exception is not implemented yet !\n");
848 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
851 "970 maintenance exception is not implemented yet !\n");
853 case POWERPC_EXCP_MEXTBR
: /* Maskable external breakpoint */
855 cpu_abort(cs
, "Maskable external exception "
856 "is not implemented yet !\n");
858 case POWERPC_EXCP_NMEXTBR
: /* Non maskable external breakpoint */
860 cpu_abort(cs
, "Non maskable external exception "
861 "is not implemented yet !\n");
865 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
870 if (!(env
->msr_mask
& MSR_HVB
)) {
871 if (new_msr
& MSR_HVB
) {
872 cpu_abort(cs
, "Trying to deliver HV exception (MSR) %d with "
873 "no HV support\n", excp
);
875 if (srr0
== SPR_HSRR0
) {
876 cpu_abort(cs
, "Trying to deliver HV exception (HSRR) %d with "
877 "no HV support\n", excp
);
882 * Sort out endianness of interrupt, this differs depending on the
883 * CPU, the HV mode, etc...
886 if (excp_model
== POWERPC_EXCP_POWER7
) {
887 if (!(new_msr
& MSR_HVB
) && (env
->spr
[SPR_LPCR
] & LPCR_ILE
)) {
888 new_msr
|= (target_ulong
)1 << MSR_LE
;
890 } else if (excp_model
== POWERPC_EXCP_POWER8
) {
891 if (new_msr
& MSR_HVB
) {
892 if (env
->spr
[SPR_HID0
] & HID0_HILE
) {
893 new_msr
|= (target_ulong
)1 << MSR_LE
;
895 } else if (env
->spr
[SPR_LPCR
] & LPCR_ILE
) {
896 new_msr
|= (target_ulong
)1 << MSR_LE
;
898 } else if (excp_model
== POWERPC_EXCP_POWER9
||
899 excp_model
== POWERPC_EXCP_POWER10
) {
900 if (new_msr
& MSR_HVB
) {
901 if (env
->spr
[SPR_HID0
] & HID0_POWER9_HILE
) {
902 new_msr
|= (target_ulong
)1 << MSR_LE
;
904 } else if (env
->spr
[SPR_LPCR
] & LPCR_ILE
) {
905 new_msr
|= (target_ulong
)1 << MSR_LE
;
907 } else if (msr_ile
) {
908 new_msr
|= (target_ulong
)1 << MSR_LE
;
912 new_msr
|= (target_ulong
)1 << MSR_LE
;
916 vector
= env
->excp_vectors
[excp
];
917 if (vector
== (target_ulong
)-1ULL) {
918 cpu_abort(cs
, "Raised an exception without defined vector %d\n",
922 vector
|= env
->excp_prefix
;
924 /* If any alternate SRR register are defined, duplicate saved values */
926 env
->spr
[asrr0
] = env
->nip
;
929 env
->spr
[asrr1
] = msr
;
932 #if defined(TARGET_PPC64)
933 if (excp_model
== POWERPC_EXCP_BOOKE
) {
934 if (env
->spr
[SPR_BOOKE_EPCR
] & EPCR_ICM
) {
935 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
936 new_msr
|= (target_ulong
)1 << MSR_CM
;
938 vector
= (uint32_t)vector
;
941 if (!msr_isf
&& !mmu_is_64bit(env
->mmu_model
)) {
942 vector
= (uint32_t)vector
;
944 new_msr
|= (target_ulong
)1 << MSR_SF
;
949 if (excp
!= POWERPC_EXCP_SYSCALL_VECTORED
) {
951 env
->spr
[srr0
] = env
->nip
;
954 env
->spr
[srr1
] = msr
;
956 #if defined(TARGET_PPC64)
958 vector
+= lev
* 0x20;
965 /* This can update new_msr and vector if AIL applies */
966 ppc_excp_apply_ail(cpu
, excp_model
, excp
, msr
, &new_msr
, &vector
);
968 powerpc_set_excp_state(cpu
, vector
, new_msr
);
971 void ppc_cpu_do_interrupt(CPUState
*cs
)
973 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
974 CPUPPCState
*env
= &cpu
->env
;
976 powerpc_excp(cpu
, env
->excp_model
, cs
->exception_index
);
979 static void ppc_hw_interrupt(CPUPPCState
*env
)
981 PowerPCCPU
*cpu
= env_archcpu(env
);
985 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_RESET
)) {
986 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_RESET
);
987 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_RESET
);
990 /* Machine check exception */
991 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_MCK
)) {
992 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_MCK
);
993 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_MCHECK
);
997 /* External debug exception */
998 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DEBUG
)) {
999 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DEBUG
);
1000 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_DEBUG
);
1006 * For interrupts that gate on MSR:EE, we need to do something a
1007 * bit more subtle, as we need to let them through even when EE is
1008 * clear when coming out of some power management states (in order
1009 * for them to become a 0x100).
1011 async_deliver
= (msr_ee
!= 0) || env
->resume_as_sreset
;
1013 /* Hypervisor decrementer exception */
1014 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_HDECR
)) {
1015 /* LPCR will be clear when not supported so this will work */
1016 bool hdice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HDICE
);
1017 if ((async_deliver
|| msr_hv
== 0) && hdice
) {
1018 /* HDEC clears on delivery */
1019 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_HDECR
);
1020 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_HDECR
);
1025 /* Hypervisor virtualization interrupt */
1026 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_HVIRT
)) {
1027 /* LPCR will be clear when not supported so this will work */
1028 bool hvice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HVICE
);
1029 if ((async_deliver
|| msr_hv
== 0) && hvice
) {
1030 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_HVIRT
);
1035 /* External interrupt can ignore MSR:EE under some circumstances */
1036 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_EXT
)) {
1037 bool lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
1038 bool heic
= !!(env
->spr
[SPR_LPCR
] & LPCR_HEIC
);
1039 /* HEIC blocks delivery to the hypervisor */
1040 if ((async_deliver
&& !(heic
&& msr_hv
&& !msr_pr
)) ||
1041 (env
->has_hv_mode
&& msr_hv
== 0 && !lpes0
)) {
1042 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_EXTERNAL
);
1047 /* External critical interrupt */
1048 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_CEXT
)) {
1049 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_CRITICAL
);
1053 if (async_deliver
!= 0) {
1054 /* Watchdog timer on embedded PowerPC */
1055 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_WDT
)) {
1056 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_WDT
);
1057 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_WDT
);
1060 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_CDOORBELL
)) {
1061 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_CDOORBELL
);
1062 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_DOORCI
);
1065 /* Fixed interval timer on embedded PowerPC */
1066 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_FIT
)) {
1067 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_FIT
);
1068 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_FIT
);
1071 /* Programmable interval timer on embedded PowerPC */
1072 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_PIT
)) {
1073 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_PIT
);
1074 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_PIT
);
1077 /* Decrementer exception */
1078 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DECR
)) {
1079 if (ppc_decr_clear_on_delivery(env
)) {
1080 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DECR
);
1082 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_DECR
);
1085 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DOORBELL
)) {
1086 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DOORBELL
);
1087 if (is_book3s_arch2x(env
)) {
1088 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_SDOOR
);
1090 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_DOORI
);
1094 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_HDOORBELL
)) {
1095 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_HDOORBELL
);
1096 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_SDOOR_HV
);
1099 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_PERFM
)) {
1100 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_PERFM
);
1101 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_PERFM
);
1104 /* Thermal interrupt */
1105 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_THERM
)) {
1106 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_THERM
);
1107 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_THERM
);
1112 if (env
->resume_as_sreset
) {
1114 * This is a bug ! It means that has_work took us out of halt without
1115 * anything to deliver while in a PM state that requires getting
1118 * This means we will incorrectly execute past the power management
1119 * instruction instead of triggering a reset.
1121 * It generally means a discrepancy between the wakeup conditions in the
1122 * processor has_work implementation and the logic in this function.
1124 cpu_abort(env_cpu(env
),
1125 "Wakeup from PM state but interrupt Undelivered");
1129 void ppc_cpu_do_system_reset(CPUState
*cs
)
1131 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1132 CPUPPCState
*env
= &cpu
->env
;
1134 powerpc_excp(cpu
, env
->excp_model
, POWERPC_EXCP_RESET
);
1137 void ppc_cpu_do_fwnmi_machine_check(CPUState
*cs
, target_ulong vector
)
1139 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1140 CPUPPCState
*env
= &cpu
->env
;
1141 PowerPCCPUClass
*pcc
= POWERPC_CPU_GET_CLASS(cpu
);
1142 target_ulong msr
= 0;
1145 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
1148 msr
= (1ULL << MSR_ME
);
1149 msr
|= env
->msr
& (1ULL << MSR_SF
);
1150 if (!(*pcc
->interrupts_big_endian
)(cpu
)) {
1151 msr
|= (1ULL << MSR_LE
);
1154 powerpc_set_excp_state(cpu
, vector
, msr
);
1156 #endif /* !CONFIG_USER_ONLY */
/*
 * cpu_exec hook: deliver pending hardware interrupts.
 *
 * Returns true when a CPU_INTERRUPT_HARD request was processed; the
 * HARD flag is cleared once no interrupts remain pending.
 */
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}
#if defined(DEBUG_OP)
/* Debug trace for rfi-style returns: log target address and new MSR. */
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

/*
 * Raise 'exception' with 'error_code', unwinding guest state to the
 * host return address 'raddr' (0 means no unwinding is needed).
 * Does not return.
 */
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}
/* As raise_exception_err_ra() with no state unwinding (raddr == 0). */
void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}
/* Raise 'exception' with no error code and no state unwinding. */
void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
/* Raise 'exception' with no error code, unwinding to 'raddr'. */
void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}
/* TCG helper: raise 'exception' with 'error_code' from generated code. */
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}
/* TCG helper: raise 'exception' with no error code. */
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
#if !defined(CONFIG_USER_ONLY)
/*
 * mtmsr helper: store 'val' into the MSR.  If hreg_store_msr() reports
 * that an exception must be taken as a consequence, end the current TB
 * and raise it.
 */
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}
#if defined(TARGET_PPC64)
/*
 * scv helper: system call vectored, with level 'lev'.  Only permitted
 * when FSCR[SCV] is set; otherwise a facility-unavailable exception is
 * raised instead.
 */
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}
/*
 * Power-management instruction (doze/nap/sleep/rvwinkle/stop): halt the
 * CPU and record how it should be woken up.
 */
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /*
     * The architecture specifies that HDEC interrupts are discarded
     * in PM states.
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 (system reset vector) */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
/*
 * Common implementation of the rfi instruction family: load NIP and MSR
 * from the given save/restore values and resynchronize CPU state.
 */
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);  /* instructions are word-aligned */
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation (larx/stcx) */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}
/* rfi: return from interrupt using SRR0/SRR1 (MSR restricted to 32 bits). */
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}
/* NOTE(review): this macro expands to nothing and appears unused here —
 * candidate for removal, confirm against the rest of the file. */
#define MSR_BOOK3S_MASK

#if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here.
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}
/* rfscv: return from system call vectored, using LR (nip) and CTR (msr). */
void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}
/* hrfid: hypervisor return from interrupt using HSRR0/HSRR1. */
void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* 40x rfci: return from critical interrupt using SRR2/SRR3. */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}
/* BookE rfci: return from critical interrupt using CSRR0/CSRR1. */
void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}
/* BookE rfdi: return from debug interrupt using DSRR0/DSRR1. */
void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}
/* BookE rfmci: return from machine check interrupt using MCSRR0/MCSRR1. */
void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif
/*
 * tw: trap word.  Raise a TRAP program exception if any condition bit
 * in 'flags' matches the 32-bit comparison of arg1 and arg2:
 * 0x10 lt signed, 0x08 gt signed, 0x04 eq, 0x02 lt unsigned,
 * 0x01 gt unsigned.
 */
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#if defined(TARGET_PPC64)
/*
 * td: trap doubleword.  64-bit variant of helper_tw(); same flag
 * encoding (0x10 lt signed, 0x08 gt signed, 0x04 eq, 0x02 lt unsigned,
 * 0x01 gt unsigned).
 */
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif
#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

/* 601 rfsvc: return from service call using LR and the low 16 bits of CTR. */
void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}
1387 /* Embedded.Processor Control */
1388 static int dbell2irq(target_ulong rb
)
1390 int msg
= rb
& DBELL_TYPE_MASK
;
1394 case DBELL_TYPE_DBELL
:
1395 irq
= PPC_INTERRUPT_DOORBELL
;
1397 case DBELL_TYPE_DBELL_CRIT
:
1398 irq
= PPC_INTERRUPT_CDOORBELL
;
1400 case DBELL_TYPE_G_DBELL
:
1401 case DBELL_TYPE_G_DBELL_CRIT
:
1402 case DBELL_TYPE_G_DBELL_MC
:
1411 void helper_msgclr(CPUPPCState
*env
, target_ulong rb
)
1413 int irq
= dbell2irq(rb
);
1419 env
->pending_interrupts
&= ~(1 << irq
);
/*
 * msgsnd: post a doorbell interrupt to every CPU whose BOOKE_PIR
 * matches the tag in 'rb' (or to all CPUs when DBELL_BRDCAST is set).
 * Reserved message types are a no-op.
 */
void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    /* iothread lock protects the cross-CPU interrupt delivery */
    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}
1445 /* Server Processor Control */
1447 static bool dbell_type_server(target_ulong rb
)
1450 * A Directed Hypervisor Doorbell message is sent only if the
1451 * message type is 5. All other types are reserved and the
1452 * instruction is a no-op
1454 return (rb
& DBELL_TYPE_MASK
) == DBELL_TYPE_DBELL_SERVER
;
/* book3s msgclr: clear a pending hypervisor doorbell (type 5 only). */
void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}
/* Deliver doorbell interrupt 'irq' to the CPU whose PIR equals 'pir'. */
static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    /* iothread lock protects the cross-CPU interrupt delivery */
    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}
/* book3s msgsnd: send a hypervisor doorbell to the processor tagged in rb. */
void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}
#if defined(TARGET_PPC64)
/* book3s msgclrp: clear a pending directed privileged doorbell. */
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    /* msgclrp requires the MSGP facility; traps if HFSCR denies it */
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}
/*
 * book3s msgsndp: sends a message to other threads that are on the
 * same multi-threaded processor.
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    /* msgsndp requires the MSGP facility; traps if HFSCR denies it */
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */
    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */
1528 void ppc_cpu_do_unaligned_access(CPUState
*cs
, vaddr vaddr
,
1529 MMUAccessType access_type
,
1530 int mmu_idx
, uintptr_t retaddr
)
1532 CPUPPCState
*env
= cs
->env_ptr
;
1535 /* Restore state and reload the insn we executed, for filling in DSISR. */
1536 cpu_restore_state(cs
, retaddr
, true);
1537 insn
= cpu_ldl_code(env
, env
->nip
);
1539 cs
->exception_index
= POWERPC_EXCP_ALIGN
;
1540 env
->error_code
= insn
& 0x03FF0000;