/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
23 #include "exec/exec-all.h"
25 #include "helper_regs.h"
26 #include "hw/ppc/ppc.h"
31 #include "sysemu/tcg.h"
32 #include "exec/helper-proto.h"
33 #include "exec/cpu_ldst.h"
36 /*****************************************************************************/
37 /* Exception processing */
38 #if !defined(CONFIG_USER_ONLY)
40 static const char *powerpc_excp_name(int excp
)
43 case POWERPC_EXCP_CRITICAL
: return "CRITICAL";
44 case POWERPC_EXCP_MCHECK
: return "MCHECK";
45 case POWERPC_EXCP_DSI
: return "DSI";
46 case POWERPC_EXCP_ISI
: return "ISI";
47 case POWERPC_EXCP_EXTERNAL
: return "EXTERNAL";
48 case POWERPC_EXCP_ALIGN
: return "ALIGN";
49 case POWERPC_EXCP_PROGRAM
: return "PROGRAM";
50 case POWERPC_EXCP_FPU
: return "FPU";
51 case POWERPC_EXCP_SYSCALL
: return "SYSCALL";
52 case POWERPC_EXCP_APU
: return "APU";
53 case POWERPC_EXCP_DECR
: return "DECR";
54 case POWERPC_EXCP_FIT
: return "FIT";
55 case POWERPC_EXCP_WDT
: return "WDT";
56 case POWERPC_EXCP_DTLB
: return "DTLB";
57 case POWERPC_EXCP_ITLB
: return "ITLB";
58 case POWERPC_EXCP_DEBUG
: return "DEBUG";
59 case POWERPC_EXCP_SPEU
: return "SPEU";
60 case POWERPC_EXCP_EFPDI
: return "EFPDI";
61 case POWERPC_EXCP_EFPRI
: return "EFPRI";
62 case POWERPC_EXCP_EPERFM
: return "EPERFM";
63 case POWERPC_EXCP_DOORI
: return "DOORI";
64 case POWERPC_EXCP_DOORCI
: return "DOORCI";
65 case POWERPC_EXCP_GDOORI
: return "GDOORI";
66 case POWERPC_EXCP_GDOORCI
: return "GDOORCI";
67 case POWERPC_EXCP_HYPPRIV
: return "HYPPRIV";
68 case POWERPC_EXCP_RESET
: return "RESET";
69 case POWERPC_EXCP_DSEG
: return "DSEG";
70 case POWERPC_EXCP_ISEG
: return "ISEG";
71 case POWERPC_EXCP_HDECR
: return "HDECR";
72 case POWERPC_EXCP_TRACE
: return "TRACE";
73 case POWERPC_EXCP_HDSI
: return "HDSI";
74 case POWERPC_EXCP_HISI
: return "HISI";
75 case POWERPC_EXCP_HDSEG
: return "HDSEG";
76 case POWERPC_EXCP_HISEG
: return "HISEG";
77 case POWERPC_EXCP_VPU
: return "VPU";
78 case POWERPC_EXCP_PIT
: return "PIT";
79 case POWERPC_EXCP_EMUL
: return "EMUL";
80 case POWERPC_EXCP_IFTLB
: return "IFTLB";
81 case POWERPC_EXCP_DLTLB
: return "DLTLB";
82 case POWERPC_EXCP_DSTLB
: return "DSTLB";
83 case POWERPC_EXCP_FPA
: return "FPA";
84 case POWERPC_EXCP_DABR
: return "DABR";
85 case POWERPC_EXCP_IABR
: return "IABR";
86 case POWERPC_EXCP_SMI
: return "SMI";
87 case POWERPC_EXCP_PERFM
: return "PERFM";
88 case POWERPC_EXCP_THERM
: return "THERM";
89 case POWERPC_EXCP_VPUA
: return "VPUA";
90 case POWERPC_EXCP_SOFTP
: return "SOFTP";
91 case POWERPC_EXCP_MAINT
: return "MAINT";
92 case POWERPC_EXCP_MEXTBR
: return "MEXTBR";
93 case POWERPC_EXCP_NMEXTBR
: return "NMEXTBR";
94 case POWERPC_EXCP_ITLBE
: return "ITLBE";
95 case POWERPC_EXCP_DTLBE
: return "DTLBE";
96 case POWERPC_EXCP_VSXU
: return "VSXU";
97 case POWERPC_EXCP_FU
: return "FU";
98 case POWERPC_EXCP_HV_EMU
: return "HV_EMU";
99 case POWERPC_EXCP_HV_MAINT
: return "HV_MAINT";
100 case POWERPC_EXCP_HV_FU
: return "HV_FU";
101 case POWERPC_EXCP_SDOOR
: return "SDOOR";
102 case POWERPC_EXCP_SDOOR_HV
: return "SDOOR_HV";
103 case POWERPC_EXCP_HVIRT
: return "HVIRT";
104 case POWERPC_EXCP_SYSCALL_VECTORED
: return "SYSCALL_VECTORED";
106 g_assert_not_reached();
110 static void dump_syscall(CPUPPCState
*env
)
112 qemu_log_mask(CPU_LOG_INT
, "syscall r0=%016" PRIx64
113 " r3=%016" PRIx64
" r4=%016" PRIx64
" r5=%016" PRIx64
114 " r6=%016" PRIx64
" r7=%016" PRIx64
" r8=%016" PRIx64
115 " nip=" TARGET_FMT_lx
"\n",
116 ppc_dump_gpr(env
, 0), ppc_dump_gpr(env
, 3),
117 ppc_dump_gpr(env
, 4), ppc_dump_gpr(env
, 5),
118 ppc_dump_gpr(env
, 6), ppc_dump_gpr(env
, 7),
119 ppc_dump_gpr(env
, 8), env
->nip
);
122 static void dump_hcall(CPUPPCState
*env
)
124 qemu_log_mask(CPU_LOG_INT
, "hypercall r3=%016" PRIx64
125 " r4=%016" PRIx64
" r5=%016" PRIx64
" r6=%016" PRIx64
126 " r7=%016" PRIx64
" r8=%016" PRIx64
" r9=%016" PRIx64
127 " r10=%016" PRIx64
" r11=%016" PRIx64
" r12=%016" PRIx64
128 " nip=" TARGET_FMT_lx
"\n",
129 ppc_dump_gpr(env
, 3), ppc_dump_gpr(env
, 4),
130 ppc_dump_gpr(env
, 5), ppc_dump_gpr(env
, 6),
131 ppc_dump_gpr(env
, 7), ppc_dump_gpr(env
, 8),
132 ppc_dump_gpr(env
, 9), ppc_dump_gpr(env
, 10),
133 ppc_dump_gpr(env
, 11), ppc_dump_gpr(env
, 12),
138 /* Return true iff byteswap is needed to load instruction */
139 static inline bool insn_need_byteswap(CPUArchState
*env
)
141 /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
142 return !!(env
->msr
& ((target_ulong
)1 << MSR_LE
));
145 static uint32_t ppc_ldl_code(CPUArchState
*env
, abi_ptr addr
)
147 uint32_t insn
= cpu_ldl_code(env
, addr
);
149 if (insn_need_byteswap(env
)) {
150 insn
= bswap32(insn
);
157 static void ppc_excp_debug_sw_tlb(CPUPPCState
*env
, int excp
)
160 target_ulong
*miss
, *cmp
;
163 if (!qemu_loglevel_mask(CPU_LOG_MMU
)) {
167 if (excp
== POWERPC_EXCP_IFTLB
) {
170 miss
= &env
->spr
[SPR_IMISS
];
171 cmp
= &env
->spr
[SPR_ICMP
];
173 if (excp
== POWERPC_EXCP_DLTLB
) {
179 miss
= &env
->spr
[SPR_DMISS
];
180 cmp
= &env
->spr
[SPR_DCMP
];
182 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx
" %cC "
183 TARGET_FMT_lx
" H1 " TARGET_FMT_lx
" H2 "
184 TARGET_FMT_lx
" %08x\n", es
, en
, *miss
, en
, *cmp
,
185 env
->spr
[SPR_HASH1
], env
->spr
[SPR_HASH2
],
189 #if defined(TARGET_PPC64)
190 static int powerpc_reset_wakeup(CPUState
*cs
, CPUPPCState
*env
, int excp
,
193 /* We no longer are in a PM state */
194 env
->resume_as_sreset
= false;
196 /* Pretend to be returning from doze always as we don't lose state */
197 *msr
|= SRR1_WS_NOLOSS
;
199 /* Machine checks are sent normally */
200 if (excp
== POWERPC_EXCP_MCHECK
) {
204 case POWERPC_EXCP_RESET
:
205 *msr
|= SRR1_WAKERESET
;
207 case POWERPC_EXCP_EXTERNAL
:
210 case POWERPC_EXCP_DECR
:
211 *msr
|= SRR1_WAKEDEC
;
213 case POWERPC_EXCP_SDOOR
:
214 *msr
|= SRR1_WAKEDBELL
;
216 case POWERPC_EXCP_SDOOR_HV
:
217 *msr
|= SRR1_WAKEHDBELL
;
219 case POWERPC_EXCP_HV_MAINT
:
220 *msr
|= SRR1_WAKEHMI
;
222 case POWERPC_EXCP_HVIRT
:
223 *msr
|= SRR1_WAKEHVI
;
226 cpu_abort(cs
, "Unsupported exception %d in Power Save mode\n",
229 return POWERPC_EXCP_RESET
;
/*
 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
 * taken with the MMU on, and which uses an alternate location (e.g., so the
 * kernel/hv can map the vectors there with an effective address).
 *
 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they
 * are delivered in this way. AIL requires the LPCR to be set to enable this
 * mode, and then a number of conditions have to be true for AIL to apply.
 *
 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
 * they specifically want to be in real mode (e.g., the MCE might be signaling
 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
 *
 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
 * radix mode (LPCR[HR]).
 *
 * POWER8, POWER9 with LPCR[HR]=0
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | 0       | 1           | 0   |
 * | a         | 11          | 1       | 1           | a   |
 * | a         | 11          | 0       | 0           | a   |
 * +-------------------------------------------------------+
 *
 * POWER9 with LPCR[HR]=1
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | x       | x           | a   |
 * +-------------------------------------------------------+
 *
 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
 * the hypervisor in AIL mode if the guest is radix. This is good for
 * performance but allows the guest to influence the AIL of hypervisor
 * interrupts using its MSR, and also the hypervisor must disallow guest
 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
 * use AIL for its MSR[HV] 0->1 interrupts.
 *
 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
 * MSR[HV] 1->1).
 *
 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
 *
 * POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * | a         | h          | 00/01/10    | 0       | 0           | 0   |
 * | a         | h          | 11          | 0       | 0           | a   |
 * | a         | h          | x           | 0       | 1           | h   |
 * | a         | h          | 00/01/10    | 1       | 1           | 0   |
 * | a         | h          | 11          | 1       | 1           | h   |
 * +--------------------------------------------------------------------+
 */
288 static void ppc_excp_apply_ail(PowerPCCPU
*cpu
, int excp
, target_ulong msr
,
289 target_ulong
*new_msr
, target_ulong
*vector
)
291 PowerPCCPUClass
*pcc
= POWERPC_CPU_GET_CLASS(cpu
);
292 CPUPPCState
*env
= &cpu
->env
;
293 bool mmu_all_on
= ((msr
>> MSR_IR
) & 1) && ((msr
>> MSR_DR
) & 1);
294 bool hv_escalation
= !(msr
& MSR_HVB
) && (*new_msr
& MSR_HVB
);
297 if (excp
== POWERPC_EXCP_MCHECK
||
298 excp
== POWERPC_EXCP_RESET
||
299 excp
== POWERPC_EXCP_HV_MAINT
) {
300 /* SRESET, MCE, HMI never apply AIL */
304 if (!(pcc
->lpcr_mask
& LPCR_AIL
)) {
305 /* This CPU does not have AIL */
310 if (!(pcc
->lpcr_mask
& LPCR_HAIL
)) {
312 /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
315 if (hv_escalation
&& !(env
->spr
[SPR_LPCR
] & LPCR_HR
)) {
317 * AIL does not work if there is a MSR[HV] 0->1 transition and the
318 * partition is in HPT mode. For radix guests, such interrupts are
319 * allowed to be delivered to the hypervisor in ail mode.
324 ail
= (env
->spr
[SPR_LPCR
] & LPCR_AIL
) >> LPCR_AIL_SHIFT
;
329 /* AIL=1 is reserved, treat it like AIL=0 */
335 if (!mmu_all_on
&& !hv_escalation
) {
337 * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
338 * Guest->guest and HV->HV interrupts do require MMU on.
343 if (*new_msr
& MSR_HVB
) {
344 if (!(env
->spr
[SPR_LPCR
] & LPCR_HAIL
)) {
345 /* HV interrupts depend on LPCR[HAIL] */
348 ail
= 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
350 ail
= (env
->spr
[SPR_LPCR
] & LPCR_AIL
) >> LPCR_AIL_SHIFT
;
355 if (ail
== 1 || ail
== 2) {
356 /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
362 * AIL applies, so the new MSR gets IR and DR set, and an offset applied
365 *new_msr
|= (1 << MSR_IR
) | (1 << MSR_DR
);
367 if (excp
!= POWERPC_EXCP_SYSCALL_VECTORED
) {
369 *vector
|= 0x0000000000018000ull
;
370 } else if (ail
== 3) {
371 *vector
|= 0xc000000000004000ull
;
375 * scv AIL is a little different. AIL=2 does not change the address,
376 * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
379 *vector
&= ~0x0000000000017000ull
; /* Un-apply the base offset */
380 *vector
|= 0xc000000000003000ull
; /* Apply scv's AIL=3 offset */
386 static void powerpc_reset_excp_state(PowerPCCPU
*cpu
)
388 CPUState
*cs
= CPU(cpu
);
389 CPUPPCState
*env
= &cpu
->env
;
391 /* Reset exception state */
392 cs
->exception_index
= POWERPC_EXCP_NONE
;
396 static void powerpc_set_excp_state(PowerPCCPU
*cpu
, target_ulong vector
,
399 CPUPPCState
*env
= &cpu
->env
;
401 assert((msr
& env
->msr_mask
) == msr
);
404 * We don't use hreg_store_msr here as already have treated any
405 * special case that could occur. Just store MSR and update hflags
407 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
408 * will prevent setting of the HV bit which some exceptions might need
413 hreg_compute_hflags(env
);
414 ppc_maybe_interrupt(env
);
416 powerpc_reset_excp_state(cpu
);
419 * Any interrupt is context synchronizing, check if TCG TLB needs
420 * a delayed flush on ppc64
422 check_tlb_flush(env
, false);
424 /* Reset the reservation */
425 env
->reserve_addr
= -1;
428 static void powerpc_excp_40x(PowerPCCPU
*cpu
, int excp
)
430 CPUState
*cs
= CPU(cpu
);
431 CPUPPCState
*env
= &cpu
->env
;
432 target_ulong msr
, new_msr
, vector
;
435 /* new srr1 value excluding must-be-zero bits */
436 msr
= env
->msr
& ~0x783f0000ULL
;
439 * new interrupt handler msr preserves existing ME unless
440 * explicitly overriden.
442 new_msr
= env
->msr
& (((target_ulong
)1 << MSR_ME
));
444 /* target registers */
449 * Hypervisor emulation assistance interrupt only exists on server
450 * arch 2.05 server or later.
452 if (excp
== POWERPC_EXCP_HV_EMU
) {
453 excp
= POWERPC_EXCP_PROGRAM
;
456 vector
= env
->excp_vectors
[excp
];
457 if (vector
== (target_ulong
)-1ULL) {
458 cpu_abort(cs
, "Raised an exception without defined vector %d\n",
462 vector
|= env
->excp_prefix
;
465 case POWERPC_EXCP_CRITICAL
: /* Critical input */
469 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
470 if (!FIELD_EX64(env
->msr
, MSR
, ME
)) {
472 * Machine check exception is not enabled. Enter
475 fprintf(stderr
, "Machine check while not allowed. "
476 "Entering checkstop state\n");
477 if (qemu_log_separate()) {
478 qemu_log("Machine check while not allowed. "
479 "Entering checkstop state\n");
482 cpu_interrupt_exittb(cs
);
485 /* machine check exceptions don't have ME set */
486 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
491 case POWERPC_EXCP_DSI
: /* Data storage exception */
492 trace_ppc_excp_dsi(env
->spr
[SPR_40x_ESR
], env
->spr
[SPR_40x_DEAR
]);
494 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
495 trace_ppc_excp_isi(msr
, env
->nip
);
497 case POWERPC_EXCP_EXTERNAL
: /* External input */
499 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
501 case POWERPC_EXCP_PROGRAM
: /* Program exception */
502 switch (env
->error_code
& ~0xF) {
503 case POWERPC_EXCP_FP
:
504 if (!FIELD_EX64_FE(env
->msr
) || !FIELD_EX64(env
->msr
, MSR
, FP
)) {
505 trace_ppc_excp_fp_ignore();
506 powerpc_reset_excp_state(cpu
);
509 env
->spr
[SPR_40x_ESR
] = ESR_FP
;
511 case POWERPC_EXCP_INVAL
:
512 trace_ppc_excp_inval(env
->nip
);
513 env
->spr
[SPR_40x_ESR
] = ESR_PIL
;
515 case POWERPC_EXCP_PRIV
:
516 env
->spr
[SPR_40x_ESR
] = ESR_PPR
;
518 case POWERPC_EXCP_TRAP
:
519 env
->spr
[SPR_40x_ESR
] = ESR_PTR
;
522 cpu_abort(cs
, "Invalid program exception %d. Aborting\n",
527 case POWERPC_EXCP_SYSCALL
: /* System call exception */
531 * We need to correct the NIP which in this case is supposed
532 * to point to the next instruction
536 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
537 trace_ppc_excp_print("FIT");
539 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
540 trace_ppc_excp_print("WDT");
542 case POWERPC_EXCP_DTLB
: /* Data TLB error */
543 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
545 case POWERPC_EXCP_PIT
: /* Programmable interval timer interrupt */
546 trace_ppc_excp_print("PIT");
548 case POWERPC_EXCP_DEBUG
: /* Debug interrupt */
549 cpu_abort(cs
, "%s exception not implemented\n",
550 powerpc_excp_name(excp
));
553 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
558 env
->spr
[srr0
] = env
->nip
;
561 env
->spr
[srr1
] = msr
;
563 powerpc_set_excp_state(cpu
, vector
, new_msr
);
566 static void powerpc_excp_6xx(PowerPCCPU
*cpu
, int excp
)
568 CPUState
*cs
= CPU(cpu
);
569 CPUPPCState
*env
= &cpu
->env
;
570 target_ulong msr
, new_msr
, vector
;
572 /* new srr1 value excluding must-be-zero bits */
573 msr
= env
->msr
& ~0x783f0000ULL
;
576 * new interrupt handler msr preserves existing ME unless
577 * explicitly overriden
579 new_msr
= env
->msr
& ((target_ulong
)1 << MSR_ME
);
582 * Hypervisor emulation assistance interrupt only exists on server
583 * arch 2.05 server or later.
585 if (excp
== POWERPC_EXCP_HV_EMU
) {
586 excp
= POWERPC_EXCP_PROGRAM
;
589 vector
= env
->excp_vectors
[excp
];
590 if (vector
== (target_ulong
)-1ULL) {
591 cpu_abort(cs
, "Raised an exception without defined vector %d\n",
595 vector
|= env
->excp_prefix
;
598 case POWERPC_EXCP_CRITICAL
: /* Critical input */
600 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
601 if (!FIELD_EX64(env
->msr
, MSR
, ME
)) {
603 * Machine check exception is not enabled. Enter
606 fprintf(stderr
, "Machine check while not allowed. "
607 "Entering checkstop state\n");
608 if (qemu_log_separate()) {
609 qemu_log("Machine check while not allowed. "
610 "Entering checkstop state\n");
613 cpu_interrupt_exittb(cs
);
616 /* machine check exceptions don't have ME set */
617 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
620 case POWERPC_EXCP_DSI
: /* Data storage exception */
621 trace_ppc_excp_dsi(env
->spr
[SPR_DSISR
], env
->spr
[SPR_DAR
]);
623 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
624 trace_ppc_excp_isi(msr
, env
->nip
);
625 msr
|= env
->error_code
;
627 case POWERPC_EXCP_EXTERNAL
: /* External input */
629 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
630 /* Get rS/rD and rA from faulting opcode */
632 * Note: the opcode fields will not be set properly for a
633 * direct store load/store, but nobody cares as nobody
634 * actually uses direct store segments.
636 env
->spr
[SPR_DSISR
] |= (env
->error_code
& 0x03FF0000) >> 16;
638 case POWERPC_EXCP_PROGRAM
: /* Program exception */
639 switch (env
->error_code
& ~0xF) {
640 case POWERPC_EXCP_FP
:
641 if (!FIELD_EX64_FE(env
->msr
) || !FIELD_EX64(env
->msr
, MSR
, FP
)) {
642 trace_ppc_excp_fp_ignore();
643 powerpc_reset_excp_state(cpu
);
648 * FP exceptions always have NIP pointing to the faulting
649 * instruction, so always use store_next and claim we are
650 * precise in the MSR.
654 case POWERPC_EXCP_INVAL
:
655 trace_ppc_excp_inval(env
->nip
);
658 case POWERPC_EXCP_PRIV
:
661 case POWERPC_EXCP_TRAP
:
665 /* Should never occur */
666 cpu_abort(cs
, "Invalid program exception %d. Aborting\n",
671 case POWERPC_EXCP_SYSCALL
: /* System call exception */
675 * We need to correct the NIP which in this case is supposed
676 * to point to the next instruction
680 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
681 case POWERPC_EXCP_DECR
: /* Decrementer exception */
683 case POWERPC_EXCP_DTLB
: /* Data TLB error */
684 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
686 case POWERPC_EXCP_RESET
: /* System reset exception */
687 if (FIELD_EX64(env
->msr
, MSR
, POW
)) {
688 cpu_abort(cs
, "Trying to deliver power-saving system reset "
689 "exception %d with no HV support\n", excp
);
692 case POWERPC_EXCP_TRACE
: /* Trace exception */
694 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
695 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
696 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
697 /* Swap temporary saved registers with GPRs */
698 if (!(new_msr
& ((target_ulong
)1 << MSR_TGPR
))) {
699 new_msr
|= (target_ulong
)1 << MSR_TGPR
;
700 hreg_swap_gpr_tgpr(env
);
703 ppc_excp_debug_sw_tlb(env
, excp
);
705 msr
|= env
->crf
[0] << 28;
706 msr
|= env
->error_code
; /* key, D/I, S/L bits */
707 /* Set way using a LRU mechanism */
708 msr
|= ((env
->last_way
+ 1) & (env
->nb_ways
- 1)) << 17;
710 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
711 case POWERPC_EXCP_DABR
: /* Data address breakpoint */
712 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
713 case POWERPC_EXCP_SMI
: /* System management interrupt */
714 case POWERPC_EXCP_MEXTBR
: /* Maskable external breakpoint */
715 case POWERPC_EXCP_NMEXTBR
: /* Non maskable external breakpoint */
716 cpu_abort(cs
, "%s exception not implemented\n",
717 powerpc_excp_name(excp
));
720 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
725 * Sort out endianness of interrupt, this differs depending on the
726 * CPU, the HV mode, etc...
728 if (ppc_interrupts_little_endian(cpu
, !!(new_msr
& MSR_HVB
))) {
729 new_msr
|= (target_ulong
)1 << MSR_LE
;
733 env
->spr
[SPR_SRR0
] = env
->nip
;
736 env
->spr
[SPR_SRR1
] = msr
;
738 powerpc_set_excp_state(cpu
, vector
, new_msr
);
741 static void powerpc_excp_7xx(PowerPCCPU
*cpu
, int excp
)
743 CPUState
*cs
= CPU(cpu
);
744 CPUPPCState
*env
= &cpu
->env
;
745 target_ulong msr
, new_msr
, vector
;
747 /* new srr1 value excluding must-be-zero bits */
748 msr
= env
->msr
& ~0x783f0000ULL
;
751 * new interrupt handler msr preserves existing ME unless
752 * explicitly overriden
754 new_msr
= env
->msr
& ((target_ulong
)1 << MSR_ME
);
757 * Hypervisor emulation assistance interrupt only exists on server
758 * arch 2.05 server or later.
760 if (excp
== POWERPC_EXCP_HV_EMU
) {
761 excp
= POWERPC_EXCP_PROGRAM
;
764 vector
= env
->excp_vectors
[excp
];
765 if (vector
== (target_ulong
)-1ULL) {
766 cpu_abort(cs
, "Raised an exception without defined vector %d\n",
770 vector
|= env
->excp_prefix
;
773 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
774 if (!FIELD_EX64(env
->msr
, MSR
, ME
)) {
776 * Machine check exception is not enabled. Enter
779 fprintf(stderr
, "Machine check while not allowed. "
780 "Entering checkstop state\n");
781 if (qemu_log_separate()) {
782 qemu_log("Machine check while not allowed. "
783 "Entering checkstop state\n");
786 cpu_interrupt_exittb(cs
);
789 /* machine check exceptions don't have ME set */
790 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
793 case POWERPC_EXCP_DSI
: /* Data storage exception */
794 trace_ppc_excp_dsi(env
->spr
[SPR_DSISR
], env
->spr
[SPR_DAR
]);
796 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
797 trace_ppc_excp_isi(msr
, env
->nip
);
798 msr
|= env
->error_code
;
800 case POWERPC_EXCP_EXTERNAL
: /* External input */
802 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
803 /* Get rS/rD and rA from faulting opcode */
805 * Note: the opcode fields will not be set properly for a
806 * direct store load/store, but nobody cares as nobody
807 * actually uses direct store segments.
809 env
->spr
[SPR_DSISR
] |= (env
->error_code
& 0x03FF0000) >> 16;
811 case POWERPC_EXCP_PROGRAM
: /* Program exception */
812 switch (env
->error_code
& ~0xF) {
813 case POWERPC_EXCP_FP
:
814 if (!FIELD_EX64_FE(env
->msr
) || !FIELD_EX64(env
->msr
, MSR
, FP
)) {
815 trace_ppc_excp_fp_ignore();
816 powerpc_reset_excp_state(cpu
);
821 * FP exceptions always have NIP pointing to the faulting
822 * instruction, so always use store_next and claim we are
823 * precise in the MSR.
827 case POWERPC_EXCP_INVAL
:
828 trace_ppc_excp_inval(env
->nip
);
831 case POWERPC_EXCP_PRIV
:
834 case POWERPC_EXCP_TRAP
:
838 /* Should never occur */
839 cpu_abort(cs
, "Invalid program exception %d. Aborting\n",
844 case POWERPC_EXCP_SYSCALL
: /* System call exception */
846 int lev
= env
->error_code
;
848 if (lev
== 1 && cpu
->vhyp
) {
855 * We need to correct the NIP which in this case is supposed
856 * to point to the next instruction
861 * The Virtual Open Firmware (VOF) relies on the 'sc 1'
862 * instruction to communicate with QEMU. The pegasos2 machine
863 * uses VOF and the 7xx CPUs, so although the 7xx don't have
864 * HV mode, we need to keep hypercall support.
866 if (lev
== 1 && cpu
->vhyp
) {
867 PPCVirtualHypervisorClass
*vhc
=
868 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu
->vhyp
);
869 vhc
->hypercall(cpu
->vhyp
, cpu
);
875 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
876 case POWERPC_EXCP_DECR
: /* Decrementer exception */
878 case POWERPC_EXCP_RESET
: /* System reset exception */
879 if (FIELD_EX64(env
->msr
, MSR
, POW
)) {
880 cpu_abort(cs
, "Trying to deliver power-saving system reset "
881 "exception %d with no HV support\n", excp
);
884 case POWERPC_EXCP_TRACE
: /* Trace exception */
886 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
887 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
888 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
889 ppc_excp_debug_sw_tlb(env
, excp
);
891 msr
|= env
->crf
[0] << 28;
892 msr
|= env
->error_code
; /* key, D/I, S/L bits */
893 /* Set way using a LRU mechanism */
894 msr
|= ((env
->last_way
+ 1) & (env
->nb_ways
- 1)) << 17;
897 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
898 case POWERPC_EXCP_SMI
: /* System management interrupt */
899 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
900 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor interrupt */
901 cpu_abort(cs
, "%s exception not implemented\n",
902 powerpc_excp_name(excp
));
905 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
910 * Sort out endianness of interrupt, this differs depending on the
911 * CPU, the HV mode, etc...
913 if (ppc_interrupts_little_endian(cpu
, !!(new_msr
& MSR_HVB
))) {
914 new_msr
|= (target_ulong
)1 << MSR_LE
;
918 env
->spr
[SPR_SRR0
] = env
->nip
;
921 env
->spr
[SPR_SRR1
] = msr
;
923 powerpc_set_excp_state(cpu
, vector
, new_msr
);
926 static void powerpc_excp_74xx(PowerPCCPU
*cpu
, int excp
)
928 CPUState
*cs
= CPU(cpu
);
929 CPUPPCState
*env
= &cpu
->env
;
930 target_ulong msr
, new_msr
, vector
;
932 /* new srr1 value excluding must-be-zero bits */
933 msr
= env
->msr
& ~0x783f0000ULL
;
936 * new interrupt handler msr preserves existing ME unless
937 * explicitly overriden
939 new_msr
= env
->msr
& ((target_ulong
)1 << MSR_ME
);
942 * Hypervisor emulation assistance interrupt only exists on server
943 * arch 2.05 server or later.
945 if (excp
== POWERPC_EXCP_HV_EMU
) {
946 excp
= POWERPC_EXCP_PROGRAM
;
949 vector
= env
->excp_vectors
[excp
];
950 if (vector
== (target_ulong
)-1ULL) {
951 cpu_abort(cs
, "Raised an exception without defined vector %d\n",
955 vector
|= env
->excp_prefix
;
958 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
959 if (!FIELD_EX64(env
->msr
, MSR
, ME
)) {
961 * Machine check exception is not enabled. Enter
964 fprintf(stderr
, "Machine check while not allowed. "
965 "Entering checkstop state\n");
966 if (qemu_log_separate()) {
967 qemu_log("Machine check while not allowed. "
968 "Entering checkstop state\n");
971 cpu_interrupt_exittb(cs
);
974 /* machine check exceptions don't have ME set */
975 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
978 case POWERPC_EXCP_DSI
: /* Data storage exception */
979 trace_ppc_excp_dsi(env
->spr
[SPR_DSISR
], env
->spr
[SPR_DAR
]);
981 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
982 trace_ppc_excp_isi(msr
, env
->nip
);
983 msr
|= env
->error_code
;
985 case POWERPC_EXCP_EXTERNAL
: /* External input */
987 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
988 /* Get rS/rD and rA from faulting opcode */
990 * Note: the opcode fields will not be set properly for a
991 * direct store load/store, but nobody cares as nobody
992 * actually uses direct store segments.
994 env
->spr
[SPR_DSISR
] |= (env
->error_code
& 0x03FF0000) >> 16;
996 case POWERPC_EXCP_PROGRAM
: /* Program exception */
997 switch (env
->error_code
& ~0xF) {
998 case POWERPC_EXCP_FP
:
999 if (!FIELD_EX64_FE(env
->msr
) || !FIELD_EX64(env
->msr
, MSR
, FP
)) {
1000 trace_ppc_excp_fp_ignore();
1001 powerpc_reset_excp_state(cpu
);
1006 * FP exceptions always have NIP pointing to the faulting
1007 * instruction, so always use store_next and claim we are
1008 * precise in the MSR.
1012 case POWERPC_EXCP_INVAL
:
1013 trace_ppc_excp_inval(env
->nip
);
1016 case POWERPC_EXCP_PRIV
:
1019 case POWERPC_EXCP_TRAP
:
1023 /* Should never occur */
1024 cpu_abort(cs
, "Invalid program exception %d. Aborting\n",
1029 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1031 int lev
= env
->error_code
;
1033 if (lev
== 1 && cpu
->vhyp
) {
1040 * We need to correct the NIP which in this case is supposed
1041 * to point to the next instruction
1046 * The Virtual Open Firmware (VOF) relies on the 'sc 1'
1047 * instruction to communicate with QEMU. The pegasos2 machine
1048 * uses VOF and the 74xx CPUs, so although the 74xx don't have
1049 * HV mode, we need to keep hypercall support.
1051 if (lev
== 1 && cpu
->vhyp
) {
1052 PPCVirtualHypervisorClass
*vhc
=
1053 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu
->vhyp
);
1054 vhc
->hypercall(cpu
->vhyp
, cpu
);
1060 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1061 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1063 case POWERPC_EXCP_RESET
: /* System reset exception */
1064 if (FIELD_EX64(env
->msr
, MSR
, POW
)) {
1065 cpu_abort(cs
, "Trying to deliver power-saving system reset "
1066 "exception %d with no HV support\n", excp
);
1069 case POWERPC_EXCP_TRACE
: /* Trace exception */
1071 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1073 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
1074 case POWERPC_EXCP_SMI
: /* System management interrupt */
1075 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1076 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor interrupt */
1077 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1078 cpu_abort(cs
, "%s exception not implemented\n",
1079 powerpc_excp_name(excp
));
1082 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
1087 * Sort out endianness of interrupt, this differs depending on the
1088 * CPU, the HV mode, etc...
1090 if (ppc_interrupts_little_endian(cpu
, !!(new_msr
& MSR_HVB
))) {
1091 new_msr
|= (target_ulong
)1 << MSR_LE
;
1095 env
->spr
[SPR_SRR0
] = env
->nip
;
1098 env
->spr
[SPR_SRR1
] = msr
;
1100 powerpc_set_excp_state(cpu
, vector
, new_msr
);
1103 static void powerpc_excp_booke(PowerPCCPU
*cpu
, int excp
)
1105 CPUState
*cs
= CPU(cpu
);
1106 CPUPPCState
*env
= &cpu
->env
;
1107 target_ulong msr
, new_msr
, vector
;
1113 * new interrupt handler msr preserves existing ME unless
1114 * explicitly overriden
1116 new_msr
= env
->msr
& ((target_ulong
)1 << MSR_ME
);
1118 /* target registers */
1123 * Hypervisor emulation assistance interrupt only exists on server
1124 * arch 2.05 server or later.
1126 if (excp
== POWERPC_EXCP_HV_EMU
) {
1127 excp
= POWERPC_EXCP_PROGRAM
;
1132 * SPEU and VPU share the same IVOR but they exist in different
1133 * processors. SPEU is e500v1/2 only and VPU is e6500 only.
1135 if (excp
== POWERPC_EXCP_VPU
) {
1136 excp
= POWERPC_EXCP_SPEU
;
1140 vector
= env
->excp_vectors
[excp
];
1141 if (vector
== (target_ulong
)-1ULL) {
1142 cpu_abort(cs
, "Raised an exception without defined vector %d\n",
1146 vector
|= env
->excp_prefix
;
1149 case POWERPC_EXCP_CRITICAL
: /* Critical input */
1150 srr0
= SPR_BOOKE_CSRR0
;
1151 srr1
= SPR_BOOKE_CSRR1
;
1153 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1154 if (!FIELD_EX64(env
->msr
, MSR
, ME
)) {
1156 * Machine check exception is not enabled. Enter
1159 fprintf(stderr
, "Machine check while not allowed. "
1160 "Entering checkstop state\n");
1161 if (qemu_log_separate()) {
1162 qemu_log("Machine check while not allowed. "
1163 "Entering checkstop state\n");
1166 cpu_interrupt_exittb(cs
);
1169 /* machine check exceptions don't have ME set */
1170 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
1172 /* FIXME: choose one or the other based on CPU type */
1173 srr0
= SPR_BOOKE_MCSRR0
;
1174 srr1
= SPR_BOOKE_MCSRR1
;
1176 env
->spr
[SPR_BOOKE_CSRR0
] = env
->nip
;
1177 env
->spr
[SPR_BOOKE_CSRR1
] = msr
;
1180 case POWERPC_EXCP_DSI
: /* Data storage exception */
1181 trace_ppc_excp_dsi(env
->spr
[SPR_BOOKE_ESR
], env
->spr
[SPR_BOOKE_DEAR
]);
1183 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1184 trace_ppc_excp_isi(msr
, env
->nip
);
1186 case POWERPC_EXCP_EXTERNAL
: /* External input */
1187 if (env
->mpic_proxy
) {
1188 /* IACK the IRQ on delivery */
1189 env
->spr
[SPR_BOOKE_EPR
] = ldl_phys(cs
->as
, env
->mpic_iack
);
1192 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1194 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1195 switch (env
->error_code
& ~0xF) {
1196 case POWERPC_EXCP_FP
:
1197 if (!FIELD_EX64_FE(env
->msr
) || !FIELD_EX64(env
->msr
, MSR
, FP
)) {
1198 trace_ppc_excp_fp_ignore();
1199 powerpc_reset_excp_state(cpu
);
1204 * FP exceptions always have NIP pointing to the faulting
1205 * instruction, so always use store_next and claim we are
1206 * precise in the MSR.
1209 env
->spr
[SPR_BOOKE_ESR
] = ESR_FP
;
1211 case POWERPC_EXCP_INVAL
:
1212 trace_ppc_excp_inval(env
->nip
);
1214 env
->spr
[SPR_BOOKE_ESR
] = ESR_PIL
;
1216 case POWERPC_EXCP_PRIV
:
1218 env
->spr
[SPR_BOOKE_ESR
] = ESR_PPR
;
1220 case POWERPC_EXCP_TRAP
:
1222 env
->spr
[SPR_BOOKE_ESR
] = ESR_PTR
;
1225 /* Should never occur */
1226 cpu_abort(cs
, "Invalid program exception %d. Aborting\n",
1231 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1235 * We need to correct the NIP which in this case is supposed
1236 * to point to the next instruction
1240 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1241 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
1242 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1244 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
1246 trace_ppc_excp_print("FIT");
1248 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
1249 trace_ppc_excp_print("WDT");
1250 srr0
= SPR_BOOKE_CSRR0
;
1251 srr1
= SPR_BOOKE_CSRR1
;
1253 case POWERPC_EXCP_DTLB
: /* Data TLB error */
1254 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
1256 case POWERPC_EXCP_DEBUG
: /* Debug interrupt */
1257 if (env
->flags
& POWERPC_FLAG_DE
) {
1258 /* FIXME: choose one or the other based on CPU type */
1259 srr0
= SPR_BOOKE_DSRR0
;
1260 srr1
= SPR_BOOKE_DSRR1
;
1262 env
->spr
[SPR_BOOKE_CSRR0
] = env
->nip
;
1263 env
->spr
[SPR_BOOKE_CSRR1
] = msr
;
1265 /* DBSR already modified by caller */
1267 cpu_abort(cs
, "Debug exception triggered on unsupported model\n");
1270 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavailable/VPU */
1271 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
1273 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
1275 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
1276 srr0
= SPR_BOOKE_CSRR0
;
1277 srr1
= SPR_BOOKE_CSRR1
;
1279 case POWERPC_EXCP_RESET
: /* System reset exception */
1280 if (FIELD_EX64(env
->msr
, MSR
, POW
)) {
1281 cpu_abort(cs
, "Trying to deliver power-saving system reset "
1282 "exception %d with no HV support\n", excp
);
1285 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data interrupt */
1286 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round interrupt */
1287 cpu_abort(cs
, "%s exception not implemented\n",
1288 powerpc_excp_name(excp
));
1291 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
1295 #if defined(TARGET_PPC64)
1296 if (env
->spr
[SPR_BOOKE_EPCR
] & EPCR_ICM
) {
1297 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
1298 new_msr
|= (target_ulong
)1 << MSR_CM
;
1300 vector
= (uint32_t)vector
;
1305 env
->spr
[srr0
] = env
->nip
;
1308 env
->spr
[srr1
] = msr
;
1310 powerpc_set_excp_state(cpu
, vector
, new_msr
);
1314 * When running a nested HV guest under vhyp, external interrupts are
1315 * delivered as HVIRT.
1317 static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU
*cpu
)
1320 return vhyp_cpu_in_nested(cpu
);
1327 * When running under vhyp, hcalls are always intercepted and sent to the
1328 * vhc->hypercall handler.
1330 static bool books_vhyp_handles_hcall(PowerPCCPU
*cpu
)
1333 return !vhyp_cpu_in_nested(cpu
);
1339 * When running a nested KVM HV guest under vhyp, HV exceptions are not
1340 * delivered to the guest (because there is no concept of HV support), but
1341 * rather they are sent tothe vhyp to exit from the L2 back to the L1 and
1342 * return from the H_ENTER_NESTED hypercall.
1344 static bool books_vhyp_handles_hv_excp(PowerPCCPU
*cpu
)
1347 return vhyp_cpu_in_nested(cpu
);
1353 static bool is_prefix_insn(CPUPPCState
*env
, uint32_t insn
)
1355 if (!(env
->insns_flags2
& PPC2_ISA310
)) {
1358 return ((insn
& 0xfc000000) == 0x04000000);
1361 static bool is_prefix_insn_excp(PowerPCCPU
*cpu
, int excp
)
1363 CPUPPCState
*env
= &cpu
->env
;
1365 if (!tcg_enabled()) {
1367 * This does not load instructions and set the prefix bit correctly
1368 * for injected interrupts with KVM. That may have to be discovered
1369 * and set by the KVM layer before injecting.
1375 case POWERPC_EXCP_HDSI
:
1376 /* HDSI PRTABLE_FAULT has the originating access type in error_code */
1377 if ((env
->spr
[SPR_HDSISR
] & DSISR_PRTABLE_FAULT
) &&
1378 (env
->error_code
== MMU_INST_FETCH
)) {
1380 * Fetch failed due to partition scope translation, so prefix
1381 * indication is not relevant (and attempting to load the
1382 * instruction at NIP would cause recursive faults with the same
1388 case POWERPC_EXCP_MCHECK
:
1389 case POWERPC_EXCP_DSI
:
1390 case POWERPC_EXCP_DSEG
:
1391 case POWERPC_EXCP_ALIGN
:
1392 case POWERPC_EXCP_PROGRAM
:
1393 case POWERPC_EXCP_FPU
:
1394 case POWERPC_EXCP_TRACE
:
1395 case POWERPC_EXCP_HV_EMU
:
1396 case POWERPC_EXCP_VPU
:
1397 case POWERPC_EXCP_VSXU
:
1398 case POWERPC_EXCP_FU
:
1399 case POWERPC_EXCP_HV_FU
: {
1400 uint32_t insn
= ppc_ldl_code(env
, env
->nip
);
1401 if (is_prefix_insn(env
, insn
)) {
1412 static bool is_prefix_insn_excp(PowerPCCPU
*cpu
, int excp
)
1418 static void powerpc_excp_books(PowerPCCPU
*cpu
, int excp
)
1420 CPUState
*cs
= CPU(cpu
);
1421 CPUPPCState
*env
= &cpu
->env
;
1422 target_ulong msr
, new_msr
, vector
;
1423 int srr0
, srr1
, lev
= -1;
1425 /* new srr1 value excluding must-be-zero bits */
1426 msr
= env
->msr
& ~0x783f0000ULL
;
1429 * new interrupt handler msr preserves existing HV and ME unless
1430 * explicitly overriden
1432 new_msr
= env
->msr
& (((target_ulong
)1 << MSR_ME
) | MSR_HVB
);
1434 /* target registers */
1439 * check for special resume at 0x100 from doze/nap/sleep/winkle on
1442 if (env
->resume_as_sreset
) {
1443 excp
= powerpc_reset_wakeup(cs
, env
, excp
, &msr
);
1447 * We don't want to generate a Hypervisor Emulation Assistance
1448 * Interrupt if we don't have HVB in msr_mask (PAPR mode),
1449 * unless running a nested-hv guest, in which case the L1
1450 * kernel wants the interrupt.
1452 if (excp
== POWERPC_EXCP_HV_EMU
&& !(env
->msr_mask
& MSR_HVB
) &&
1453 !books_vhyp_handles_hv_excp(cpu
)) {
1454 excp
= POWERPC_EXCP_PROGRAM
;
1457 vector
= env
->excp_vectors
[excp
];
1458 if (vector
== (target_ulong
)-1ULL) {
1459 cpu_abort(cs
, "Raised an exception without defined vector %d\n",
1463 vector
|= env
->excp_prefix
;
1465 if (is_prefix_insn_excp(cpu
, excp
)) {
1470 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
1471 if (!FIELD_EX64(env
->msr
, MSR
, ME
)) {
1473 * Machine check exception is not enabled. Enter
1476 fprintf(stderr
, "Machine check while not allowed. "
1477 "Entering checkstop state\n");
1478 if (qemu_log_separate()) {
1479 qemu_log("Machine check while not allowed. "
1480 "Entering checkstop state\n");
1483 cpu_interrupt_exittb(cs
);
1485 if (env
->msr_mask
& MSR_HVB
) {
1487 * ISA specifies HV, but can be delivered to guest with HV
1488 * clear (e.g., see FWNMI in PAPR).
1490 new_msr
|= (target_ulong
)MSR_HVB
;
1493 /* machine check exceptions don't have ME set */
1494 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
1497 case POWERPC_EXCP_DSI
: /* Data storage exception */
1498 trace_ppc_excp_dsi(env
->spr
[SPR_DSISR
], env
->spr
[SPR_DAR
]);
1500 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
1501 trace_ppc_excp_isi(msr
, env
->nip
);
1502 msr
|= env
->error_code
;
1504 case POWERPC_EXCP_EXTERNAL
: /* External input */
1509 * LPES0 is only taken into consideration if we support HV
1510 * mode for this CPU.
1512 if (!env
->has_hv_mode
) {
1516 lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
1519 new_msr
|= (target_ulong
)MSR_HVB
;
1520 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
1527 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
1528 /* Optional DSISR update was removed from ISA v3.0 */
1529 if (!(env
->insns_flags2
& PPC2_ISA300
)) {
1530 /* Get rS/rD and rA from faulting opcode */
1532 * Note: the opcode fields will not be set properly for a
1533 * direct store load/store, but nobody cares as nobody
1534 * actually uses direct store segments.
1536 env
->spr
[SPR_DSISR
] |= (env
->error_code
& 0x03FF0000) >> 16;
1539 case POWERPC_EXCP_PROGRAM
: /* Program exception */
1540 switch (env
->error_code
& ~0xF) {
1541 case POWERPC_EXCP_FP
:
1542 if (!FIELD_EX64_FE(env
->msr
) || !FIELD_EX64(env
->msr
, MSR
, FP
)) {
1543 trace_ppc_excp_fp_ignore();
1544 powerpc_reset_excp_state(cpu
);
1549 * FP exceptions always have NIP pointing to the faulting
1550 * instruction, so always use store_next and claim we are
1551 * precise in the MSR.
1555 case POWERPC_EXCP_INVAL
:
1556 trace_ppc_excp_inval(env
->nip
);
1559 case POWERPC_EXCP_PRIV
:
1562 case POWERPC_EXCP_TRAP
:
1566 /* Should never occur */
1567 cpu_abort(cs
, "Invalid program exception %d. Aborting\n",
1572 case POWERPC_EXCP_SYSCALL
: /* System call exception */
1573 lev
= env
->error_code
;
1575 if (lev
== 1 && cpu
->vhyp
) {
1582 * We need to correct the NIP which in this case is supposed
1583 * to point to the next instruction
1587 /* "PAPR mode" built-in hypercall emulation */
1588 if (lev
== 1 && books_vhyp_handles_hcall(cpu
)) {
1589 PPCVirtualHypervisorClass
*vhc
=
1590 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu
->vhyp
);
1591 vhc
->hypercall(cpu
->vhyp
, cpu
);
1594 if (env
->insns_flags2
& PPC2_ISA310
) {
1595 /* ISAv3.1 puts LEV into SRR1 */
1599 new_msr
|= (target_ulong
)MSR_HVB
;
1602 case POWERPC_EXCP_SYSCALL_VECTORED
: /* scv exception */
1603 lev
= env
->error_code
;
1606 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_EE
);
1607 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
1609 vector
+= lev
* 0x20;
1614 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
1615 case POWERPC_EXCP_DECR
: /* Decrementer exception */
1617 case POWERPC_EXCP_RESET
: /* System reset exception */
1618 /* A power-saving exception sets ME, otherwise it is unchanged */
1619 if (FIELD_EX64(env
->msr
, MSR
, POW
)) {
1620 /* indicate that we resumed from power save mode */
1622 new_msr
|= ((target_ulong
)1 << MSR_ME
);
1624 if (env
->msr_mask
& MSR_HVB
) {
1626 * ISA specifies HV, but can be delivered to guest with HV
1627 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
1629 new_msr
|= (target_ulong
)MSR_HVB
;
1631 if (FIELD_EX64(env
->msr
, MSR
, POW
)) {
1632 cpu_abort(cs
, "Trying to deliver power-saving system reset "
1633 "exception %d with no HV support\n", excp
);
1637 case POWERPC_EXCP_DSEG
: /* Data segment exception */
1638 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
1639 case POWERPC_EXCP_TRACE
: /* Trace exception */
1640 case POWERPC_EXCP_SDOOR
: /* Doorbell interrupt */
1641 case POWERPC_EXCP_PERFM
: /* Performance monitor interrupt */
1643 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage exception */
1644 msr
|= env
->error_code
;
1646 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
1647 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
1648 case POWERPC_EXCP_SDOOR_HV
: /* Hypervisor Doorbell interrupt */
1649 case POWERPC_EXCP_HVIRT
: /* Hypervisor virtualization */
1652 new_msr
|= (target_ulong
)MSR_HVB
;
1653 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
1656 case POWERPC_EXCP_HV_EMU
: {
1657 uint32_t insn
= ppc_ldl_code(env
, env
->nip
);
1658 env
->spr
[SPR_HEIR
] = insn
;
1659 if (is_prefix_insn(env
, insn
)) {
1660 uint32_t insn2
= ppc_ldl_code(env
, env
->nip
+ 4);
1661 env
->spr
[SPR_HEIR
] <<= 32;
1662 env
->spr
[SPR_HEIR
] |= insn2
;
1666 new_msr
|= (target_ulong
)MSR_HVB
;
1667 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
1671 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
1672 case POWERPC_EXCP_VSXU
: /* VSX unavailable exception */
1673 case POWERPC_EXCP_FU
: /* Facility unavailable exception */
1674 env
->spr
[SPR_FSCR
] |= ((target_ulong
)env
->error_code
<< 56);
1676 case POWERPC_EXCP_HV_FU
: /* Hypervisor Facility Unavailable Exception */
1677 env
->spr
[SPR_HFSCR
] |= ((target_ulong
)env
->error_code
<< FSCR_IC_POS
);
1680 new_msr
|= (target_ulong
)MSR_HVB
;
1681 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
1683 case POWERPC_EXCP_PERFM_EBB
: /* Performance Monitor EBB Exception */
1684 case POWERPC_EXCP_EXTERNAL_EBB
: /* External EBB Exception */
1685 env
->spr
[SPR_BESCR
] &= ~BESCR_GE
;
1688 * Save NIP for rfebb insn in SPR_EBBRR. Next nip is
1689 * stored in the EBB Handler SPR_EBBHR.
1691 env
->spr
[SPR_EBBRR
] = env
->nip
;
1692 powerpc_set_excp_state(cpu
, env
->spr
[SPR_EBBHR
], env
->msr
);
1695 * This exception is handled in userspace. No need to proceed.
1698 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
1699 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
1700 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
1701 case POWERPC_EXCP_HV_MAINT
: /* Hypervisor Maintenance exception */
1702 cpu_abort(cs
, "%s exception not implemented\n",
1703 powerpc_excp_name(excp
));
1706 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
1711 * Sort out endianness of interrupt, this differs depending on the
1712 * CPU, the HV mode, etc...
1714 if (ppc_interrupts_little_endian(cpu
, !!(new_msr
& MSR_HVB
))) {
1715 new_msr
|= (target_ulong
)1 << MSR_LE
;
1718 new_msr
|= (target_ulong
)1 << MSR_SF
;
1720 if (excp
!= POWERPC_EXCP_SYSCALL_VECTORED
) {
1722 env
->spr
[srr0
] = env
->nip
;
1725 env
->spr
[srr1
] = msr
;
1728 if ((new_msr
& MSR_HVB
) && books_vhyp_handles_hv_excp(cpu
)) {
1729 PPCVirtualHypervisorClass
*vhc
=
1730 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu
->vhyp
);
1731 /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */
1732 vhc
->deliver_hv_excp(cpu
, excp
);
1734 powerpc_reset_excp_state(cpu
);
1738 if (!(env
->msr_mask
& MSR_HVB
) && srr0
== SPR_HSRR0
) {
1739 cpu_abort(cs
, "Trying to deliver HV exception (HSRR) %d with "
1740 "no HV support\n", excp
);
1743 /* This can update new_msr and vector if AIL applies */
1744 ppc_excp_apply_ail(cpu
, excp
, msr
, &new_msr
, &vector
);
1746 powerpc_set_excp_state(cpu
, vector
, new_msr
);
1750 static inline void powerpc_excp_books(PowerPCCPU
*cpu
, int excp
)
1752 g_assert_not_reached();
1756 static void powerpc_excp(PowerPCCPU
*cpu
, int excp
)
1758 CPUState
*cs
= CPU(cpu
);
1759 CPUPPCState
*env
= &cpu
->env
;
1761 if (excp
<= POWERPC_EXCP_NONE
|| excp
>= POWERPC_EXCP_NB
) {
1762 cpu_abort(cs
, "Invalid PowerPC exception %d. Aborting\n", excp
);
1765 qemu_log_mask(CPU_LOG_INT
, "Raise exception at " TARGET_FMT_lx
1766 " => %s (%d) error=%02x\n", env
->nip
, powerpc_excp_name(excp
),
1767 excp
, env
->error_code
);
1768 env
->excp_stats
[excp
]++;
1770 switch (env
->excp_model
) {
1771 case POWERPC_EXCP_40x
:
1772 powerpc_excp_40x(cpu
, excp
);
1774 case POWERPC_EXCP_6xx
:
1775 powerpc_excp_6xx(cpu
, excp
);
1777 case POWERPC_EXCP_7xx
:
1778 powerpc_excp_7xx(cpu
, excp
);
1780 case POWERPC_EXCP_74xx
:
1781 powerpc_excp_74xx(cpu
, excp
);
1783 case POWERPC_EXCP_BOOKE
:
1784 powerpc_excp_booke(cpu
, excp
);
1786 case POWERPC_EXCP_970
:
1787 case POWERPC_EXCP_POWER7
:
1788 case POWERPC_EXCP_POWER8
:
1789 case POWERPC_EXCP_POWER9
:
1790 case POWERPC_EXCP_POWER10
:
1791 powerpc_excp_books(cpu
, excp
);
1794 g_assert_not_reached();
1798 void ppc_cpu_do_interrupt(CPUState
*cs
)
1800 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1802 powerpc_excp(cpu
, cs
->exception_index
);
1805 #if defined(TARGET_PPC64)
1806 #define P7_UNUSED_INTERRUPTS \
1807 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \
1808 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
1809 PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \
1810 PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB)
1812 static int p7_interrupt_powersave(CPUPPCState
*env
)
1814 if ((env
->pending_interrupts
& PPC_INTERRUPT_EXT
) &&
1815 (env
->spr
[SPR_LPCR
] & LPCR_P7_PECE0
)) {
1816 return PPC_INTERRUPT_EXT
;
1818 if ((env
->pending_interrupts
& PPC_INTERRUPT_DECR
) &&
1819 (env
->spr
[SPR_LPCR
] & LPCR_P7_PECE1
)) {
1820 return PPC_INTERRUPT_DECR
;
1822 if ((env
->pending_interrupts
& PPC_INTERRUPT_MCK
) &&
1823 (env
->spr
[SPR_LPCR
] & LPCR_P7_PECE2
)) {
1824 return PPC_INTERRUPT_MCK
;
1826 if ((env
->pending_interrupts
& PPC_INTERRUPT_HMI
) &&
1827 (env
->spr
[SPR_LPCR
] & LPCR_P7_PECE2
)) {
1828 return PPC_INTERRUPT_HMI
;
1830 if (env
->pending_interrupts
& PPC_INTERRUPT_RESET
) {
1831 return PPC_INTERRUPT_RESET
;
1836 static int p7_next_unmasked_interrupt(CPUPPCState
*env
)
1838 PowerPCCPU
*cpu
= env_archcpu(env
);
1839 CPUState
*cs
= CPU(cpu
);
1840 /* Ignore MSR[EE] when coming out of some power management states */
1841 bool msr_ee
= FIELD_EX64(env
->msr
, MSR
, EE
) || env
->resume_as_sreset
;
1843 assert((env
->pending_interrupts
& P7_UNUSED_INTERRUPTS
) == 0);
1846 /* LPCR[PECE] controls which interrupts can exit power-saving mode */
1847 return p7_interrupt_powersave(env
);
1850 /* Machine check exception */
1851 if (env
->pending_interrupts
& PPC_INTERRUPT_MCK
) {
1852 return PPC_INTERRUPT_MCK
;
1855 /* Hypervisor decrementer exception */
1856 if (env
->pending_interrupts
& PPC_INTERRUPT_HDECR
) {
1857 /* LPCR will be clear when not supported so this will work */
1858 bool hdice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HDICE
);
1859 if ((msr_ee
|| !FIELD_EX64_HV(env
->msr
)) && hdice
) {
1860 /* HDEC clears on delivery */
1861 return PPC_INTERRUPT_HDECR
;
1865 /* External interrupt can ignore MSR:EE under some circumstances */
1866 if (env
->pending_interrupts
& PPC_INTERRUPT_EXT
) {
1867 bool lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
1868 bool heic
= !!(env
->spr
[SPR_LPCR
] & LPCR_HEIC
);
1869 /* HEIC blocks delivery to the hypervisor */
1870 if ((msr_ee
&& !(heic
&& FIELD_EX64_HV(env
->msr
) &&
1871 !FIELD_EX64(env
->msr
, MSR
, PR
))) ||
1872 (env
->has_hv_mode
&& !FIELD_EX64_HV(env
->msr
) && !lpes0
)) {
1873 return PPC_INTERRUPT_EXT
;
1877 /* Decrementer exception */
1878 if (env
->pending_interrupts
& PPC_INTERRUPT_DECR
) {
1879 return PPC_INTERRUPT_DECR
;
1881 if (env
->pending_interrupts
& PPC_INTERRUPT_PERFM
) {
1882 return PPC_INTERRUPT_PERFM
;
1889 #define P8_UNUSED_INTERRUPTS \
1890 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT | \
1891 PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \
1892 PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
1894 static int p8_interrupt_powersave(CPUPPCState
*env
)
1896 if ((env
->pending_interrupts
& PPC_INTERRUPT_EXT
) &&
1897 (env
->spr
[SPR_LPCR
] & LPCR_P8_PECE2
)) {
1898 return PPC_INTERRUPT_EXT
;
1900 if ((env
->pending_interrupts
& PPC_INTERRUPT_DECR
) &&
1901 (env
->spr
[SPR_LPCR
] & LPCR_P8_PECE3
)) {
1902 return PPC_INTERRUPT_DECR
;
1904 if ((env
->pending_interrupts
& PPC_INTERRUPT_MCK
) &&
1905 (env
->spr
[SPR_LPCR
] & LPCR_P8_PECE4
)) {
1906 return PPC_INTERRUPT_MCK
;
1908 if ((env
->pending_interrupts
& PPC_INTERRUPT_HMI
) &&
1909 (env
->spr
[SPR_LPCR
] & LPCR_P8_PECE4
)) {
1910 return PPC_INTERRUPT_HMI
;
1912 if ((env
->pending_interrupts
& PPC_INTERRUPT_DOORBELL
) &&
1913 (env
->spr
[SPR_LPCR
] & LPCR_P8_PECE0
)) {
1914 return PPC_INTERRUPT_DOORBELL
;
1916 if ((env
->pending_interrupts
& PPC_INTERRUPT_HDOORBELL
) &&
1917 (env
->spr
[SPR_LPCR
] & LPCR_P8_PECE1
)) {
1918 return PPC_INTERRUPT_HDOORBELL
;
1920 if (env
->pending_interrupts
& PPC_INTERRUPT_RESET
) {
1921 return PPC_INTERRUPT_RESET
;
1926 static int p8_next_unmasked_interrupt(CPUPPCState
*env
)
1928 PowerPCCPU
*cpu
= env_archcpu(env
);
1929 CPUState
*cs
= CPU(cpu
);
1930 /* Ignore MSR[EE] when coming out of some power management states */
1931 bool msr_ee
= FIELD_EX64(env
->msr
, MSR
, EE
) || env
->resume_as_sreset
;
1933 assert((env
->pending_interrupts
& P8_UNUSED_INTERRUPTS
) == 0);
1936 /* LPCR[PECE] controls which interrupts can exit power-saving mode */
1937 return p8_interrupt_powersave(env
);
1940 /* Machine check exception */
1941 if (env
->pending_interrupts
& PPC_INTERRUPT_MCK
) {
1942 return PPC_INTERRUPT_MCK
;
1945 /* Hypervisor decrementer exception */
1946 if (env
->pending_interrupts
& PPC_INTERRUPT_HDECR
) {
1947 /* LPCR will be clear when not supported so this will work */
1948 bool hdice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HDICE
);
1949 if ((msr_ee
|| !FIELD_EX64_HV(env
->msr
)) && hdice
) {
1950 /* HDEC clears on delivery */
1951 return PPC_INTERRUPT_HDECR
;
1955 /* External interrupt can ignore MSR:EE under some circumstances */
1956 if (env
->pending_interrupts
& PPC_INTERRUPT_EXT
) {
1957 bool lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
1958 bool heic
= !!(env
->spr
[SPR_LPCR
] & LPCR_HEIC
);
1959 /* HEIC blocks delivery to the hypervisor */
1960 if ((msr_ee
&& !(heic
&& FIELD_EX64_HV(env
->msr
) &&
1961 !FIELD_EX64(env
->msr
, MSR
, PR
))) ||
1962 (env
->has_hv_mode
&& !FIELD_EX64_HV(env
->msr
) && !lpes0
)) {
1963 return PPC_INTERRUPT_EXT
;
1967 /* Decrementer exception */
1968 if (env
->pending_interrupts
& PPC_INTERRUPT_DECR
) {
1969 return PPC_INTERRUPT_DECR
;
1971 if (env
->pending_interrupts
& PPC_INTERRUPT_DOORBELL
) {
1972 return PPC_INTERRUPT_DOORBELL
;
1974 if (env
->pending_interrupts
& PPC_INTERRUPT_HDOORBELL
) {
1975 return PPC_INTERRUPT_HDOORBELL
;
1977 if (env
->pending_interrupts
& PPC_INTERRUPT_PERFM
) {
1978 return PPC_INTERRUPT_PERFM
;
1981 if (env
->pending_interrupts
& PPC_INTERRUPT_EBB
) {
1983 * EBB exception must be taken in problem state and
1984 * with BESCR_GE set.
1986 if (FIELD_EX64(env
->msr
, MSR
, PR
) &&
1987 (env
->spr
[SPR_BESCR
] & BESCR_GE
)) {
1988 return PPC_INTERRUPT_EBB
;
1996 #define P9_UNUSED_INTERRUPTS \
1997 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT | \
1998 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
1999 PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
2001 static int p9_interrupt_powersave(CPUPPCState
*env
)
2003 /* External Exception */
2004 if ((env
->pending_interrupts
& PPC_INTERRUPT_EXT
) &&
2005 (env
->spr
[SPR_LPCR
] & LPCR_EEE
)) {
2006 bool heic
= !!(env
->spr
[SPR_LPCR
] & LPCR_HEIC
);
2007 if (!heic
|| !FIELD_EX64_HV(env
->msr
) ||
2008 FIELD_EX64(env
->msr
, MSR
, PR
)) {
2009 return PPC_INTERRUPT_EXT
;
2012 /* Decrementer Exception */
2013 if ((env
->pending_interrupts
& PPC_INTERRUPT_DECR
) &&
2014 (env
->spr
[SPR_LPCR
] & LPCR_DEE
)) {
2015 return PPC_INTERRUPT_DECR
;
2017 /* Machine Check or Hypervisor Maintenance Exception */
2018 if (env
->spr
[SPR_LPCR
] & LPCR_OEE
) {
2019 if (env
->pending_interrupts
& PPC_INTERRUPT_MCK
) {
2020 return PPC_INTERRUPT_MCK
;
2022 if (env
->pending_interrupts
& PPC_INTERRUPT_HMI
) {
2023 return PPC_INTERRUPT_HMI
;
2026 /* Privileged Doorbell Exception */
2027 if ((env
->pending_interrupts
& PPC_INTERRUPT_DOORBELL
) &&
2028 (env
->spr
[SPR_LPCR
] & LPCR_PDEE
)) {
2029 return PPC_INTERRUPT_DOORBELL
;
2031 /* Hypervisor Doorbell Exception */
2032 if ((env
->pending_interrupts
& PPC_INTERRUPT_HDOORBELL
) &&
2033 (env
->spr
[SPR_LPCR
] & LPCR_HDEE
)) {
2034 return PPC_INTERRUPT_HDOORBELL
;
2036 /* Hypervisor virtualization exception */
2037 if ((env
->pending_interrupts
& PPC_INTERRUPT_HVIRT
) &&
2038 (env
->spr
[SPR_LPCR
] & LPCR_HVEE
)) {
2039 return PPC_INTERRUPT_HVIRT
;
2041 if (env
->pending_interrupts
& PPC_INTERRUPT_RESET
) {
2042 return PPC_INTERRUPT_RESET
;
2047 static int p9_next_unmasked_interrupt(CPUPPCState
*env
)
2049 PowerPCCPU
*cpu
= env_archcpu(env
);
2050 CPUState
*cs
= CPU(cpu
);
2051 /* Ignore MSR[EE] when coming out of some power management states */
2052 bool msr_ee
= FIELD_EX64(env
->msr
, MSR
, EE
) || env
->resume_as_sreset
;
2054 assert((env
->pending_interrupts
& P9_UNUSED_INTERRUPTS
) == 0);
2057 if (env
->spr
[SPR_PSSCR
] & PSSCR_EC
) {
2059 * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can
2060 * wakeup the processor
2062 return p9_interrupt_powersave(env
);
2065 * When it's clear, any system-caused exception exits power-saving
2066 * mode, even the ones that gate on MSR[EE].
2072 /* Machine check exception */
2073 if (env
->pending_interrupts
& PPC_INTERRUPT_MCK
) {
2074 return PPC_INTERRUPT_MCK
;
2077 /* Hypervisor decrementer exception */
2078 if (env
->pending_interrupts
& PPC_INTERRUPT_HDECR
) {
2079 /* LPCR will be clear when not supported so this will work */
2080 bool hdice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HDICE
);
2081 if ((msr_ee
|| !FIELD_EX64_HV(env
->msr
)) && hdice
) {
2082 /* HDEC clears on delivery */
2083 return PPC_INTERRUPT_HDECR
;
2087 /* Hypervisor virtualization interrupt */
2088 if (env
->pending_interrupts
& PPC_INTERRUPT_HVIRT
) {
2089 /* LPCR will be clear when not supported so this will work */
2090 bool hvice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HVICE
);
2091 if ((msr_ee
|| !FIELD_EX64_HV(env
->msr
)) && hvice
) {
2092 return PPC_INTERRUPT_HVIRT
;
2096 /* External interrupt can ignore MSR:EE under some circumstances */
2097 if (env
->pending_interrupts
& PPC_INTERRUPT_EXT
) {
2098 bool lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
2099 bool heic
= !!(env
->spr
[SPR_LPCR
] & LPCR_HEIC
);
2100 /* HEIC blocks delivery to the hypervisor */
2101 if ((msr_ee
&& !(heic
&& FIELD_EX64_HV(env
->msr
) &&
2102 !FIELD_EX64(env
->msr
, MSR
, PR
))) ||
2103 (env
->has_hv_mode
&& !FIELD_EX64_HV(env
->msr
) && !lpes0
)) {
2104 return PPC_INTERRUPT_EXT
;
2108 /* Decrementer exception */
2109 if (env
->pending_interrupts
& PPC_INTERRUPT_DECR
) {
2110 return PPC_INTERRUPT_DECR
;
2112 if (env
->pending_interrupts
& PPC_INTERRUPT_DOORBELL
) {
2113 return PPC_INTERRUPT_DOORBELL
;
2115 if (env
->pending_interrupts
& PPC_INTERRUPT_HDOORBELL
) {
2116 return PPC_INTERRUPT_HDOORBELL
;
2118 if (env
->pending_interrupts
& PPC_INTERRUPT_PERFM
) {
2119 return PPC_INTERRUPT_PERFM
;
2122 if (env
->pending_interrupts
& PPC_INTERRUPT_EBB
) {
2124 * EBB exception must be taken in problem state and
2125 * with BESCR_GE set.
2127 if (FIELD_EX64(env
->msr
, MSR
, PR
) &&
2128 (env
->spr
[SPR_BESCR
] & BESCR_GE
)) {
2129 return PPC_INTERRUPT_EBB
;
2138 static int ppc_next_unmasked_interrupt_generic(CPUPPCState
*env
)
2142 /* External reset */
2143 if (env
->pending_interrupts
& PPC_INTERRUPT_RESET
) {
2144 return PPC_INTERRUPT_RESET
;
2146 /* Machine check exception */
2147 if (env
->pending_interrupts
& PPC_INTERRUPT_MCK
) {
2148 return PPC_INTERRUPT_MCK
;
2151 /* External debug exception */
2152 if (env
->pending_interrupts
& PPC_INTERRUPT_DEBUG
) {
2153 return PPC_INTERRUPT_DEBUG
;
2158 * For interrupts that gate on MSR:EE, we need to do something a
2159 * bit more subtle, as we need to let them through even when EE is
2160 * clear when coming out of some power management states (in order
2161 * for them to become a 0x100).
2163 async_deliver
= FIELD_EX64(env
->msr
, MSR
, EE
) || env
->resume_as_sreset
;
2165 /* Hypervisor decrementer exception */
2166 if (env
->pending_interrupts
& PPC_INTERRUPT_HDECR
) {
2167 /* LPCR will be clear when not supported so this will work */
2168 bool hdice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HDICE
);
2169 if ((async_deliver
|| !FIELD_EX64_HV(env
->msr
)) && hdice
) {
2170 /* HDEC clears on delivery */
2171 return PPC_INTERRUPT_HDECR
;
2175 /* Hypervisor virtualization interrupt */
2176 if (env
->pending_interrupts
& PPC_INTERRUPT_HVIRT
) {
2177 /* LPCR will be clear when not supported so this will work */
2178 bool hvice
= !!(env
->spr
[SPR_LPCR
] & LPCR_HVICE
);
2179 if ((async_deliver
|| !FIELD_EX64_HV(env
->msr
)) && hvice
) {
2180 return PPC_INTERRUPT_HVIRT
;
2184 /* External interrupt can ignore MSR:EE under some circumstances */
2185 if (env
->pending_interrupts
& PPC_INTERRUPT_EXT
) {
2186 bool lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
2187 bool heic
= !!(env
->spr
[SPR_LPCR
] & LPCR_HEIC
);
2188 /* HEIC blocks delivery to the hypervisor */
2189 if ((async_deliver
&& !(heic
&& FIELD_EX64_HV(env
->msr
) &&
2190 !FIELD_EX64(env
->msr
, MSR
, PR
))) ||
2191 (env
->has_hv_mode
&& !FIELD_EX64_HV(env
->msr
) && !lpes0
)) {
2192 return PPC_INTERRUPT_EXT
;
2195 if (FIELD_EX64(env
->msr
, MSR
, CE
)) {
2196 /* External critical interrupt */
2197 if (env
->pending_interrupts
& PPC_INTERRUPT_CEXT
) {
2198 return PPC_INTERRUPT_CEXT
;
2201 if (async_deliver
!= 0) {
2202 /* Watchdog timer on embedded PowerPC */
2203 if (env
->pending_interrupts
& PPC_INTERRUPT_WDT
) {
2204 return PPC_INTERRUPT_WDT
;
2206 if (env
->pending_interrupts
& PPC_INTERRUPT_CDOORBELL
) {
2207 return PPC_INTERRUPT_CDOORBELL
;
2209 /* Fixed interval timer on embedded PowerPC */
2210 if (env
->pending_interrupts
& PPC_INTERRUPT_FIT
) {
2211 return PPC_INTERRUPT_FIT
;
2213 /* Programmable interval timer on embedded PowerPC */
2214 if (env
->pending_interrupts
& PPC_INTERRUPT_PIT
) {
2215 return PPC_INTERRUPT_PIT
;
2217 /* Decrementer exception */
2218 if (env
->pending_interrupts
& PPC_INTERRUPT_DECR
) {
2219 return PPC_INTERRUPT_DECR
;
2221 if (env
->pending_interrupts
& PPC_INTERRUPT_DOORBELL
) {
2222 return PPC_INTERRUPT_DOORBELL
;
2224 if (env
->pending_interrupts
& PPC_INTERRUPT_HDOORBELL
) {
2225 return PPC_INTERRUPT_HDOORBELL
;
2227 if (env
->pending_interrupts
& PPC_INTERRUPT_PERFM
) {
2228 return PPC_INTERRUPT_PERFM
;
2230 /* Thermal interrupt */
2231 if (env
->pending_interrupts
& PPC_INTERRUPT_THERM
) {
2232 return PPC_INTERRUPT_THERM
;
2235 if (env
->pending_interrupts
& PPC_INTERRUPT_EBB
) {
2237 * EBB exception must be taken in problem state and
2238 * with BESCR_GE set.
2240 if (FIELD_EX64(env
->msr
, MSR
, PR
) &&
2241 (env
->spr
[SPR_BESCR
] & BESCR_GE
)) {
2242 return PPC_INTERRUPT_EBB
;
2250 static int ppc_next_unmasked_interrupt(CPUPPCState
*env
)
2252 switch (env
->excp_model
) {
2253 #if defined(TARGET_PPC64)
2254 case POWERPC_EXCP_POWER7
:
2255 return p7_next_unmasked_interrupt(env
);
2256 case POWERPC_EXCP_POWER8
:
2257 return p8_next_unmasked_interrupt(env
);
2258 case POWERPC_EXCP_POWER9
:
2259 case POWERPC_EXCP_POWER10
:
2260 return p9_next_unmasked_interrupt(env
);
2263 return ppc_next_unmasked_interrupt_generic(env
);
2268 * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be
2269 * delivered and clears CPU_INTERRUPT_HARD otherwise.
2271 * This method is called by ppc_set_interrupt when an interrupt is raised or
2272 * lowered, and should also be called whenever an interrupt masking condition
2274 * - When relevant bits of MSR are altered, like EE, HV, PR, etc.;
2275 * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.;
2276 * - When PSSCR[EC] or env->resume_as_sreset are changed;
2277 * - When cs->halted is changed and the CPU has a different interrupt masking
2278 * logic in power-saving mode (e.g., POWER7/8/9/10);
2280 void ppc_maybe_interrupt(CPUPPCState
*env
)
2282 CPUState
*cs
= env_cpu(env
);
2283 QEMU_IOTHREAD_LOCK_GUARD();
2285 if (ppc_next_unmasked_interrupt(env
)) {
2286 cpu_interrupt(cs
, CPU_INTERRUPT_HARD
);
2288 cpu_reset_interrupt(cs
, CPU_INTERRUPT_HARD
);
2292 #if defined(TARGET_PPC64)
/* Deliver one pending interrupt on a POWER7-family CPU. */
static void p7_deliver_interrupt(CPUPPCState *env, int interrupt)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    switch (interrupt) {
    case PPC_INTERRUPT_MCK: /* Machine check exception */
        env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
        powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
        break;

    case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
        /* HDEC clears on delivery */
        env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
        powerpc_excp(cpu, POWERPC_EXCP_HDECR);
        break;

    case PPC_INTERRUPT_EXT:
        /* A vhyp may ask for externals to be presented as HVIRT instead */
        if (books_vhyp_promotes_external_to_hvirt(cpu)) {
            powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
        } else {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
        }
        break;

    case PPC_INTERRUPT_DECR: /* Decrementer exception */
        powerpc_excp(cpu, POWERPC_EXCP_DECR);
        break;
    case PPC_INTERRUPT_PERFM:
        env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
        powerpc_excp(cpu, POWERPC_EXCP_PERFM);
        break;
    case 0:
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        assert(!env->resume_as_sreset);
        break;
    default:
        cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt);
    }
}
/* Deliver one pending interrupt on a POWER8-family CPU. */
static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    switch (interrupt) {
    case PPC_INTERRUPT_MCK: /* Machine check exception */
        env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
        powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
        break;

    case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
        /* HDEC clears on delivery */
        env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
        powerpc_excp(cpu, POWERPC_EXCP_HDECR);
        break;

    case PPC_INTERRUPT_EXT:
        /* A vhyp may ask for externals to be presented as HVIRT instead */
        if (books_vhyp_promotes_external_to_hvirt(cpu)) {
            powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
        } else {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
        }
        break;

    case PPC_INTERRUPT_DECR: /* Decrementer exception */
        powerpc_excp(cpu, POWERPC_EXCP_DECR);
        break;
    case PPC_INTERRUPT_DOORBELL:
        env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
        /* Book3S 2.x takes a server doorbell, embedded takes DOORI */
        if (is_book3s_arch2x(env)) {
            powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
        } else {
            powerpc_excp(cpu, POWERPC_EXCP_DOORI);
        }
        break;
    case PPC_INTERRUPT_HDOORBELL:
        env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
        powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
        break;
    case PPC_INTERRUPT_PERFM:
        env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
        powerpc_excp(cpu, POWERPC_EXCP_PERFM);
        break;
    case PPC_INTERRUPT_EBB: /* EBB exception */
        env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
        /* BESCR occurrence bits select which EBB flavor is pending */
        if (env->spr[SPR_BESCR] & BESCR_PMEO) {
            powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
        } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
        }
        break;
    case 0:
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        assert(!env->resume_as_sreset);
        break;
    default:
        cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt);
    }
}
/* Deliver one pending interrupt on a POWER9/POWER10-family CPU. */
static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) &&
        !FIELD_EX64(env->msr, MSR, EE)) {
        /*
         * A pending interrupt took us out of power-saving, but MSR[EE] says
         * that we should return to NIP+4 instead of delivering it.
         */
        return;
    }

    switch (interrupt) {
    case PPC_INTERRUPT_MCK: /* Machine check exception */
        env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
        powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
        break;

    case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
        /* HDEC clears on delivery */
        env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
        powerpc_excp(cpu, POWERPC_EXCP_HDECR);
        break;
    case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
        powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
        break;

    case PPC_INTERRUPT_EXT:
        /* A vhyp may ask for externals to be presented as HVIRT instead */
        if (books_vhyp_promotes_external_to_hvirt(cpu)) {
            powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
        } else {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
        }
        break;

    case PPC_INTERRUPT_DECR: /* Decrementer exception */
        powerpc_excp(cpu, POWERPC_EXCP_DECR);
        break;
    case PPC_INTERRUPT_DOORBELL:
        env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
        powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
        break;
    case PPC_INTERRUPT_HDOORBELL:
        env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
        powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
        break;
    case PPC_INTERRUPT_PERFM:
        env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
        powerpc_excp(cpu, POWERPC_EXCP_PERFM);
        break;
    case PPC_INTERRUPT_EBB: /* EBB exception */
        env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
        /* BESCR occurrence bits select which EBB flavor is pending */
        if (env->spr[SPR_BESCR] & BESCR_PMEO) {
            powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
        } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
        }
        break;
    case 0:
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        assert(!env->resume_as_sreset);
        break;
    default:
        cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt);
    }
}
/*
 * Deliver one pending interrupt for CPU models without a dedicated
 * deliver routine (non-POWER7/8/9/10).
 */
static void ppc_deliver_interrupt_generic(CPUPPCState *env, int interrupt)
{
    PowerPCCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    switch (interrupt) {
    case PPC_INTERRUPT_RESET: /* External reset */
        env->pending_interrupts &= ~PPC_INTERRUPT_RESET;
        powerpc_excp(cpu, POWERPC_EXCP_RESET);
        break;
    case PPC_INTERRUPT_MCK: /* Machine check exception */
        env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
        powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
        break;

    case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
        /* HDEC clears on delivery */
        env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
        powerpc_excp(cpu, POWERPC_EXCP_HDECR);
        break;
    case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
        powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
        break;

    case PPC_INTERRUPT_EXT:
        /* A vhyp may ask for externals to be presented as HVIRT instead */
        if (books_vhyp_promotes_external_to_hvirt(cpu)) {
            powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
        } else {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
        }
        break;
    case PPC_INTERRUPT_CEXT: /* External critical interrupt */
        powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
        break;

    case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */
        env->pending_interrupts &= ~PPC_INTERRUPT_WDT;
        powerpc_excp(cpu, POWERPC_EXCP_WDT);
        break;
    case PPC_INTERRUPT_CDOORBELL:
        env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL;
        powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
        break;
    case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */
        env->pending_interrupts &= ~PPC_INTERRUPT_FIT;
        powerpc_excp(cpu, POWERPC_EXCP_FIT);
        break;
    case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */
        env->pending_interrupts &= ~PPC_INTERRUPT_PIT;
        powerpc_excp(cpu, POWERPC_EXCP_PIT);
        break;
    case PPC_INTERRUPT_DECR: /* Decrementer exception */
        /* Some models keep DECR pending until it is cleared by software */
        if (ppc_decr_clear_on_delivery(env)) {
            env->pending_interrupts &= ~PPC_INTERRUPT_DECR;
        }
        powerpc_excp(cpu, POWERPC_EXCP_DECR);
        break;
    case PPC_INTERRUPT_DOORBELL:
        env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
        /* Book3S 2.x takes a server doorbell, embedded takes DOORI */
        if (is_book3s_arch2x(env)) {
            powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
        } else {
            powerpc_excp(cpu, POWERPC_EXCP_DOORI);
        }
        break;
    case PPC_INTERRUPT_HDOORBELL:
        env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
        powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
        break;
    case PPC_INTERRUPT_PERFM:
        env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
        powerpc_excp(cpu, POWERPC_EXCP_PERFM);
        break;
    case PPC_INTERRUPT_THERM: /* Thermal interrupt */
        env->pending_interrupts &= ~PPC_INTERRUPT_THERM;
        powerpc_excp(cpu, POWERPC_EXCP_THERM);
        break;
    case PPC_INTERRUPT_EBB: /* EBB exception */
        env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
        /* BESCR occurrence bits select which EBB flavor is pending */
        if (env->spr[SPR_BESCR] & BESCR_PMEO) {
            powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
        } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
        }
        break;
    case 0:
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in the
         * processor has_work implementation and the logic in this function.
         */
        assert(!env->resume_as_sreset);
        break;
    default:
        cpu_abort(cs, "Invalid PowerPC interrupt %d. Aborting\n", interrupt);
    }
}
/* Dispatch interrupt delivery to the per-family routine by exception model. */
static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
{
    switch (env->excp_model) {
#if defined(TARGET_PPC64)
    case POWERPC_EXCP_POWER7:
        p7_deliver_interrupt(env, interrupt);
        break;
    case POWERPC_EXCP_POWER8:
        p8_deliver_interrupt(env, interrupt);
        break;
    case POWERPC_EXCP_POWER9:
    case POWERPC_EXCP_POWER10:
        p9_deliver_interrupt(env, interrupt);
        break;
#endif
    default:
        ppc_deliver_interrupt_generic(env, interrupt);
    }
}
/* Raise a system reset exception (0x100) on the given CPU. */
void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    powerpc_excp(cpu, POWERPC_EXCP_RESET);
}
/*
 * Enter the firmware-assisted (FWNMI) machine check handler at @vector.
 * Builds a minimal handler MSR: ME set, SF copied from the current MSR,
 * and LE when interrupts are taken little-endian.
 */
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong msr = 0;

    /*
     * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
     * been set by KVM.
     */
    msr = (1ULL << MSR_ME);
    msr |= env->msr & (1ULL << MSR_SF);
    if (ppc_interrupts_little_endian(cpu, false)) {
        msr |= (1ULL << MSR_LE);
    }

    /* Anything for nested required here? MSR[HV] bit? */

    powerpc_set_excp_state(cpu, vector, msr);
}
/*
 * TCG hook: deliver the next unmasked interrupt, if any.
 * Returns true when an interrupt was delivered, false otherwise.
 */
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int interrupt;

    if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) {
        return false;
    }

    interrupt = ppc_next_unmasked_interrupt(env);
    if (interrupt == 0) {
        return false;
    }

    ppc_deliver_interrupt(env, interrupt);
    /* Drop CPU_INTERRUPT_HARD once nothing is left pending */
    if (env->pending_interrupts == 0) {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
    return true;
}
2669 #endif /* !CONFIG_USER_ONLY */
2671 /*****************************************************************************/
2672 /* Exceptions processing helpers */
/*
 * Raise @exception with @error_code, unwinding guest state to the
 * host return address @raddr (0 means no unwinding needed).
 * Does not return.
 */
void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}
/* Raise @exception with @error_code; no host-PC unwinding. */
void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}
/* Raise @exception with no error code and no host-PC unwinding. */
void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
/* Raise @exception with no error code, unwinding to host address @raddr. */
void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}
/* TCG helper wrapper around raise_exception_err_ra (no unwinding). */
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}
/* TCG helper wrapper around raise_exception (no error code). */
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
2714 #if !defined(CONFIG_USER_ONLY)
/*
 * mtmsr helper: store @val into MSR. If hreg_store_msr reports an
 * exception (e.g. power management state change), end the current TB
 * and raise it.
 */
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}
/* TCG helper wrapper: re-evaluate pending-interrupt state. */
void helper_ppc_maybe_interrupt(CPUPPCState *env)
{
    ppc_maybe_interrupt(env);
}
2732 #if defined(TARGET_PPC64)
/*
 * scv helper: raise a vectored system call at level @lev when the
 * facility is enabled in FSCR, otherwise a facility-unavailable
 * exception with the SCV interruption cause.
 */
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}
/*
 * Power-management instruction (doze/nap/sleep/rvwinkle/stop) helper:
 * halt the CPU and record how it must wake up.
 */
void helper_pminsn(CPUPPCState *env, uint32_t insn)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);

    /* Halting changes the interrupt masking logic; re-evaluate */
    ppc_maybe_interrupt(env);
}
2755 #endif /* defined(TARGET_PPC64) */
/*
 * Common return-from-interrupt: restore @nip and @msr, end the TB,
 * clear the lock reservation, and flush the TLB if needed.
 */
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

    /* MSR:TGPR cannot be set by any form of rfi */
    if (env->flags & POWERPC_FLAG_TGPR)
        msr &= ~(1ULL << MSR_TGPR);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}
/* rfi: return from interrupt using SRR0/SRR1 (MSR cropped to 32 bits). */
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}
2797 #if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}
/* rfscv: return from vectored system call using LR/CTR as NIP/MSR. */
void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}
/* hrfid: hypervisor return from interrupt using HSRR0/HSRR1. */
void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
2820 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/*
 * rfebb: return from event-based branch. Restores NIP from EBBRR and
 * sets/clears BESCR[GE] according to the S field of the instruction.
 */
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
    target_ulong msr = env->msr;

    /*
     * Handling of BESCR bits 32:33 according to PowerISA v3.1:
     *
     * "If BESCR 32:33 != 0b00 the instruction is treated as if
     *  the instruction form were invalid."
     */
    if (env->spr[SPR_BESCR] & BESCR_INVALID) {
        raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }

    env->nip = env->spr[SPR_EBBRR];

    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        env->nip = (uint32_t)env->spr[SPR_EBBRR];
    }

    if (s) {
        env->spr[SPR_BESCR] |= BESCR_GE;
    } else {
        env->spr[SPR_BESCR] &= ~BESCR_GE;
    }
}
/*
 * Triggers or queues an 'ebb_excp' EBB exception. All checks
 * but FSCR, HFSCR and msr_pr must be done beforehand.
 *
 * PowerISA v3.1 isn't clear about whether an EBB should be
 * postponed or cancelled if the EBB facility is unavailable.
 * Our assumption here is that the EBB is cancelled if both
 * FSCR and HFSCR EBB facilities aren't available.
 */
static void do_ebb(CPUPPCState *env, int ebb_excp)
{
    PowerPCCPU *cpu = env_archcpu(env);

    /*
     * FSCR_EBB and FSCR_IC_EBB are the same bits used with
     * FSCR and HFSCR.
     */
    helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
    helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);

    /* Record which EBB flavor occurred in BESCR */
    if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
        env->spr[SPR_BESCR] |= BESCR_PMEO;
    } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
        env->spr[SPR_BESCR] |= BESCR_EEO;
    }

    /* Deliver immediately in problem state, otherwise queue as an IRQ */
    if (FIELD_EX64(env->msr, MSR, PR)) {
        powerpc_excp(cpu, ebb_excp);
    } else {
        ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
    }
}
/*
 * Raise a performance-monitor EBB if it is enabled via
 * MMCR0[EBE], BESCR[PME] and BESCR[GE]; otherwise do nothing.
 */
void raise_ebb_perfm_exception(CPUPPCState *env)
{
    bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
                             env->spr[SPR_BESCR] & BESCR_PME &&
                             env->spr[SPR_BESCR] & BESCR_GE;

    if (!perfm_ebb_enabled) {
        return;
    }

    do_ebb(env, POWERPC_EXCP_PERFM_EBB);
}
2897 /*****************************************************************************/
2898 /* Embedded PowerPC specific helpers */
/* PPC 40x rfci: return from critical interrupt using SRR2/SRR3. */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}
/* BookE rfci: return from critical interrupt using CSRR0/CSRR1. */
void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}
/* BookE rfdi: return from debug interrupt using DSRR0/DSRR1. */
void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}
/* BookE rfmci: return from machine-check interrupt using MCSRR0/MCSRR1. */
void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
2920 #endif /* CONFIG_TCG */
2921 #endif /* !defined(CONFIG_USER_ONLY) */
/*
 * tw/twi: 32-bit trap-word. @flags encodes the TO field:
 * 0x10 lt, 0x08 gt, 0x04 eq (signed), 0x02 ltu, 0x01 gtu (unsigned).
 * Raises a trap program exception when any selected condition holds.
 */
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
2937 #if defined(TARGET_PPC64)
/*
 * td/tdi: 64-bit trap-doubleword, same TO flag encoding as helper_tw.
 * Raises a trap program exception when any selected condition holds.
 */
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
/*
 * One 32-bit SIMON-like block cipher pass used by the hashst/hashchk
 * hash computation: 32-round Feistel network over two 16-bit halves of
 * @x, keyed by 64 bits of @key. @lane rotates which key word feeds each
 * round so the four lanes produce independent streams.
 */
static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
{
    const uint16_t c = 0xfffc;                  /* round constant */
    const uint64_t z0 = 0xfa2561cdf44ac398ULL;  /* key-schedule bit sequence */
    uint16_t z = 0, temp;
    uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];

    /* Split the 64-bit key into four 16-bit words */
    for (int i = 3; i >= 0; i--) {
        k[i] = key & 0xffff;
        key >>= 16;
    }
    xleft[0] = x & 0xffff;
    xright[0] = (x >> 16) & 0xffff;

    /* Key schedule: expand 4 words to 32 round keys */
    for (int i = 0; i < 28; i++) {
        z = (z0 >> (63 - i)) & 1;
        temp = ror16(k[i + 3], 3) ^ k[i + 1];
        k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
    }

    /* Rotate round keys within each group of 4 according to @lane */
    for (int i = 0; i < 8; i++) {
        eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
        eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
        eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
        eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
    }

    /* 32 Feistel rounds */
    for (int i = 0; i < 32; i++) {
        fxleft[i] = (rol16(xleft[i], 1) &
                     rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
        xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
        xright[i + 1] = xleft[i];
    }

    return (((uint32_t)xright[32]) << 16) | xleft[32];
}
/*
 * Compute the 64-bit hash for hashst/hashchk: interleave bytes of
 * @ra and @rb into two 64-bit stages, run each through the SIMON-like
 * cipher (two lanes per stage), and XOR the results.
 */
static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
{
    uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
    uint64_t stage1_h, stage1_l;

    /* Byte-interleave ra and rb into the two stage-0 values */
    for (int i = 0; i < 4; i++) {
        stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
        stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
        stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
        stage0_l |= (ra & 0xff) << (8 * 2 * i);
        rb >>= 8;
        ra >>= 8;
    }

    /* Encrypt each 32-bit half on its own cipher lane */
    stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
    stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
    stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
    stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);

    return stage1_h ^ stage1_l;
}
/*
 * Common body of hashst (store the computed hash at @ea) and hashchk
 * (load the stored hash from @ea and trap on mismatch).
 */
static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
                    target_ulong rb, uint64_t key, bool store)
{
    uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;

    if (store) {
        cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
    } else {
        loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
        if (loaded_hash != calculated_hash) {
            /* ROP-protection check failed: trap */
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_TRAP, GETPC());
        }
    }
}
3029 #include "qemu/guest-random.h"
#ifdef TARGET_PPC64
/*
 * hashst/hashchk helper generator. On PPC64 the DEXCR/HDEXCR aspect
 * bits gate the instruction per privilege state (MSR PR/HV/S checks
 * below); when the aspect is not enabled the instruction is a no-op.
 * NOTE(review): aspect-bit semantics per Power ISA v3.1B DEXCR — confirm.
 */
#define HELPER_HASH(op, key, store, dexcr_aspect)                             \
void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
                 target_ulong rb)                                             \
{                                                                             \
    if (env->msr & R_MSR_PR_MASK) {                                           \
        if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK ||      \
            env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))       \
            return;                                                           \
    } else if (!(env->msr & R_MSR_HV_MASK)) {                                 \
        if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK ||      \
            env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK))       \
            return;                                                           \
    } else if (!(env->msr & R_MSR_S_MASK)) {                                  \
        if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK))     \
            return;                                                           \
    }                                                                         \
                                                                              \
    do_hash(env, ea, ra, rb, key, store);                                     \
}
#else
/* Non-PPC64: no DEXCR gating, always perform the hash operation */
#define HELPER_HASH(op, key, store, dexcr_aspect)                             \
void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra,          \
                 target_ulong rb)                                             \
{                                                                             \
    do_hash(env, ea, ra, rb, key, store);                                     \
}
#endif /* TARGET_PPC64 */

HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
3064 #endif /* CONFIG_TCG */
3066 #if !defined(CONFIG_USER_ONLY)
3070 /* Embedded.Processor Control */
/*
 * Map an embedded doorbell message type (from rb) to the matching
 * PPC_INTERRUPT_* line, or -1 when the type is unsupported.
 */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* guest doorbells not implemented */
    default:
        break;
    }

    return irq;
}
/* msgclr: clear a pending embedded doorbell interrupt on this CPU. */
void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        /* unsupported message type: no-op */
        return;
    }

    ppc_set_irq(env_archcpu(env), irq, 0);
}
/*
 * msgsnd: raise an embedded doorbell interrupt on every CPU whose
 * PIR matches the tag in rb, or on all CPUs for a broadcast message.
 */
void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        /* unsupported message type: no-op */
        return;
    }

    /* CPU list walk and IRQ raising are done under the iothread lock */
    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            ppc_set_irq(cpu, irq, 1);
        }
    }
    qemu_mutex_unlock_iothread();
}
3127 /* Server Processor Control */
/* True when rb carries a server (Book3S) doorbell message. */
static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}
/* Book3S msgclr: clear a pending hypervisor doorbell on this CPU. */
void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        /* reserved message type: no-op */
        return;
    }

    ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
}
/* Raise @irq on the CPU whose default PIR value matches @pir. */
static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    /* CPU list walk and IRQ raising are done under the iothread lock */
    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            ppc_set_irq(cpu, irq, 1);
        }
    }
    qemu_mutex_unlock_iothread();
}
/* Book3S msgsnd: send a hypervisor doorbell to the CPU tagged in rb. */
void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        /* reserved message type: no-op */
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}
3176 #if defined(TARGET_PPC64)
/* Book3S msgclrp: clear a pending directed-privileged doorbell. */
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    /* msgclrp is gated by the HFSCR MSGP facility */
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        /* reserved message type: no-op */
        return;
    }

    ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
}
/*
 * sends a message to another thread on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    CPUState *cs = env_cpu(env);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUState *ccs;
    uint32_t nr_threads = cs->nr_threads;
    int ttir = rb & PPC_BITMASK(57, 63);   /* target thread id */

    /* msgsndp is gated by the HFSCR MSGP facility */
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb) || ttir >= nr_threads) {
        /* reserved type or out-of-range thread id: no-op */
        return;
    }

    if (nr_threads == 1) {
        /* Single-threaded core: the target can only be ourselves */
        ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, 1);
        return;
    }

    /* Does iothread need to be locked for walking CPU list? */
    qemu_mutex_lock_iothread();
    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);
        uint32_t thread_id = ppc_cpu_tir(ccpu);

        if (ttir == thread_id) {
            ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1);
            qemu_mutex_unlock_iothread();
            return;
        }
    }

    /* ttir was validated against nr_threads, so a sibling must match */
    g_assert_not_reached();
}
3226 #endif /* TARGET_PPC64 */
/*
 * TCG hook for unaligned accesses: record the faulting address in the
 * model-specific DEAR/DAR register and raise an alignment interrupt,
 * stashing the offending insn's bits 6-15 in error_code for DSISR.
 */
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr);
    insn = ppc_ldl_code(env, env->nip);

    /* The faulting-address SPR differs between MMU models */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}
3256 #endif /* CONFIG_TCG */
3257 #endif /* !CONFIG_USER_ONLY */