1 /*
2 * PowerPC exception emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "qemu/log.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/runstate.h"
24 #include "cpu.h"
25 #include "exec/exec-all.h"
26 #include "internal.h"
27 #include "helper_regs.h"
28 #include "hw/ppc/ppc.h"
30 #include "trace.h"
32 #ifdef CONFIG_TCG
33 #include "sysemu/tcg.h"
34 #include "exec/helper-proto.h"
35 #include "exec/cpu_ldst.h"
36 #endif
38 /*****************************************************************************/
39 /* Exception processing */
40 #ifndef CONFIG_USER_ONLY
42 static const char *powerpc_excp_name(int excp)
44 switch (excp) {
45 case POWERPC_EXCP_CRITICAL: return "CRITICAL";
46 case POWERPC_EXCP_MCHECK: return "MCHECK";
47 case POWERPC_EXCP_DSI: return "DSI";
48 case POWERPC_EXCP_ISI: return "ISI";
49 case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
50 case POWERPC_EXCP_ALIGN: return "ALIGN";
51 case POWERPC_EXCP_PROGRAM: return "PROGRAM";
52 case POWERPC_EXCP_FPU: return "FPU";
53 case POWERPC_EXCP_SYSCALL: return "SYSCALL";
54 case POWERPC_EXCP_APU: return "APU";
55 case POWERPC_EXCP_DECR: return "DECR";
56 case POWERPC_EXCP_FIT: return "FIT";
57 case POWERPC_EXCP_WDT: return "WDT";
58 case POWERPC_EXCP_DTLB: return "DTLB";
59 case POWERPC_EXCP_ITLB: return "ITLB";
60 case POWERPC_EXCP_DEBUG: return "DEBUG";
61 case POWERPC_EXCP_SPEU: return "SPEU";
62 case POWERPC_EXCP_EFPDI: return "EFPDI";
63 case POWERPC_EXCP_EFPRI: return "EFPRI";
64 case POWERPC_EXCP_EPERFM: return "EPERFM";
65 case POWERPC_EXCP_DOORI: return "DOORI";
66 case POWERPC_EXCP_DOORCI: return "DOORCI";
67 case POWERPC_EXCP_GDOORI: return "GDOORI";
68 case POWERPC_EXCP_GDOORCI: return "GDOORCI";
69 case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
70 case POWERPC_EXCP_RESET: return "RESET";
71 case POWERPC_EXCP_DSEG: return "DSEG";
72 case POWERPC_EXCP_ISEG: return "ISEG";
73 case POWERPC_EXCP_HDECR: return "HDECR";
74 case POWERPC_EXCP_TRACE: return "TRACE";
75 case POWERPC_EXCP_HDSI: return "HDSI";
76 case POWERPC_EXCP_HISI: return "HISI";
77 case POWERPC_EXCP_HDSEG: return "HDSEG";
78 case POWERPC_EXCP_HISEG: return "HISEG";
79 case POWERPC_EXCP_VPU: return "VPU";
80 case POWERPC_EXCP_PIT: return "PIT";
81 case POWERPC_EXCP_EMUL: return "EMUL";
82 case POWERPC_EXCP_IFTLB: return "IFTLB";
83 case POWERPC_EXCP_DLTLB: return "DLTLB";
84 case POWERPC_EXCP_DSTLB: return "DSTLB";
85 case POWERPC_EXCP_FPA: return "FPA";
86 case POWERPC_EXCP_DABR: return "DABR";
87 case POWERPC_EXCP_IABR: return "IABR";
88 case POWERPC_EXCP_SMI: return "SMI";
89 case POWERPC_EXCP_PERFM: return "PERFM";
90 case POWERPC_EXCP_THERM: return "THERM";
91 case POWERPC_EXCP_VPUA: return "VPUA";
92 case POWERPC_EXCP_SOFTP: return "SOFTP";
93 case POWERPC_EXCP_MAINT: return "MAINT";
94 case POWERPC_EXCP_MEXTBR: return "MEXTBR";
95 case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
96 case POWERPC_EXCP_ITLBE: return "ITLBE";
97 case POWERPC_EXCP_DTLBE: return "DTLBE";
98 case POWERPC_EXCP_VSXU: return "VSXU";
99 case POWERPC_EXCP_FU: return "FU";
100 case POWERPC_EXCP_HV_EMU: return "HV_EMU";
101 case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
102 case POWERPC_EXCP_HV_FU: return "HV_FU";
103 case POWERPC_EXCP_SDOOR: return "SDOOR";
104 case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
105 case POWERPC_EXCP_HVIRT: return "HVIRT";
106 case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
107 default:
108 g_assert_not_reached();
112 static void dump_syscall(CPUPPCState *env)
114 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
115 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
116 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
117 " nip=" TARGET_FMT_lx "\n",
118 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
119 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
120 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
121 ppc_dump_gpr(env, 8), env->nip);
124 static void dump_hcall(CPUPPCState *env)
126 qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
127 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
128 " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
129 " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
130 " nip=" TARGET_FMT_lx "\n",
131 ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
132 ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
133 ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
134 ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
135 ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
136 env->nip);
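/*
 * Both dump_syscall() and dump_hcall() go through qemu_log_mask(CPU_LOG_INT,
 * ...), so their output only appears when the "int" log category is enabled,
 * e.g. by running QEMU with "-d int" (optionally "-D <file>" to redirect the
 * log to a file).
 */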
139 #ifdef CONFIG_TCG
140 /* Return true iff byteswap is needed to load instruction */
141 static inline bool insn_need_byteswap(CPUArchState *env)
143 /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
144 return !!(env->msr & ((target_ulong)1 << MSR_LE));
147 static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
149 uint32_t insn = cpu_ldl_code(env, addr);
151 if (insn_need_byteswap(env)) {
152 insn = bswap32(insn);
155 return insn;
158 #endif
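#ifdef CONFIG_TCG
/*
 * A minimal illustrative sketch of using ppc_ldl_code(): because the helper
 * already byteswaps according to MSR[LE], a caller can extract instruction
 * fields directly from the returned word.  The helper name
 * example_primary_opcode is only an example here, not an existing QEMU API.
 */
static inline uint32_t example_primary_opcode(CPUArchState *env,
                                              target_ulong addr)
{
    uint32_t insn = ppc_ldl_code(env, addr);

    /* The primary opcode is the top six bits of the instruction word */
    return insn >> 26;
}
#endif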
160 static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
162 const char *es;
163 target_ulong *miss, *cmp;
164 int en;
166 if (!qemu_loglevel_mask(CPU_LOG_MMU)) {
167 return;
170 if (excp == POWERPC_EXCP_IFTLB) {
171 es = "I";
172 en = 'I';
173 miss = &env->spr[SPR_IMISS];
174 cmp = &env->spr[SPR_ICMP];
175 } else {
176 if (excp == POWERPC_EXCP_DLTLB) {
177 es = "DL";
178 } else {
179 es = "DS";
181 en = 'D';
182 miss = &env->spr[SPR_DMISS];
183 cmp = &env->spr[SPR_DCMP];
185 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
186 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
187 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
188 env->spr[SPR_HASH1], env->spr[SPR_HASH2],
189 env->error_code);
192 #ifdef TARGET_PPC64
193 static int powerpc_reset_wakeup(CPUPPCState *env, int excp, target_ulong *msr)
195 /* We are no longer in a PM state */
196 env->resume_as_sreset = false;
198 /* Always pretend to be returning from doze, as we don't lose state */
199 *msr |= SRR1_WS_NOLOSS;
201 /* Machine checks are sent normally */
202 if (excp == POWERPC_EXCP_MCHECK) {
203 return excp;
205 switch (excp) {
206 case POWERPC_EXCP_RESET:
207 *msr |= SRR1_WAKERESET;
208 break;
209 case POWERPC_EXCP_EXTERNAL:
210 *msr |= SRR1_WAKEEE;
211 break;
212 case POWERPC_EXCP_DECR:
213 *msr |= SRR1_WAKEDEC;
214 break;
215 case POWERPC_EXCP_SDOOR:
216 *msr |= SRR1_WAKEDBELL;
217 break;
218 case POWERPC_EXCP_SDOOR_HV:
219 *msr |= SRR1_WAKEHDBELL;
220 break;
221 case POWERPC_EXCP_HV_MAINT:
222 *msr |= SRR1_WAKEHMI;
223 break;
224 case POWERPC_EXCP_HVIRT:
225 *msr |= SRR1_WAKEHVI;
226 break;
227 default:
228 cpu_abort(env_cpu(env),
229 "Unsupported exception %d in Power Save mode\n", excp);
231 return POWERPC_EXCP_RESET;
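/*
 * For instance, a decrementer that wakes the core from a power-saving state
 * is re-delivered through the 0x100 system reset vector, with SRR1 carrying
 * SRR1_WS_NOLOSS | SRR1_WAKEDEC so the OS can tell why it woke up.
 */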
235 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
236 * taken with the MMU on, and which uses an alternate location (e.g., so the
237 * kernel/hv can map the vectors there with an effective address).
239 * An interrupt is considered to be taken "with AIL" or "AIL applies" if it
240 * is delivered in this way. AIL requires the LPCR to be set to enable this
241 * mode, and then a number of conditions have to be true for AIL to apply.
243 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
244 * they specifically want to be in real mode (e.g., the MCE might be signaling
245 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
247 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
248 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
249 * radix mode (LPCR[HR]).
251 * POWER8, POWER9 with LPCR[HR]=0
252 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
253 * +-----------+-------------+---------+-------------+-----+
254 * | a | 00/01/10 | x | x | 0 |
255 * | a | 11 | 0 | 1 | 0 |
256 * | a | 11 | 1 | 1 | a |
257 * | a | 11 | 0 | 0 | a |
258 * +-------------------------------------------------------+
260 * POWER9 with LPCR[HR]=1
261 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
262 * +-----------+-------------+---------+-------------+-----+
263 * | a | 00/01/10 | x | x | 0 |
264 * | a | 11 | x | x | a |
265 * +-------------------------------------------------------+
267 * The difference on POWER9 is that MSR[HV] 0->1 interrupts can be sent to
268 * the hypervisor in AIL mode if the guest is radix. This is good for
269 * performance but allows the guest to influence the AIL of hypervisor
270 * interrupts using its MSR, and also the hypervisor must disallow guest
271 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
272 * use AIL for its MSR[HV] 0->1 interrupts.
274 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
275 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
276 * MSR[HV] 1->1).
278 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
280 * POWER10 behaviour is
281 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
282 * +-----------+------------+-------------+---------+-------------+-----+
283 * | a | h | 00/01/10 | 0 | 0 | 0 |
284 * | a | h | 11 | 0 | 0 | a |
285 * | a | h | x | 0 | 1 | h |
286 * | a | h | 00/01/10 | 1 | 1 | 0 |
287 * | a | h | 11 | 1 | 1 | h |
288 * +--------------------------------------------------------------------+
290 static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
291 target_ulong *new_msr, target_ulong *vector)
293 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
294 CPUPPCState *env = &cpu->env;
295 bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
296 bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
297 int ail = 0;
299 if (excp == POWERPC_EXCP_MCHECK ||
300 excp == POWERPC_EXCP_RESET ||
301 excp == POWERPC_EXCP_HV_MAINT) {
302 /* SRESET, MCE, HMI never apply AIL */
303 return;
306 if (!(pcc->lpcr_mask & LPCR_AIL)) {
307 /* This CPU does not have AIL */
308 return;
311 /* P8 & P9 */
312 if (!(pcc->lpcr_mask & LPCR_HAIL)) {
313 if (!mmu_all_on) {
314 /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
315 return;
317 if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
319 * AIL does not work if there is a MSR[HV] 0->1 transition and the
320 * partition is in HPT mode. For radix guests, such interrupts are
321 * allowed to be delivered to the hypervisor in ail mode.
323 return;
326 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
327 if (ail == 0) {
328 return;
330 if (ail == 1) {
331 /* AIL=1 is reserved, treat it like AIL=0 */
332 return;
335 /* P10 and up */
336 } else {
337 if (!mmu_all_on && !hv_escalation) {
339 * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
340 * Guest->guest and HV->HV interrupts do require MMU on.
342 return;
345 if (*new_msr & MSR_HVB) {
346 if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
347 /* HV interrupts depend on LPCR[HAIL] */
348 return;
350 ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
351 } else {
352 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
354 if (ail == 0) {
355 return;
357 if (ail == 1 || ail == 2) {
358 /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
359 return;
364 * AIL applies, so the new MSR gets IR and DR set, and an offset applied
365 * to the new IP.
367 *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
369 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
370 if (ail == 2) {
371 *vector |= 0x0000000000018000ull;
372 } else if (ail == 3) {
373 *vector |= 0xc000000000004000ull;
375 } else {
377 * scv AIL is a little different. AIL=2 does not change the address,
378 * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
380 if (ail == 3) {
381 *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
382 *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
386 #endif /* TARGET_PPC64 */
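/*
 * Worked example of the AIL=3 case above, assuming LPCR[AIL]=3, MMU on and
 * no MSR[HV] 0->1 escalation: a non-scv interrupt vector such as the 0x900
 * decrementer is relocated to 0xc000000000004900, and the handler starts
 * with MSR[IR] and MSR[DR] set.  example_ail3_vector is only an
 * illustration, not an existing helper.
 */
static inline uint64_t example_ail3_vector(uint64_t vector)
{
    /* AIL=3 ORs in the 0xc000000000004000 alternate location */
    return vector | 0xc000000000004000ull;
}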
388 static void powerpc_reset_excp_state(PowerPCCPU *cpu)
390 CPUState *cs = CPU(cpu);
391 CPUPPCState *env = &cpu->env;
393 /* Reset exception state */
394 cs->exception_index = POWERPC_EXCP_NONE;
395 env->error_code = 0;
398 static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
399 target_ulong msr)
401 CPUPPCState *env = &cpu->env;
403 assert((msr & env->msr_mask) == msr);
406 * We don't use hreg_store_msr here as we have already handled any
407 * special case that could occur. Just store MSR and update hflags.
409 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it will
410 * prevent setting of the HV bit which some exceptions might need to do.
412 env->nip = vector;
413 env->msr = msr;
414 hreg_compute_hflags(env);
415 ppc_maybe_interrupt(env);
417 powerpc_reset_excp_state(cpu);
420 * Any interrupt is context synchronizing; check whether the TCG TLB
421 * needs a delayed flush on ppc64.
423 check_tlb_flush(env, false);
425 /* Reset the reservation */
426 env->reserve_addr = -1;
429 #ifdef CONFIG_TCG
431 * This stops the machine and logs CPU state without killing QEMU (like
432 * cpu_abort()) because it is often a guest error as opposed to a QEMU error,
433 * so the machine can still be debugged.
435 static G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason)
437 CPUState *cs = env_cpu(env);
438 FILE *f;
440 f = qemu_log_trylock();
441 if (f) {
442 fprintf(f, "Entering checkstop state: %s\n", reason);
443 cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP);
444 qemu_log_unlock(f);
448 * This stops the machine and logs CPU state without killing QEMU
449 * (like cpu_abort()) so the machine can still be debugged (because
450 * it is often a guest error).
452 qemu_system_guest_panicked(NULL);
453 cpu_loop_exit_noexc(cs);
456 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
457 void helper_attn(CPUPPCState *env)
459 /* POWER attn is unprivileged when enabled by HID, otherwise illegal */
460 if ((*env->check_attn)(env)) {
461 powerpc_checkstop(env, "host executed attn");
462 } else {
463 raise_exception_err(env, POWERPC_EXCP_HV_EMU,
464 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
467 #endif
468 #endif /* CONFIG_TCG */
470 static void powerpc_mcheck_checkstop(CPUPPCState *env)
472 /* KVM guests always have MSR[ME] enabled */
473 #ifdef CONFIG_TCG
474 if (FIELD_EX64(env->msr, MSR, ME)) {
475 return;
478 powerpc_checkstop(env, "machine check with MSR[ME]=0");
479 #endif
482 static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
484 CPUPPCState *env = &cpu->env;
485 target_ulong msr, new_msr, vector;
486 int srr0 = SPR_SRR0, srr1 = SPR_SRR1;
488 /* new srr1 value excluding must-be-zero bits */
489 msr = env->msr & ~0x783f0000ULL;
491 /* new interrupt handler msr preserves ME unless explicitly overridden */
492 new_msr = env->msr & (((target_ulong)1 << MSR_ME));
494 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
495 if (excp == POWERPC_EXCP_HV_EMU) {
496 excp = POWERPC_EXCP_PROGRAM;
499 vector = env->excp_vectors[excp];
500 if (vector == (target_ulong)-1ULL) {
501 cpu_abort(env_cpu(env),
502 "Raised an exception without defined vector %d\n", excp);
504 vector |= env->excp_prefix;
506 switch (excp) {
507 case POWERPC_EXCP_CRITICAL: /* Critical input */
508 srr0 = SPR_40x_SRR2;
509 srr1 = SPR_40x_SRR3;
510 break;
511 case POWERPC_EXCP_MCHECK: /* Machine check exception */
512 powerpc_mcheck_checkstop(env);
513 /* machine check exceptions don't have ME set */
514 new_msr &= ~((target_ulong)1 << MSR_ME);
515 srr0 = SPR_40x_SRR2;
516 srr1 = SPR_40x_SRR3;
517 break;
518 case POWERPC_EXCP_DSI: /* Data storage exception */
519 trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]);
520 break;
521 case POWERPC_EXCP_ISI: /* Instruction storage exception */
522 trace_ppc_excp_isi(msr, env->nip);
523 break;
524 case POWERPC_EXCP_EXTERNAL: /* External input */
525 break;
526 case POWERPC_EXCP_ALIGN: /* Alignment exception */
527 break;
528 case POWERPC_EXCP_PROGRAM: /* Program exception */
529 switch (env->error_code & ~0xF) {
530 case POWERPC_EXCP_FP:
531 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
532 trace_ppc_excp_fp_ignore();
533 powerpc_reset_excp_state(cpu);
534 return;
536 env->spr[SPR_40x_ESR] = ESR_FP;
537 break;
538 case POWERPC_EXCP_INVAL:
539 trace_ppc_excp_inval(env->nip);
540 env->spr[SPR_40x_ESR] = ESR_PIL;
541 break;
542 case POWERPC_EXCP_PRIV:
543 env->spr[SPR_40x_ESR] = ESR_PPR;
544 break;
545 case POWERPC_EXCP_TRAP:
546 env->spr[SPR_40x_ESR] = ESR_PTR;
547 break;
548 default:
549 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
550 env->error_code);
551 break;
553 break;
554 case POWERPC_EXCP_SYSCALL: /* System call exception */
555 dump_syscall(env);
558 * We need to correct the NIP which in this case is supposed
559 * to point to the next instruction
561 env->nip += 4;
562 break;
563 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
564 trace_ppc_excp_print("FIT");
565 break;
566 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
567 trace_ppc_excp_print("WDT");
568 break;
569 case POWERPC_EXCP_DTLB: /* Data TLB error */
570 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
571 break;
572 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
573 trace_ppc_excp_print("PIT");
574 break;
575 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
576 cpu_abort(env_cpu(env), "%s exception not implemented\n",
577 powerpc_excp_name(excp));
578 break;
579 default:
580 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
581 excp);
582 break;
585 env->spr[srr0] = env->nip;
586 env->spr[srr1] = msr;
587 powerpc_set_excp_state(cpu, vector, new_msr);
590 static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
592 CPUPPCState *env = &cpu->env;
593 target_ulong msr, new_msr, vector;
595 /* new srr1 value excluding must-be-zero bits */
596 msr = env->msr & ~0x783f0000ULL;
598 /* new interrupt handler msr preserves ME unless explicitly overridden */
599 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
601 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
602 if (excp == POWERPC_EXCP_HV_EMU) {
603 excp = POWERPC_EXCP_PROGRAM;
606 vector = env->excp_vectors[excp];
607 if (vector == (target_ulong)-1ULL) {
608 cpu_abort(env_cpu(env),
609 "Raised an exception without defined vector %d\n", excp);
611 vector |= env->excp_prefix;
613 switch (excp) {
614 case POWERPC_EXCP_CRITICAL: /* Critical input */
615 break;
616 case POWERPC_EXCP_MCHECK: /* Machine check exception */
617 powerpc_mcheck_checkstop(env);
618 /* machine check exceptions don't have ME set */
619 new_msr &= ~((target_ulong)1 << MSR_ME);
620 break;
621 case POWERPC_EXCP_DSI: /* Data storage exception */
622 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
623 break;
624 case POWERPC_EXCP_ISI: /* Instruction storage exception */
625 trace_ppc_excp_isi(msr, env->nip);
626 msr |= env->error_code;
627 break;
628 case POWERPC_EXCP_EXTERNAL: /* External input */
629 break;
630 case POWERPC_EXCP_ALIGN: /* Alignment exception */
631 /* Get rS/rD and rA from faulting opcode */
633 * Note: the opcode fields will not be set properly for a
634 * direct store load/store, but nobody cares as nobody
635 * actually uses direct store segments.
637 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
638 break;
639 case POWERPC_EXCP_PROGRAM: /* Program exception */
640 switch (env->error_code & ~0xF) {
641 case POWERPC_EXCP_FP:
642 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
643 trace_ppc_excp_fp_ignore();
644 powerpc_reset_excp_state(cpu);
645 return;
648 * NIP always points to the faulting instruction for FP exceptions,
649 * so always use store_next and claim we are precise in the MSR.
651 msr |= 0x00100000;
652 break;
653 case POWERPC_EXCP_INVAL:
654 trace_ppc_excp_inval(env->nip);
655 msr |= 0x00080000;
656 break;
657 case POWERPC_EXCP_PRIV:
658 msr |= 0x00040000;
659 break;
660 case POWERPC_EXCP_TRAP:
661 msr |= 0x00020000;
662 break;
663 default:
664 /* Should never occur */
665 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
666 env->error_code);
667 break;
669 break;
670 case POWERPC_EXCP_SYSCALL: /* System call exception */
671 dump_syscall(env);
674 * We need to correct the NIP which in this case is supposed
675 * to point to the next instruction
677 env->nip += 4;
678 break;
679 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
680 case POWERPC_EXCP_DECR: /* Decrementer exception */
681 break;
682 case POWERPC_EXCP_DTLB: /* Data TLB error */
683 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
684 break;
685 case POWERPC_EXCP_RESET: /* System reset exception */
686 if (FIELD_EX64(env->msr, MSR, POW)) {
687 cpu_abort(env_cpu(env),
688 "Trying to deliver power-saving system reset exception "
689 "%d with no HV support\n", excp);
691 break;
692 case POWERPC_EXCP_TRACE: /* Trace exception */
693 break;
694 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
695 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
696 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
697 /* Swap temporary saved registers with GPRs */
698 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
699 new_msr |= (target_ulong)1 << MSR_TGPR;
700 hreg_swap_gpr_tgpr(env);
703 ppc_excp_debug_sw_tlb(env, excp);
705 msr |= env->crf[0] << 28;
706 msr |= env->error_code; /* key, D/I, S/L bits */
707 /* Set way using a LRU mechanism */
708 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
709 break;
710 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
711 case POWERPC_EXCP_DABR: /* Data address breakpoint */
712 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
713 case POWERPC_EXCP_SMI: /* System management interrupt */
714 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
715 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
716 cpu_abort(env_cpu(env), "%s exception not implemented\n",
717 powerpc_excp_name(excp));
718 break;
719 default:
720 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
721 excp);
722 break;
725 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
726 new_msr |= (target_ulong)1 << MSR_LE;
728 env->spr[SPR_SRR0] = env->nip;
729 env->spr[SPR_SRR1] = msr;
730 powerpc_set_excp_state(cpu, vector, new_msr);
733 static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
735 CPUPPCState *env = &cpu->env;
736 target_ulong msr, new_msr, vector;
738 /* new srr1 value excluding must-be-zero bits */
739 msr = env->msr & ~0x783f0000ULL;
741 /* new interrupt handler msr preserves ME unless explicitly overridden */
742 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
744 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
745 if (excp == POWERPC_EXCP_HV_EMU) {
746 excp = POWERPC_EXCP_PROGRAM;
749 vector = env->excp_vectors[excp];
750 if (vector == (target_ulong)-1ULL) {
751 cpu_abort(env_cpu(env),
752 "Raised an exception without defined vector %d\n", excp);
754 vector |= env->excp_prefix;
756 switch (excp) {
757 case POWERPC_EXCP_MCHECK: /* Machine check exception */
758 powerpc_mcheck_checkstop(env);
759 /* machine check exceptions don't have ME set */
760 new_msr &= ~((target_ulong)1 << MSR_ME);
761 break;
762 case POWERPC_EXCP_DSI: /* Data storage exception */
763 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
764 break;
765 case POWERPC_EXCP_ISI: /* Instruction storage exception */
766 trace_ppc_excp_isi(msr, env->nip);
767 msr |= env->error_code;
768 break;
769 case POWERPC_EXCP_EXTERNAL: /* External input */
770 break;
771 case POWERPC_EXCP_ALIGN: /* Alignment exception */
772 /* Get rS/rD and rA from faulting opcode */
774 * Note: the opcode fields will not be set properly for a
775 * direct store load/store, but nobody cares as nobody
776 * actually uses direct store segments.
778 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
779 break;
780 case POWERPC_EXCP_PROGRAM: /* Program exception */
781 switch (env->error_code & ~0xF) {
782 case POWERPC_EXCP_FP:
783 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
784 trace_ppc_excp_fp_ignore();
785 powerpc_reset_excp_state(cpu);
786 return;
789 * NIP always points to the faulting instruction for FP exceptions,
790 * so always use store_next and claim we are precise in the MSR.
792 msr |= 0x00100000;
793 break;
794 case POWERPC_EXCP_INVAL:
795 trace_ppc_excp_inval(env->nip);
796 msr |= 0x00080000;
797 break;
798 case POWERPC_EXCP_PRIV:
799 msr |= 0x00040000;
800 break;
801 case POWERPC_EXCP_TRAP:
802 msr |= 0x00020000;
803 break;
804 default:
805 /* Should never occur */
806 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
807 env->error_code);
808 break;
810 break;
811 case POWERPC_EXCP_SYSCALL: /* System call exception */
813 int lev = env->error_code;
815 if (lev == 1 && cpu->vhyp) {
816 dump_hcall(env);
817 } else {
818 dump_syscall(env);
822 * We need to correct the NIP which in this case is supposed
823 * to point to the next instruction
825 env->nip += 4;
828 * The Virtual Open Firmware (VOF) relies on the 'sc 1'
829 * instruction to communicate with QEMU. The pegasos2 machine
830 * uses VOF and the 7xx CPUs, so although the 7xx don't have
831 * HV mode, we need to keep hypercall support.
833 if (lev == 1 && cpu->vhyp) {
834 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
835 powerpc_reset_excp_state(cpu);
836 return;
839 break;
841 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
842 case POWERPC_EXCP_DECR: /* Decrementer exception */
843 break;
844 case POWERPC_EXCP_RESET: /* System reset exception */
845 if (FIELD_EX64(env->msr, MSR, POW)) {
846 cpu_abort(env_cpu(env),
847 "Trying to deliver power-saving system reset exception "
848 "%d with no HV support\n", excp);
850 break;
851 case POWERPC_EXCP_TRACE: /* Trace exception */
852 break;
853 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
854 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
855 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
856 ppc_excp_debug_sw_tlb(env, excp);
857 msr |= env->crf[0] << 28;
858 msr |= env->error_code; /* key, D/I, S/L bits */
859 /* Set way using a LRU mechanism */
860 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
861 break;
862 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
863 case POWERPC_EXCP_SMI: /* System management interrupt */
864 case POWERPC_EXCP_THERM: /* Thermal interrupt */
865 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
866 cpu_abort(env_cpu(env), "%s exception not implemented\n",
867 powerpc_excp_name(excp));
868 break;
869 default:
870 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
871 excp);
872 break;
875 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
876 new_msr |= (target_ulong)1 << MSR_LE;
878 env->spr[SPR_SRR0] = env->nip;
879 env->spr[SPR_SRR1] = msr;
880 powerpc_set_excp_state(cpu, vector, new_msr);
883 static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
885 CPUPPCState *env = &cpu->env;
886 target_ulong msr, new_msr, vector;
888 /* new srr1 value excluding must-be-zero bits */
889 msr = env->msr & ~0x783f0000ULL;
891 /* new interrupt handler msr preserves ME unless explicitly overridden */
892 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
894 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
895 if (excp == POWERPC_EXCP_HV_EMU) {
896 excp = POWERPC_EXCP_PROGRAM;
899 vector = env->excp_vectors[excp];
900 if (vector == (target_ulong)-1ULL) {
901 cpu_abort(env_cpu(env),
902 "Raised an exception without defined vector %d\n", excp);
904 vector |= env->excp_prefix;
906 switch (excp) {
907 case POWERPC_EXCP_MCHECK: /* Machine check exception */
908 powerpc_mcheck_checkstop(env);
909 /* machine check exceptions don't have ME set */
910 new_msr &= ~((target_ulong)1 << MSR_ME);
911 break;
912 case POWERPC_EXCP_DSI: /* Data storage exception */
913 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
914 break;
915 case POWERPC_EXCP_ISI: /* Instruction storage exception */
916 trace_ppc_excp_isi(msr, env->nip);
917 msr |= env->error_code;
918 break;
919 case POWERPC_EXCP_EXTERNAL: /* External input */
920 break;
921 case POWERPC_EXCP_ALIGN: /* Alignment exception */
922 /* Get rS/rD and rA from faulting opcode */
924 * Note: the opcode fields will not be set properly for a
925 * direct store load/store, but nobody cares as nobody
926 * actually uses direct store segments.
928 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
929 break;
930 case POWERPC_EXCP_PROGRAM: /* Program exception */
931 switch (env->error_code & ~0xF) {
932 case POWERPC_EXCP_FP:
933 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
934 trace_ppc_excp_fp_ignore();
935 powerpc_reset_excp_state(cpu);
936 return;
939 * NIP always points to the faulting instruction for FP exceptions,
940 * so always use store_next and claim we are precise in the MSR.
942 msr |= 0x00100000;
943 break;
944 case POWERPC_EXCP_INVAL:
945 trace_ppc_excp_inval(env->nip);
946 msr |= 0x00080000;
947 break;
948 case POWERPC_EXCP_PRIV:
949 msr |= 0x00040000;
950 break;
951 case POWERPC_EXCP_TRAP:
952 msr |= 0x00020000;
953 break;
954 default:
955 /* Should never occur */
956 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
957 env->error_code);
958 break;
960 break;
961 case POWERPC_EXCP_SYSCALL: /* System call exception */
963 int lev = env->error_code;
965 if (lev == 1 && cpu->vhyp) {
966 dump_hcall(env);
967 } else {
968 dump_syscall(env);
972 * We need to correct the NIP which in this case is supposed
973 * to point to the next instruction
975 env->nip += 4;
978 * The Virtual Open Firmware (VOF) relies on the 'sc 1'
979 * instruction to communicate with QEMU. The pegasos2 machine
980 * uses VOF and the 74xx CPUs, so although the 74xx don't have
981 * HV mode, we need to keep hypercall support.
983 if (lev == 1 && cpu->vhyp) {
984 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
985 powerpc_reset_excp_state(cpu);
986 return;
989 break;
991 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
992 case POWERPC_EXCP_DECR: /* Decrementer exception */
993 break;
994 case POWERPC_EXCP_RESET: /* System reset exception */
995 if (FIELD_EX64(env->msr, MSR, POW)) {
996 cpu_abort(env_cpu(env),
997 "Trying to deliver power-saving system reset "
998 "exception %d with no HV support\n", excp);
1000 break;
1001 case POWERPC_EXCP_TRACE: /* Trace exception */
1002 break;
1003 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
1004 break;
1005 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
1006 case POWERPC_EXCP_SMI: /* System management interrupt */
1007 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1008 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
1009 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1010 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1011 powerpc_excp_name(excp));
1012 break;
1013 default:
1014 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1015 excp);
1016 break;
1019 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
1020 new_msr |= (target_ulong)1 << MSR_LE;
1022 env->spr[SPR_SRR0] = env->nip;
1023 env->spr[SPR_SRR1] = msr;
1024 powerpc_set_excp_state(cpu, vector, new_msr);
1027 static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
1029 CPUPPCState *env = &cpu->env;
1030 target_ulong msr, new_msr, vector;
1031 int srr0 = SPR_SRR0, srr1 = SPR_SRR1;
1034 * Book E does not play games with certain bits of xSRR1 being MSR save
1035 * bits and others being error status. xSRR1 is the old MSR, period.
1037 msr = env->msr;
1039 /* new interrupt handler msr preserves ME unless explicitly overridden */
1040 new_msr = env->msr & ((target_ulong)1 << MSR_ME);
1042 /* HV emu assistance interrupt only exists on server arch 2.05 or later */
1043 if (excp == POWERPC_EXCP_HV_EMU) {
1044 excp = POWERPC_EXCP_PROGRAM;
1047 #ifdef TARGET_PPC64
1049 * SPEU and VPU share the same IVOR but they exist in different
1050 * processors. SPEU is e500v1/2 only and VPU is e6500 only.
1052 if (excp == POWERPC_EXCP_VPU) {
1053 excp = POWERPC_EXCP_SPEU;
1055 #endif
1057 vector = env->excp_vectors[excp];
1058 if (vector == (target_ulong)-1ULL) {
1059 cpu_abort(env_cpu(env),
1060 "Raised an exception without defined vector %d\n", excp);
1062 vector |= env->excp_prefix;
1064 switch (excp) {
1065 case POWERPC_EXCP_CRITICAL: /* Critical input */
1066 srr0 = SPR_BOOKE_CSRR0;
1067 srr1 = SPR_BOOKE_CSRR1;
1068 break;
1069 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1070 powerpc_mcheck_checkstop(env);
1071 /* machine check exceptions don't have ME set */
1072 new_msr &= ~((target_ulong)1 << MSR_ME);
1074 /* FIXME: choose one or the other based on CPU type */
1075 srr0 = SPR_BOOKE_MCSRR0;
1076 srr1 = SPR_BOOKE_MCSRR1;
1078 env->spr[SPR_BOOKE_CSRR0] = env->nip;
1079 env->spr[SPR_BOOKE_CSRR1] = msr;
1081 break;
1082 case POWERPC_EXCP_DSI: /* Data storage exception */
1083 trace_ppc_excp_dsi(env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]);
1084 break;
1085 case POWERPC_EXCP_ISI: /* Instruction storage exception */
1086 trace_ppc_excp_isi(msr, env->nip);
1087 break;
1088 case POWERPC_EXCP_EXTERNAL: /* External input */
1089 if (env->mpic_proxy) {
1090 CPUState *cs = env_cpu(env);
1091 /* IACK the IRQ on delivery */
1092 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
1094 break;
1095 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1096 break;
1097 case POWERPC_EXCP_PROGRAM: /* Program exception */
1098 switch (env->error_code & ~0xF) {
1099 case POWERPC_EXCP_FP:
1100 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
1101 trace_ppc_excp_fp_ignore();
1102 powerpc_reset_excp_state(cpu);
1103 return;
1106 * NIP always points to the faulting instruction for FP exceptions,
1107 * so always use store_next and claim we are precise in the MSR.
1109 msr |= 0x00100000;
1110 env->spr[SPR_BOOKE_ESR] = ESR_FP;
1111 break;
1112 case POWERPC_EXCP_INVAL:
1113 trace_ppc_excp_inval(env->nip);
1114 msr |= 0x00080000;
1115 env->spr[SPR_BOOKE_ESR] = ESR_PIL;
1116 break;
1117 case POWERPC_EXCP_PRIV:
1118 msr |= 0x00040000;
1119 env->spr[SPR_BOOKE_ESR] = ESR_PPR;
1120 break;
1121 case POWERPC_EXCP_TRAP:
1122 msr |= 0x00020000;
1123 env->spr[SPR_BOOKE_ESR] = ESR_PTR;
1124 break;
1125 default:
1126 /* Should never occur */
1127 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
1128 env->error_code);
1129 break;
1131 break;
1132 case POWERPC_EXCP_SYSCALL: /* System call exception */
1133 dump_syscall(env);
1136 * We need to correct the NIP which in this case is supposed
1137 * to point to the next instruction
1139 env->nip += 4;
1140 break;
1141 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1142 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
1143 case POWERPC_EXCP_DECR: /* Decrementer exception */
1144 break;
1145 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
1146 /* FIT on 4xx */
1147 trace_ppc_excp_print("FIT");
1148 break;
1149 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
1150 trace_ppc_excp_print("WDT");
1151 srr0 = SPR_BOOKE_CSRR0;
1152 srr1 = SPR_BOOKE_CSRR1;
1153 break;
1154 case POWERPC_EXCP_DTLB: /* Data TLB error */
1155 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
1156 break;
1157 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
1158 if (env->flags & POWERPC_FLAG_DE) {
1159 /* FIXME: choose one or the other based on CPU type */
1160 srr0 = SPR_BOOKE_DSRR0;
1161 srr1 = SPR_BOOKE_DSRR1;
1163 env->spr[SPR_BOOKE_CSRR0] = env->nip;
1164 env->spr[SPR_BOOKE_CSRR1] = msr;
1166 /* DBSR already modified by caller */
1167 } else {
1168 cpu_abort(env_cpu(env),
1169 "Debug exception triggered on unsupported model\n");
1171 break;
1172 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable/VPU */
1173 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
1174 break;
1175 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
1176 break;
1177 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
1178 srr0 = SPR_BOOKE_CSRR0;
1179 srr1 = SPR_BOOKE_CSRR1;
1180 break;
1181 case POWERPC_EXCP_RESET: /* System reset exception */
1182 if (FIELD_EX64(env->msr, MSR, POW)) {
1183 cpu_abort(env_cpu(env),
1184 "Trying to deliver power-saving system reset "
1185 "exception %d with no HV support\n", excp);
1187 break;
1188 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
1189 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
1190 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1191 powerpc_excp_name(excp));
1192 break;
1193 default:
1194 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1195 excp);
1196 break;
1199 #ifdef TARGET_PPC64
1200 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
1201 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
1202 new_msr |= (target_ulong)1 << MSR_CM;
1203 } else {
1204 vector = (uint32_t)vector;
1206 #endif
1208 env->spr[srr0] = env->nip;
1209 env->spr[srr1] = msr;
1210 powerpc_set_excp_state(cpu, vector, new_msr);
1214 * When running a nested HV guest under vhyp, external interrupts are
1215 * delivered as HVIRT.
1217 static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu)
1219 if (cpu->vhyp) {
1220 return vhyp_cpu_in_nested(cpu);
1222 return false;
1225 #ifdef TARGET_PPC64
1227 * When running under vhyp, hcalls are always intercepted and sent to the
1228 * vhc->hypercall handler.
1230 static bool books_vhyp_handles_hcall(PowerPCCPU *cpu)
1232 if (cpu->vhyp) {
1233 return !vhyp_cpu_in_nested(cpu);
1235 return false;
1239 * When running a nested KVM HV guest under vhyp, HV exceptions are not
1240 * delivered to the guest (because there is no concept of HV support), but
1241 * rather they are sent to the vhyp to exit from the L2 back to the L1 and
1242 * return from the H_ENTER_NESTED hypercall.
1244 static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu)
1246 if (cpu->vhyp) {
1247 return vhyp_cpu_in_nested(cpu);
1249 return false;
1252 #ifdef CONFIG_TCG
1253 static bool is_prefix_insn(CPUPPCState *env, uint32_t insn)
1255 if (!(env->insns_flags2 & PPC2_ISA310)) {
1256 return false;
1258 return ((insn & 0xfc000000) == 0x04000000);
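/*
 * As a concrete check of the mask above: every ISA v3.1 prefix word has
 * primary opcode 1 (top six bits 0b000001), so e.g. 0x06000000 & 0xfc000000
 * == 0x04000000, and is_prefix_insn() reports it as a prefix provided the
 * CPU advertises PPC2_ISA310.
 */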
1261 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
1263 CPUPPCState *env = &cpu->env;
1265 if (!(env->insns_flags2 & PPC2_ISA310)) {
1266 return false;
1269 if (!tcg_enabled()) {
1271 * This does not load instructions and set the prefix bit correctly
1272 * for injected interrupts with KVM. That may have to be discovered
1273 * and set by the KVM layer before injecting.
1275 return false;
1278 switch (excp) {
1279 case POWERPC_EXCP_MCHECK:
1280 if (!(env->error_code & PPC_BIT(42))) {
1282 * Fetch attempt caused a machine check, so attempting to fetch
1283 * again would cause a recursive machine check.
1285 return false;
1287 break;
1288 case POWERPC_EXCP_HDSI:
1289 /* HDSI PRTABLE_FAULT has the originating access type in error_code */
1290 if ((env->spr[SPR_HDSISR] & DSISR_PRTABLE_FAULT) &&
1291 (env->error_code == MMU_INST_FETCH)) {
1293 * Fetch failed due to partition scope translation, so prefix
1294 * indication is not relevant (and attempting to load the
1295 * instruction at NIP would cause recursive faults with the same
1296 * translation).
1298 return false;
1300 break;
1302 case POWERPC_EXCP_DSI:
1303 case POWERPC_EXCP_DSEG:
1304 case POWERPC_EXCP_ALIGN:
1305 case POWERPC_EXCP_PROGRAM:
1306 case POWERPC_EXCP_FPU:
1307 case POWERPC_EXCP_TRACE:
1308 case POWERPC_EXCP_HV_EMU:
1309 case POWERPC_EXCP_VPU:
1310 case POWERPC_EXCP_VSXU:
1311 case POWERPC_EXCP_FU:
1312 case POWERPC_EXCP_HV_FU:
1313 break;
1314 default:
1315 return false;
1318 return is_prefix_insn(env, ppc_ldl_code(env, env->nip));
1320 #else
1321 static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
1323 return false;
1325 #endif
1327 static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
1329 CPUPPCState *env = &cpu->env;
1330 target_ulong msr, new_msr, vector;
1331 int srr0 = SPR_SRR0, srr1 = SPR_SRR1, lev = -1;
1333 /* new srr1 value excluding must-be-zero bits */
1334 msr = env->msr & ~0x783f0000ULL;
1337 * new interrupt handler msr preserves HV and ME unless explicitly
1338 * overridden
1340 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
1343 * check for special resume at 0x100 from doze/nap/sleep/winkle on
1344 * P7/P8/P9
1346 if (env->resume_as_sreset) {
1347 excp = powerpc_reset_wakeup(env, excp, &msr);
1351 * We don't want to generate a Hypervisor Emulation Assistance
1352 * Interrupt if we don't have HVB in msr_mask (PAPR mode),
1353 * unless running a nested-hv guest, in which case the L1
1354 * kernel wants the interrupt.
1356 if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) &&
1357 !books_vhyp_handles_hv_excp(cpu)) {
1358 excp = POWERPC_EXCP_PROGRAM;
1361 vector = env->excp_vectors[excp];
1362 if (vector == (target_ulong)-1ULL) {
1363 cpu_abort(env_cpu(env),
1364 "Raised an exception without defined vector %d\n", excp);
1366 vector |= env->excp_prefix;
1368 if (is_prefix_insn_excp(cpu, excp)) {
1369 msr |= PPC_BIT(34);
1372 switch (excp) {
1373 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1374 powerpc_mcheck_checkstop(env);
1375 if (env->msr_mask & MSR_HVB) {
1377 * ISA specifies HV, but can be delivered to guest with HV
1378 * clear (e.g., see FWNMI in PAPR).
1380 new_msr |= (target_ulong)MSR_HVB;
1382 /* HV machine check exceptions don't have ME set */
1383 new_msr &= ~((target_ulong)1 << MSR_ME);
1386 msr |= env->error_code;
1387 break;
1389 case POWERPC_EXCP_DSI: /* Data storage exception */
1390 trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
1391 break;
1392 case POWERPC_EXCP_ISI: /* Instruction storage exception */
1393 trace_ppc_excp_isi(msr, env->nip);
1394 msr |= env->error_code;
1395 break;
1396 case POWERPC_EXCP_EXTERNAL: /* External input */
1398 bool lpes0;
1400 /* LPES0 is only taken into consideration if we support HV mode */
1401 if (!env->has_hv_mode) {
1402 break;
1404 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1405 if (!lpes0) {
1406 new_msr |= (target_ulong)MSR_HVB;
1407 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1408 srr0 = SPR_HSRR0;
1409 srr1 = SPR_HSRR1;
1411 break;
1413 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1414 /* Optional DSISR update was removed from ISA v3.0 */
1415 if (!(env->insns_flags2 & PPC2_ISA300)) {
1416 /* Get rS/rD and rA from faulting opcode */
1418 * Note: the opcode fields will not be set properly for a
1419 * direct store load/store, but nobody cares as nobody
1420 * actually uses direct store segments.
1422 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
1424 break;
1425 case POWERPC_EXCP_PROGRAM: /* Program exception */
1426 switch (env->error_code & ~0xF) {
1427 case POWERPC_EXCP_FP:
1428 if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
1429 trace_ppc_excp_fp_ignore();
1430 powerpc_reset_excp_state(cpu);
1431 return;
1434 * NIP always points to the faulting instruction for FP exceptions,
1435 * so always use store_next and claim we are precise in the MSR.
1437 msr |= 0x00100000;
1438 break;
1439 case POWERPC_EXCP_INVAL:
1440 trace_ppc_excp_inval(env->nip);
1441 msr |= 0x00080000;
1442 break;
1443 case POWERPC_EXCP_PRIV:
1444 msr |= 0x00040000;
1445 break;
1446 case POWERPC_EXCP_TRAP:
1447 msr |= 0x00020000;
1448 break;
1449 default:
1450 /* Should never occur */
1451 cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
1452 env->error_code);
1453 break;
1455 break;
1456 case POWERPC_EXCP_SYSCALL: /* System call exception */
1457 lev = env->error_code;
1459 if (lev == 1 && cpu->vhyp) {
1460 dump_hcall(env);
1461 } else {
1462 dump_syscall(env);
1466 * We need to correct the NIP which in this case is supposed
1467 * to point to the next instruction
1469 env->nip += 4;
1471 /* "PAPR mode" built-in hypercall emulation */
1472 if (lev == 1 && books_vhyp_handles_hcall(cpu)) {
1473 cpu->vhyp_class->hypercall(cpu->vhyp, cpu);
1474 powerpc_reset_excp_state(cpu);
1475 return;
1477 if (env->insns_flags2 & PPC2_ISA310) {
1478 /* ISAv3.1 puts LEV into SRR1 */
1479 msr |= lev << 20;
1481 if (lev == 1) {
1482 new_msr |= (target_ulong)MSR_HVB;
1484 break;
1485 case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
1486 lev = env->error_code;
1487 dump_syscall(env);
1488 env->nip += 4;
1489 new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
1490 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1492 vector += lev * 0x20;
1494 env->lr = env->nip;
1495 env->ctr = msr;
1496 break;
1497 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1498 case POWERPC_EXCP_DECR: /* Decrementer exception */
1499 break;
1500 case POWERPC_EXCP_RESET: /* System reset exception */
1501 /* A power-saving exception sets ME, otherwise it is unchanged */
1502 if (FIELD_EX64(env->msr, MSR, POW)) {
1503 /* indicate that we resumed from power save mode */
1504 msr |= 0x10000;
1505 new_msr |= ((target_ulong)1 << MSR_ME);
1507 if (env->msr_mask & MSR_HVB) {
1509 * ISA specifies HV, but can be delivered to guest with HV
1510 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
1512 new_msr |= (target_ulong)MSR_HVB;
1513 } else {
1514 if (FIELD_EX64(env->msr, MSR, POW)) {
1515 cpu_abort(env_cpu(env),
1516 "Trying to deliver power-saving system reset "
1517 "exception %d with no HV support\n", excp);
1520 break;
1521 case POWERPC_EXCP_TRACE: /* Trace exception */
1522 msr |= env->error_code;
1523 /* fall through */
1524 case POWERPC_EXCP_DSEG: /* Data segment exception */
1525 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
1526 case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */
1527 case POWERPC_EXCP_PERFM: /* Performance monitor interrupt */
1528 break;
1529 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
1530 msr |= env->error_code;
1531 /* fall through */
1532 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
1533 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
1534 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
1535 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
1536 srr0 = SPR_HSRR0;
1537 srr1 = SPR_HSRR1;
1538 new_msr |= (target_ulong)MSR_HVB;
1539 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1540 break;
1541 #ifdef CONFIG_TCG
1542 case POWERPC_EXCP_HV_EMU: {
1543 uint32_t insn = ppc_ldl_code(env, env->nip);
1544 env->spr[SPR_HEIR] = insn;
1545 if (is_prefix_insn(env, insn)) {
1546 uint32_t insn2 = ppc_ldl_code(env, env->nip + 4);
1547 env->spr[SPR_HEIR] <<= 32;
1548 env->spr[SPR_HEIR] |= insn2;
1550 srr0 = SPR_HSRR0;
1551 srr1 = SPR_HSRR1;
1552 new_msr |= (target_ulong)MSR_HVB;
1553 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1554 break;
1556 #endif
1557 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
1558 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
1559 case POWERPC_EXCP_FU: /* Facility unavailable exception */
1560 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
1561 break;
1562 case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */
1563 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
1564 srr0 = SPR_HSRR0;
1565 srr1 = SPR_HSRR1;
1566 new_msr |= (target_ulong)MSR_HVB;
1567 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
1568 break;
1569 case POWERPC_EXCP_PERFM_EBB: /* Performance Monitor EBB Exception */
1570 case POWERPC_EXCP_EXTERNAL_EBB: /* External EBB Exception */
1571 env->spr[SPR_BESCR] &= ~BESCR_GE;
1574 * Save NIP for rfebb insn in SPR_EBBRR. Next nip is
1575 * stored in the EBB Handler SPR_EBBHR.
1577 env->spr[SPR_EBBRR] = env->nip;
1578 powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr);
1581 * This exception is handled in userspace. No need to proceed.
1583 return;
1584 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1585 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1586 case POWERPC_EXCP_MAINT: /* Maintenance exception */
1587 case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */
1588 cpu_abort(env_cpu(env), "%s exception not implemented\n",
1589 powerpc_excp_name(excp));
1590 break;
1591 default:
1592 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1593 excp);
1594 break;
1597 if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
1598 new_msr |= (target_ulong)1 << MSR_LE;
1600 new_msr |= (target_ulong)1 << MSR_SF;
1602 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
1603 env->spr[srr0] = env->nip;
1604 env->spr[srr1] = msr;
1607 if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) {
1608 /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */
1609 cpu->vhyp_class->deliver_hv_excp(cpu, excp);
1610 powerpc_reset_excp_state(cpu);
1611 } else {
1612 /* Sanity check */
1613 if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) {
1614 cpu_abort(env_cpu(env), "Trying to deliver HV exception (HSRR) %d "
1615 "with no HV support\n", excp);
1617 /* This can update new_msr and vector if AIL applies */
1618 ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector);
1619 powerpc_set_excp_state(cpu, vector, new_msr);
1622 #else
1623 static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
1625 g_assert_not_reached();
1627 #endif /* TARGET_PPC64 */
1629 static void powerpc_excp(PowerPCCPU *cpu, int excp)
1631 CPUPPCState *env = &cpu->env;
1633 if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
1634 cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
1635 excp);
1638 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
1639 " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
1640 excp, env->error_code);
1641 env->excp_stats[excp]++;
1643 switch (env->excp_model) {
1644 case POWERPC_EXCP_40x:
1645 powerpc_excp_40x(cpu, excp);
1646 break;
1647 case POWERPC_EXCP_6xx:
1648 powerpc_excp_6xx(cpu, excp);
1649 break;
1650 case POWERPC_EXCP_7xx:
1651 powerpc_excp_7xx(cpu, excp);
1652 break;
1653 case POWERPC_EXCP_74xx:
1654 powerpc_excp_74xx(cpu, excp);
1655 break;
1656 case POWERPC_EXCP_BOOKE:
1657 powerpc_excp_booke(cpu, excp);
1658 break;
1659 case POWERPC_EXCP_970:
1660 case POWERPC_EXCP_POWER7:
1661 case POWERPC_EXCP_POWER8:
1662 case POWERPC_EXCP_POWER9:
1663 case POWERPC_EXCP_POWER10:
1664 powerpc_excp_books(cpu, excp);
1665 break;
1666 default:
1667 g_assert_not_reached();
1671 void ppc_cpu_do_interrupt(CPUState *cs)
1673 PowerPCCPU *cpu = POWERPC_CPU(cs);
1675 powerpc_excp(cpu, cs->exception_index);
1678 #ifdef TARGET_PPC64
1679 #define P7_UNUSED_INTERRUPTS \
1680 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \
1681 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
1682 PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \
1683 PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB)
1685 static int p7_interrupt_powersave(CPUPPCState *env)
1687 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
1688 (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
1689 return PPC_INTERRUPT_EXT;
1691 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
1692 (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
1693 return PPC_INTERRUPT_DECR;
1695 if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
1696 (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
1697 return PPC_INTERRUPT_MCK;
1699 if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
1700 (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
1701 return PPC_INTERRUPT_HMI;
1703 if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
1704 return PPC_INTERRUPT_RESET;
1706 return 0;
1709 static int p7_next_unmasked_interrupt(CPUPPCState *env)
1711 CPUState *cs = env_cpu(env);
1713 /* Ignore MSR[EE] when coming out of some power management states */
1714 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1716 assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);
1718 if (cs->halted) {
1719 /* LPCR[PECE] controls which interrupts can exit power-saving mode */
1720 return p7_interrupt_powersave(env);
1723 /* Machine check exception */
1724 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
1725 return PPC_INTERRUPT_MCK;
1728 /* Hypervisor decrementer exception */
1729 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
1730 /* LPCR will be clear when not supported so this will work */
1731 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
1732 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1733 /* HDEC clears on delivery */
1734 return PPC_INTERRUPT_HDECR;
1738 /* External interrupt can ignore MSR:EE under some circumstances */
1739 if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
1740 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1741 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1742 /* HEIC blocks delivery to the hypervisor */
1743 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1744 !FIELD_EX64(env->msr, MSR, PR))) ||
1745 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1746 return PPC_INTERRUPT_EXT;
1749 if (msr_ee != 0) {
1750 /* Decrementer exception */
1751 if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
1752 return PPC_INTERRUPT_DECR;
1754 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
1755 return PPC_INTERRUPT_PERFM;
1759 return 0;
1762 #define P8_UNUSED_INTERRUPTS \
1763 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT | \
1764 PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \
1765 PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
1767 static int p8_interrupt_powersave(CPUPPCState *env)
1769 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
1770 (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
1771 return PPC_INTERRUPT_EXT;
1773 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
1774 (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
1775 return PPC_INTERRUPT_DECR;
1777 if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
1778 (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
1779 return PPC_INTERRUPT_MCK;
1781 if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
1782 (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
1783 return PPC_INTERRUPT_HMI;
1785 if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
1786 (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
1787 return PPC_INTERRUPT_DOORBELL;
1789 if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
1790 (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
1791 return PPC_INTERRUPT_HDOORBELL;
1793 if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
1794 return PPC_INTERRUPT_RESET;
1796 return 0;
1799 static int p8_next_unmasked_interrupt(CPUPPCState *env)
1801 CPUState *cs = env_cpu(env);
1803 /* Ignore MSR[EE] when coming out of some power management states */
1804 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1806 assert((env->pending_interrupts & P8_UNUSED_INTERRUPTS) == 0);
1808 if (cs->halted) {
1809 /* LPCR[PECE] controls which interrupts can exit power-saving mode */
1810 return p8_interrupt_powersave(env);
1813 /* Machine check exception */
1814 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
1815 return PPC_INTERRUPT_MCK;
1818 /* Hypervisor decrementer exception */
1819 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
1820 /* LPCR will be clear when not supported so this will work */
1821 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
1822 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1823 /* HDEC clears on delivery */
1824 return PPC_INTERRUPT_HDECR;
1828 /* External interrupt can ignore MSR:EE under some circumstances */
1829 if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
1830 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1831 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1832 /* HEIC blocks delivery to the hypervisor */
1833 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1834 !FIELD_EX64(env->msr, MSR, PR))) ||
1835 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1836 return PPC_INTERRUPT_EXT;
1839 if (msr_ee != 0) {
1840 /* Decrementer exception */
1841 if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
1842 return PPC_INTERRUPT_DECR;
1844 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
1845 return PPC_INTERRUPT_DOORBELL;
1847 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
1848 return PPC_INTERRUPT_HDOORBELL;
1850 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
1851 return PPC_INTERRUPT_PERFM;
1853 /* EBB exception */
1854 if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
1856 * EBB exception must be taken in problem state and
1857 * with BESCR_GE set.
1859 if (FIELD_EX64(env->msr, MSR, PR) &&
1860 (env->spr[SPR_BESCR] & BESCR_GE)) {
1861 return PPC_INTERRUPT_EBB;
1866 return 0;
1869 #define P9_UNUSED_INTERRUPTS \
1870 (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT | \
1871 PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
1872 PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
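/*
 * Return the pending interrupt, if any, that is permitted to exit
 * power-saving mode on POWER9/POWER10, as selected by the LPCR wakeup
 * enables (EEE, DEE, OEE, PDEE, HDEE, HVEE). HEIC suppresses the external
 * wakeup when running in hypervisor, non-problem state.
 */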
1874 static int p9_interrupt_powersave(CPUPPCState *env)
1876 /* External Exception */
1877 if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
1878 (env->spr[SPR_LPCR] & LPCR_EEE)) {
1879 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1880 if (!heic || !FIELD_EX64_HV(env->msr) ||
1881 FIELD_EX64(env->msr, MSR, PR)) {
1882 return PPC_INTERRUPT_EXT;
1885 /* Decrementer Exception */
1886 if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
1887 (env->spr[SPR_LPCR] & LPCR_DEE)) {
1888 return PPC_INTERRUPT_DECR;
1890 /* Machine Check or Hypervisor Maintenance Exception */
1891 if (env->spr[SPR_LPCR] & LPCR_OEE) {
1892 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
1893 return PPC_INTERRUPT_MCK;
1895 if (env->pending_interrupts & PPC_INTERRUPT_HMI) {
1896 return PPC_INTERRUPT_HMI;
1899 /* Privileged Doorbell Exception */
1900 if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
1901 (env->spr[SPR_LPCR] & LPCR_PDEE)) {
1902 return PPC_INTERRUPT_DOORBELL;
1904 /* Hypervisor Doorbell Exception */
1905 if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
1906 (env->spr[SPR_LPCR] & LPCR_HDEE)) {
1907 return PPC_INTERRUPT_HDOORBELL;
1909 /* Hypervisor virtualization exception */
1910 if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) &&
1911 (env->spr[SPR_LPCR] & LPCR_HVEE)) {
1912 return PPC_INTERRUPT_HVIRT;
1914 if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
1915 return PPC_INTERRUPT_RESET;
1917 return 0;
1920 static int p9_next_unmasked_interrupt(CPUPPCState *env)
1922 CPUState *cs = env_cpu(env);
1924 /* Ignore MSR[EE] when coming out of some power management states */
1925 bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
1927 assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0);
1929 if (cs->halted) {
1930 if (env->spr[SPR_PSSCR] & PSSCR_EC) {
1931 /*
1932 * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can
1933 * wake up the processor
1934 */
1935 return p9_interrupt_powersave(env);
1936 } else {
1938 * When it's clear, any system-caused exception exits power-saving
1939 * mode, even the ones that gate on MSR[EE].
1941 msr_ee = true;
1945 /* Machine check exception */
1946 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
1947 return PPC_INTERRUPT_MCK;
1950 /* Hypervisor decrementer exception */
1951 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
1952 /* LPCR will be clear when not supported so this will work */
1953 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
1954 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
1955 /* HDEC clears on delivery */
1956 return PPC_INTERRUPT_HDECR;
1960 /* Hypervisor virtualization interrupt */
1961 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
1962 /* LPCR will be clear when not supported so this will work */
1963 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
1964 if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) {
1965 return PPC_INTERRUPT_HVIRT;
1969 /* External interrupt can ignore MSR:EE under some circumstances */
1970 if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
1971 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
1972 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1973 /* HEIC blocks delivery to the hypervisor */
1974 if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
1975 !FIELD_EX64(env->msr, MSR, PR))) ||
1976 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
1977 return PPC_INTERRUPT_EXT;
1980 if (msr_ee != 0) {
1981 /* Decrementer exception */
1982 if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
1983 return PPC_INTERRUPT_DECR;
1985 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
1986 return PPC_INTERRUPT_DOORBELL;
1988 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
1989 return PPC_INTERRUPT_HDOORBELL;
1991 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
1992 return PPC_INTERRUPT_PERFM;
1994 /* EBB exception */
1995 if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
1997 * EBB exception must be taken in problem state and
1998 * with BESCR_GE set.
2000 if (FIELD_EX64(env->msr, MSR, PR) &&
2001 (env->spr[SPR_BESCR] & BESCR_GE)) {
2002 return PPC_INTERRUPT_EBB;
2007 return 0;
2009 #endif /* TARGET_PPC64 */
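/*
 * Return the PPC_INTERRUPT_* constant of the highest-priority interrupt that
 * is both pending and unmasked, or 0 if there is none. POWER7/8/9/10 are
 * handled by the model-specific functions above; every other CPU falls
 * through to the generic priority order below.
 */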
2011 static int ppc_next_unmasked_interrupt(CPUPPCState *env)
2013 #ifdef TARGET_PPC64
2014 switch (env->excp_model) {
2015 case POWERPC_EXCP_POWER7:
2016 return p7_next_unmasked_interrupt(env);
2017 case POWERPC_EXCP_POWER8:
2018 return p8_next_unmasked_interrupt(env);
2019 case POWERPC_EXCP_POWER9:
2020 case POWERPC_EXCP_POWER10:
2021 return p9_next_unmasked_interrupt(env);
2022 default:
2023 break;
2025 #endif
2026 bool async_deliver;
2028 /* External reset */
2029 if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
2030 return PPC_INTERRUPT_RESET;
2032 /* Machine check exception */
2033 if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
2034 return PPC_INTERRUPT_MCK;
2036 #if 0 /* TODO */
2037 /* External debug exception */
2038 if (env->pending_interrupts & PPC_INTERRUPT_DEBUG) {
2039 return PPC_INTERRUPT_DEBUG;
2041 #endif
2043 /*
2044 * For interrupts that gate on MSR:EE, we need to do something a
2045 * bit more subtle: we must let them through even when EE is clear
2046 * if we are coming out of some power management states, so that
2047 * they can be delivered as a 0x100 system reset.
2048 */
2049 async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
2051 /* Hypervisor decrementer exception */
2052 if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
2053 /* LPCR will be clear when not supported so this will work */
2054 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
2055 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) {
2056 /* HDEC clears on delivery */
2057 return PPC_INTERRUPT_HDECR;
2061 /* Hypervisor virtualization interrupt */
2062 if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
2063 /* LPCR will be clear when not supported so this will work */
2064 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
2065 if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) {
2066 return PPC_INTERRUPT_HVIRT;
2070 /* External interrupt can ignore MSR:EE under some circumstances */
2071 if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
2072 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
2073 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
2074 /* HEIC blocks delivery to the hypervisor */
2075 if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) &&
2076 !FIELD_EX64(env->msr, MSR, PR))) ||
2077 (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
2078 return PPC_INTERRUPT_EXT;
2081 if (FIELD_EX64(env->msr, MSR, CE)) {
2082 /* External critical interrupt */
2083 if (env->pending_interrupts & PPC_INTERRUPT_CEXT) {
2084 return PPC_INTERRUPT_CEXT;
2087 if (async_deliver != 0) {
2088 /* Watchdog timer on embedded PowerPC */
2089 if (env->pending_interrupts & PPC_INTERRUPT_WDT) {
2090 return PPC_INTERRUPT_WDT;
2092 if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) {
2093 return PPC_INTERRUPT_CDOORBELL;
2095 /* Fixed interval timer on embedded PowerPC */
2096 if (env->pending_interrupts & PPC_INTERRUPT_FIT) {
2097 return PPC_INTERRUPT_FIT;
2099 /* Programmable interval timer on embedded PowerPC */
2100 if (env->pending_interrupts & PPC_INTERRUPT_PIT) {
2101 return PPC_INTERRUPT_PIT;
2103 /* Decrementer exception */
2104 if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
2105 return PPC_INTERRUPT_DECR;
2107 if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
2108 return PPC_INTERRUPT_DOORBELL;
2110 if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
2111 return PPC_INTERRUPT_HDOORBELL;
2113 if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
2114 return PPC_INTERRUPT_PERFM;
2116 /* Thermal interrupt */
2117 if (env->pending_interrupts & PPC_INTERRUPT_THERM) {
2118 return PPC_INTERRUPT_THERM;
2120 /* EBB exception */
2121 if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
2123 * EBB exception must be taken in problem state and
2124 * with BESCR_GE set.
2126 if (FIELD_EX64(env->msr, MSR, PR) &&
2127 (env->spr[SPR_BESCR] & BESCR_GE)) {
2128 return PPC_INTERRUPT_EBB;
2133 return 0;
2136 /*
2137 * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be
2138 * delivered and clears CPU_INTERRUPT_HARD otherwise.
2139 *
2140 * This method is called by ppc_set_interrupt when an interrupt is raised or
2141 * lowered, and should also be called whenever an interrupt masking condition
2142 * is changed, e.g.:
2143 * - When relevant bits of MSR are altered, like EE, HV, PR, etc.;
2144 * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.;
2145 * - When PSSCR[EC] or env->resume_as_sreset are changed;
2146 * - When cs->halted is changed and the CPU has a different interrupt masking
2147 *   logic in power-saving mode (e.g., POWER7/8/9/10);
2148 */
2149 void ppc_maybe_interrupt(CPUPPCState *env)
2151 CPUState *cs = env_cpu(env);
2152 BQL_LOCK_GUARD();
2154 if (ppc_next_unmasked_interrupt(env)) {
2155 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
2156 } else {
2157 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
2161 #ifdef TARGET_PPC64
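/*
 * The *_deliver_interrupt functions raise the exception corresponding to the
 * interrupt selected by *_next_unmasked_interrupt. Message-style sources
 * (machine check, HDEC, PERFM, doorbells, EBB) have their pending bit cleared
 * on delivery; level-triggered ones (EXT, DECR) are left for the source to
 * lower.
 */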
2162 static void p7_deliver_interrupt(CPUPPCState *env, int interrupt)
2164 PowerPCCPU *cpu = env_archcpu(env);
2166 switch (interrupt) {
2167 case PPC_INTERRUPT_MCK: /* Machine check exception */
2168 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2169 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2170 break;
2172 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2173 /* HDEC clears on delivery */
2174 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2175 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2176 break;
2178 case PPC_INTERRUPT_EXT:
2179 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2180 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2181 } else {
2182 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2184 break;
2186 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2187 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2188 break;
2189 case PPC_INTERRUPT_PERFM:
2190 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
2191 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2192 break;
2193 case 0:
2195 * This is a bug! It means that has_work took us out of halt without
2196 * anything to deliver while in a PM state that requires getting
2197 * out via a 0x100
2199 * This means we will incorrectly execute past the power management
2200 * instruction instead of triggering a reset.
2202 * It generally means a discrepancy between the wakeup conditions in the
2203 * processor has_work implementation and the logic in this function.
2205 assert(!env->resume_as_sreset);
2206 break;
2207 default:
2208 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2209 interrupt);
2213 static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
2215 PowerPCCPU *cpu = env_archcpu(env);
2217 switch (interrupt) {
2218 case PPC_INTERRUPT_MCK: /* Machine check exception */
2219 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2220 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2221 break;
2223 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2224 /* HDEC clears on delivery */
2225 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2226 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2227 break;
2229 case PPC_INTERRUPT_EXT:
2230 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2231 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2232 } else {
2233 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2235 break;
2237 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2238 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2239 break;
2240 case PPC_INTERRUPT_DOORBELL:
2241 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2242 if (is_book3s_arch2x(env)) {
2243 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2244 } else {
2245 powerpc_excp(cpu, POWERPC_EXCP_DOORI);
2247 break;
2248 case PPC_INTERRUPT_HDOORBELL:
2249 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2250 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2251 break;
2252 case PPC_INTERRUPT_PERFM:
2253 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
2254 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2255 break;
2256 case PPC_INTERRUPT_EBB: /* EBB exception */
2257 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2258 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2259 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2260 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2261 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2263 break;
2264 case 0:
2266 * This is a bug! It means that has_work took us out of halt without
2267 * anything to deliver while in a PM state that requires getting
2268 * out via a 0x100
2270 * This means we will incorrectly execute past the power management
2271 * instruction instead of triggering a reset.
2273 * It generally means a discrepancy between the wakeup conditions in the
2274 * processor has_work implementation and the logic in this function.
2276 assert(!env->resume_as_sreset);
2277 break;
2278 default:
2279 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2280 interrupt);
2284 static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
2286 PowerPCCPU *cpu = env_archcpu(env);
2287 CPUState *cs = env_cpu(env);
2289 if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) &&
2290 !FIELD_EX64(env->msr, MSR, EE)) {
2292 * A pending interrupt took us out of power-saving, but MSR[EE] says
2293 * that we should return to NIP+4 instead of delivering it.
2295 return;
2298 switch (interrupt) {
2299 case PPC_INTERRUPT_MCK: /* Machine check exception */
2300 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2301 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2302 break;
2304 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2305 /* HDEC clears on delivery */
2306 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2307 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2308 break;
2309 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
2310 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2311 break;
2313 case PPC_INTERRUPT_EXT:
2314 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2315 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2316 } else {
2317 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2319 break;
2321 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2322 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2323 break;
2324 case PPC_INTERRUPT_DOORBELL:
2325 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2326 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2327 break;
2328 case PPC_INTERRUPT_HDOORBELL:
2329 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2330 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2331 break;
2332 case PPC_INTERRUPT_PERFM:
2333 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
2334 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2335 break;
2336 case PPC_INTERRUPT_EBB: /* EBB exception */
2337 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2338 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2339 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2340 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2341 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2343 break;
2344 case 0:
2346 * This is a bug! It means that has_work took us out of halt without
2347 * anything to deliver while in a PM state that requires getting
2348 * out via a 0x100
2350 * This means we will incorrectly execute past the power management
2351 * instruction instead of triggering a reset.
2353 * It generally means a discrepancy between the wakeup conditions in the
2354 * processor has_work implementation and the logic in this function.
2356 assert(!env->resume_as_sreset);
2357 break;
2358 default:
2359 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2360 interrupt);
2363 #endif /* TARGET_PPC64 */
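/*
 * Deliver 'interrupt': POWER7/8/9/10 are handled by the model-specific
 * functions above, all other CPUs (including the embedded ones) by the
 * generic switch below.
 */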
2365 static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
2367 #ifdef TARGET_PPC64
2368 switch (env->excp_model) {
2369 case POWERPC_EXCP_POWER7:
2370 return p7_deliver_interrupt(env, interrupt);
2371 case POWERPC_EXCP_POWER8:
2372 return p8_deliver_interrupt(env, interrupt);
2373 case POWERPC_EXCP_POWER9:
2374 case POWERPC_EXCP_POWER10:
2375 return p9_deliver_interrupt(env, interrupt);
2376 default:
2377 break;
2379 #endif
2380 PowerPCCPU *cpu = env_archcpu(env);
2382 switch (interrupt) {
2383 case PPC_INTERRUPT_RESET: /* External reset */
2384 env->pending_interrupts &= ~PPC_INTERRUPT_RESET;
2385 powerpc_excp(cpu, POWERPC_EXCP_RESET);
2386 break;
2387 case PPC_INTERRUPT_MCK: /* Machine check exception */
2388 env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
2389 powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
2390 break;
2392 case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
2393 /* HDEC clears on delivery */
2394 env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
2395 powerpc_excp(cpu, POWERPC_EXCP_HDECR);
2396 break;
2397 case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
2398 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2399 break;
2401 case PPC_INTERRUPT_EXT:
2402 if (books_vhyp_promotes_external_to_hvirt(cpu)) {
2403 powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
2404 } else {
2405 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
2407 break;
2408 case PPC_INTERRUPT_CEXT: /* External critical interrupt */
2409 powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
2410 break;
2412 case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */
2413 env->pending_interrupts &= ~PPC_INTERRUPT_WDT;
2414 powerpc_excp(cpu, POWERPC_EXCP_WDT);
2415 break;
2416 case PPC_INTERRUPT_CDOORBELL:
2417 env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL;
2418 powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
2419 break;
2420 case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */
2421 env->pending_interrupts &= ~PPC_INTERRUPT_FIT;
2422 powerpc_excp(cpu, POWERPC_EXCP_FIT);
2423 break;
2424 case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */
2425 env->pending_interrupts &= ~PPC_INTERRUPT_PIT;
2426 powerpc_excp(cpu, POWERPC_EXCP_PIT);
2427 break;
2428 case PPC_INTERRUPT_DECR: /* Decrementer exception */
2429 if (ppc_decr_clear_on_delivery(env)) {
2430 env->pending_interrupts &= ~PPC_INTERRUPT_DECR;
2432 powerpc_excp(cpu, POWERPC_EXCP_DECR);
2433 break;
2434 case PPC_INTERRUPT_DOORBELL:
2435 env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
2436 if (is_book3s_arch2x(env)) {
2437 powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
2438 } else {
2439 powerpc_excp(cpu, POWERPC_EXCP_DOORI);
2441 break;
2442 case PPC_INTERRUPT_HDOORBELL:
2443 env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
2444 powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
2445 break;
2446 case PPC_INTERRUPT_PERFM:
2447 env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
2448 powerpc_excp(cpu, POWERPC_EXCP_PERFM);
2449 break;
2450 case PPC_INTERRUPT_THERM: /* Thermal interrupt */
2451 env->pending_interrupts &= ~PPC_INTERRUPT_THERM;
2452 powerpc_excp(cpu, POWERPC_EXCP_THERM);
2453 break;
2454 case PPC_INTERRUPT_EBB: /* EBB exception */
2455 env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
2456 if (env->spr[SPR_BESCR] & BESCR_PMEO) {
2457 powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
2458 } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
2459 powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
2461 break;
2462 case 0:
2464 * This is a bug! It means that has_work took us out of halt without
2465 * anything to deliver while in a PM state that requires getting
2466 * out via a 0x100
2468 * This means we will incorrectly execute past the power management
2469 * instruction instead of triggering a reset.
2471 * It generally means a discrepancy between the wakeup conditions in the
2472 * processor has_work implementation and the logic in this function.
2474 assert(!env->resume_as_sreset);
2475 break;
2476 default:
2477 cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
2478 interrupt);
2482 void ppc_cpu_do_system_reset(CPUState *cs)
2484 PowerPCCPU *cpu = POWERPC_CPU(cs);
2486 powerpc_excp(cpu, POWERPC_EXCP_RESET);
2489 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
2491 PowerPCCPU *cpu = POWERPC_CPU(cs);
2492 CPUPPCState *env = &cpu->env;
2493 target_ulong msr = 0;
2495 /*
2496 * Set MSR and NIP for the handler; SRR0/1, DAR and DSISR have already
2497 * been set by KVM.
2498 */
2499 msr = (1ULL << MSR_ME);
2500 msr |= env->msr & (1ULL << MSR_SF);
2501 if (ppc_interrupts_little_endian(cpu, false)) {
2502 msr |= (1ULL << MSR_LE);
2505 /* Anything for nested required here? MSR[HV] bit? */
2507 powerpc_set_excp_state(cpu, vector, msr);
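/*
 * Called by the TCG main loop when CPU_INTERRUPT_HARD is pending: pick the
 * next unmasked interrupt, deliver it, and drop CPU_INTERRUPT_HARD once no
 * interrupts remain pending. Returns false if nothing can be delivered.
 */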
2510 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
2512 CPUPPCState *env = cpu_env(cs);
2513 int interrupt;
2515 if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) {
2516 return false;
2519 interrupt = ppc_next_unmasked_interrupt(env);
2520 if (interrupt == 0) {
2521 return false;
2524 ppc_deliver_interrupt(env, interrupt);
2525 if (env->pending_interrupts == 0) {
2526 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
2528 return true;
2531 #endif /* !CONFIG_USER_ONLY */
2533 /*****************************************************************************/
2534 /* Exceptions processing helpers */
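/*
 * Record the exception number and error code in the CPU state and exit the
 * current translation block; 'raddr' is the host return address used to
 * restore the guest state when the exception is raised from within a TB
 * (0 when no unwinding is needed).
 */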
2536 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
2537 uint32_t error_code, uintptr_t raddr)
2539 CPUState *cs = env_cpu(env);
2541 cs->exception_index = exception;
2542 env->error_code = error_code;
2543 cpu_loop_exit_restore(cs, raddr);
2546 void raise_exception_err(CPUPPCState *env, uint32_t exception,
2547 uint32_t error_code)
2549 raise_exception_err_ra(env, exception, error_code, 0);
2552 void raise_exception(CPUPPCState *env, uint32_t exception)
2554 raise_exception_err_ra(env, exception, 0, 0);
2557 void raise_exception_ra(CPUPPCState *env, uint32_t exception,
2558 uintptr_t raddr)
2560 raise_exception_err_ra(env, exception, 0, raddr);
2563 #ifdef CONFIG_TCG
2564 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
2565 uint32_t error_code)
2567 raise_exception_err_ra(env, exception, error_code, 0);
2570 void helper_raise_exception(CPUPPCState *env, uint32_t exception)
2572 raise_exception_err_ra(env, exception, 0, 0);
2575 #ifndef CONFIG_USER_ONLY
2576 void helper_store_msr(CPUPPCState *env, target_ulong val)
2578 uint32_t excp = hreg_store_msr(env, val, 0);
2580 if (excp != 0) {
2581 cpu_interrupt_exittb(env_cpu(env));
2582 raise_exception(env, excp);
2586 void helper_ppc_maybe_interrupt(CPUPPCState *env)
2588 ppc_maybe_interrupt(env);
2591 #ifdef TARGET_PPC64
2592 void helper_scv(CPUPPCState *env, uint32_t lev)
2594 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
2595 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
2596 } else {
2597 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
2601 void helper_pminsn(CPUPPCState *env, uint32_t insn)
2603 CPUState *cs = env_cpu(env);
2605 cs->halted = 1;
2607 /* Condition for waking up at 0x100 */
2608 env->resume_as_sreset = (insn != PPC_PM_STOP) ||
2609 (env->spr[SPR_PSSCR] & PSSCR_EC);
2611 /* HDECR is not supposed to wake us from the PM state; it may have already fired */
2612 if (env->resume_as_sreset) {
2613 PowerPCCPU *cpu = env_archcpu(env);
2614 ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
2617 ppc_maybe_interrupt(env);
2619 #endif /* TARGET_PPC64 */
2621 static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
2623 /* MSR:POW cannot be set by any form of rfi */
2624 msr &= ~(1ULL << MSR_POW);
2626 /* MSR:TGPR cannot be set by any form of rfi */
2627 if (env->flags & POWERPC_FLAG_TGPR)
2628 msr &= ~(1ULL << MSR_TGPR);
2630 #ifdef TARGET_PPC64
2631 /* Switching to 32-bit ? Crop the nip */
2632 if (!msr_is_64bit(env, msr)) {
2633 nip = (uint32_t)nip;
2635 #else
2636 nip = (uint32_t)nip;
2637 #endif
2638 /* XXX: beware: this is false if VLE is supported */
2639 env->nip = nip & ~((target_ulong)0x00000003);
2640 hreg_store_msr(env, msr, 1);
2641 trace_ppc_excp_rfi(env->nip, env->msr);
2643 * No need to raise an exception here, as rfi is always the last
2644 * insn of a TB
2646 cpu_interrupt_exittb(env_cpu(env));
2647 /* Reset the reservation */
2648 env->reserve_addr = -1;
2650 /* Context synchronizing: check if TCG TLB needs flush */
2651 check_tlb_flush(env, false);
2654 void helper_rfi(CPUPPCState *env)
2656 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
2659 #ifdef TARGET_PPC64
2660 void helper_rfid(CPUPPCState *env)
2663 * The architecture defines a number of rules for which bits can
2664 * change but in practice, we handle this in hreg_store_msr()
2665 * which will be called by do_rfi(), so there is no need to filter
2666 * here
2668 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
2671 void helper_rfscv(CPUPPCState *env)
2673 do_rfi(env, env->lr, env->ctr);
2676 void helper_hrfid(CPUPPCState *env)
2678 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
2681 void helper_rfebb(CPUPPCState *env, target_ulong s)
2683 target_ulong msr = env->msr;
2686 * Handling of BESCR bits 32:33 according to PowerISA v3.1:
2688 * "If BESCR 32:33 != 0b00 the instruction is treated as if
2689 * the instruction form were invalid."
2691 if (env->spr[SPR_BESCR] & BESCR_INVALID) {
2692 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
2693 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
2696 env->nip = env->spr[SPR_EBBRR];
2698 /* Switching to 32-bit ? Crop the nip */
2699 if (!msr_is_64bit(env, msr)) {
2700 env->nip = (uint32_t)env->spr[SPR_EBBRR];
2703 if (s) {
2704 env->spr[SPR_BESCR] |= BESCR_GE;
2705 } else {
2706 env->spr[SPR_BESCR] &= ~BESCR_GE;
2710 /*
2711 * Triggers or queues an 'ebb_excp' EBB exception. All checks other than
2712 * FSCR, HFSCR and MSR[PR] must be done by the caller beforehand.
2713 *
2714 * PowerISA v3.1 isn't clear about whether an EBB should be
2715 * postponed or cancelled if the EBB facility is unavailable.
2716 * Our assumption here is that the EBB is cancelled if both
2717 * FSCR and HFSCR EBB facilities aren't available.
2718 */
2719 static void do_ebb(CPUPPCState *env, int ebb_excp)
2721 PowerPCCPU *cpu = env_archcpu(env);
2724 * FSCR_EBB and FSCR_IC_EBB are the same bits used with
2725 * HFSCR.
2727 helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
2728 helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
2730 if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
2731 env->spr[SPR_BESCR] |= BESCR_PMEO;
2732 } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
2733 env->spr[SPR_BESCR] |= BESCR_EEO;
2736 if (FIELD_EX64(env->msr, MSR, PR)) {
2737 powerpc_excp(cpu, ebb_excp);
2738 } else {
2739 ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
2743 void raise_ebb_perfm_exception(CPUPPCState *env)
2745 bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
2746 env->spr[SPR_BESCR] & BESCR_PME &&
2747 env->spr[SPR_BESCR] & BESCR_GE;
2749 if (!perfm_ebb_enabled) {
2750 return;
2753 do_ebb(env, POWERPC_EXCP_PERFM_EBB);
2755 #endif /* TARGET_PPC64 */
2757 /*****************************************************************************/
2758 /* Embedded PowerPC specific helpers */
2759 void helper_40x_rfci(CPUPPCState *env)
2761 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
2764 void helper_rfci(CPUPPCState *env)
2766 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
2769 void helper_rfdi(CPUPPCState *env)
2771 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
2772 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
2775 void helper_rfmci(CPUPPCState *env)
2777 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
2778 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
2780 #endif /* !CONFIG_USER_ONLY */
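/*
 * tw/twi (and the 64-bit td/tdi below): 'flags' is the TO field; bit 0x10
 * selects signed less-than, 0x08 signed greater-than, 0x04 equal, 0x02
 * unsigned less-than and 0x01 unsigned greater-than. A TRAP program
 * interrupt is raised if any selected comparison of arg1 against arg2 holds.
 */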
2782 void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
2783 uint32_t flags)
2785 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
2786 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
2787 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
2788 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
2789 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
2790 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2791 POWERPC_EXCP_TRAP, GETPC());
2795 #ifdef TARGET_PPC64
2796 void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
2797 uint32_t flags)
2799 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
2800 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
2801 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
2802 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
2803 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
2804 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2805 POWERPC_EXCP_TRAP, GETPC());
2808 #endif /* TARGET_PPC64 */
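/*
 * One 32-bit block of the SIMON-like cipher used by the hashst/hashchk hash:
 * the 64-bit key is expanded into per-round subkeys (rotated according to
 * 'lane') and 32 Feistel rounds are applied to the two 16-bit halves of x.
 */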
2810 static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
2812 const uint16_t c = 0xfffc;
2813 const uint64_t z0 = 0xfa2561cdf44ac398ULL;
2814 uint16_t z = 0, temp;
2815 uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
2817 for (int i = 3; i >= 0; i--) {
2818 k[i] = key & 0xffff;
2819 key >>= 16;
2821 xleft[0] = x & 0xffff;
2822 xright[0] = (x >> 16) & 0xffff;
2824 for (int i = 0; i < 28; i++) {
2825 z = (z0 >> (63 - i)) & 1;
2826 temp = ror16(k[i + 3], 3) ^ k[i + 1];
2827 k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
2830 for (int i = 0; i < 8; i++) {
2831 eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
2832 eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
2833 eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
2834 eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
2837 for (int i = 0; i < 32; i++) {
2838 fxleft[i] = (rol16(xleft[i], 1) &
2839 rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
2840 xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
2841 xright[i + 1] = xleft[i];
2844 return (((uint32_t)xright[32]) << 16) | xleft[32];
2847 static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
2849 uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
2850 uint64_t stage1_h, stage1_l;
2852 for (int i = 0; i < 4; i++) {
2853 stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
2854 stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
2855 stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
2856 stage0_l |= (ra & 0xff) << (8 * 2 * i);
2857 rb >>= 8;
2858 ra >>= 8;
2861 stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
2862 stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
2863 stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
2864 stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
2866 return stage1_h ^ stage1_l;
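/*
 * Compute the ROP-protection hash of ra and rb with the given key. For the
 * store forms the result is written to memory at 'ea'; for the check forms
 * the stored value is loaded and compared, and a TRAP program interrupt is
 * raised on mismatch.
 */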
2869 static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
2870 target_ulong rb, uint64_t key, bool store)
2872 uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
2874 if (store) {
2875 cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
2876 } else {
2877 loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
2878 if (loaded_hash != calculated_hash) {
2879 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2880 POWERPC_EXCP_TRAP, GETPC());
2885 #include "qemu/guest-random.h"
2887 #ifdef TARGET_PPC64
2888 #define HELPER_HASH(op, key, store, dexcr_aspect) \
2889 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
2890 target_ulong rb) \
2892 if (env->msr & R_MSR_PR_MASK) { \
2893 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \
2894 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
2895 return; \
2896 } else if (!(env->msr & R_MSR_HV_MASK)) { \
2897 if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \
2898 env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
2899 return; \
2900 } else if (!(env->msr & R_MSR_S_MASK)) { \
2901 if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \
2902 return; \
2905 do_hash(env, ea, ra, rb, key, store); \
2907 #else
2908 #define HELPER_HASH(op, key, store, dexcr_aspect) \
2909 void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
2910 target_ulong rb) \
2912 do_hash(env, ea, ra, rb, key, store); \
2914 #endif /* TARGET_PPC64 */
2916 HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
2917 HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
2918 HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
2919 HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
2921 #ifndef CONFIG_USER_ONLY
2922 /* Embedded.Processor Control */
2923 static int dbell2irq(target_ulong rb)
2925 int msg = rb & DBELL_TYPE_MASK;
2926 int irq = -1;
2928 switch (msg) {
2929 case DBELL_TYPE_DBELL:
2930 irq = PPC_INTERRUPT_DOORBELL;
2931 break;
2932 case DBELL_TYPE_DBELL_CRIT:
2933 irq = PPC_INTERRUPT_CDOORBELL;
2934 break;
2935 case DBELL_TYPE_G_DBELL:
2936 case DBELL_TYPE_G_DBELL_CRIT:
2937 case DBELL_TYPE_G_DBELL_MC:
2938 /* XXX implement */
2939 default:
2940 break;
2943 return irq;
2946 void helper_msgclr(CPUPPCState *env, target_ulong rb)
2948 int irq = dbell2irq(rb);
2950 if (irq < 0) {
2951 return;
2954 ppc_set_irq(env_archcpu(env), irq, 0);
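/*
 * Embedded msgsnd: translate the doorbell message type into an interrupt and
 * raise it on every CPU whose PIR matches the message tag, or on all CPUs
 * for a broadcast message.
 */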
2957 void helper_msgsnd(target_ulong rb)
2959 int irq = dbell2irq(rb);
2960 int pir = rb & DBELL_PIRTAG_MASK;
2961 CPUState *cs;
2963 if (irq < 0) {
2964 return;
2967 bql_lock();
2968 CPU_FOREACH(cs) {
2969 PowerPCCPU *cpu = POWERPC_CPU(cs);
2970 CPUPPCState *cenv = &cpu->env;
2972 if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
2973 ppc_set_irq(cpu, irq, 1);
2976 bql_unlock();
2979 /* Server Processor Control */
2981 static bool dbell_type_server(target_ulong rb)
2984 * A Directed Hypervisor Doorbell message is sent only if the
2985 * message type is 5. All other types are reserved and the
2986 * instruction is a no-op
2988 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
2991 static inline bool dbell_bcast_core(target_ulong rb)
2993 return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
2996 static inline bool dbell_bcast_subproc(target_ulong rb)
2998 return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
3001 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
3003 if (!dbell_type_server(rb)) {
3004 return;
3007 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
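/*
 * Book3S msgsnd: send a Directed Hypervisor Doorbell to the thread
 * identified by the PIR tag in rb; with a core broadcast (or a subprocessor
 * broadcast in 1-LPAR mode) the doorbell is raised on all sibling threads
 * of the target.
 */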
3010 void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
3012 int pir = rb & DBELL_PROCIDTAG_MASK;
3013 bool brdcast = false;
3014 CPUState *cs, *ccs;
3015 PowerPCCPU *cpu;
3017 if (!dbell_type_server(rb)) {
3018 return;
3021 cpu = ppc_get_vcpu_by_pir(pir);
3022 if (!cpu) {
3023 return;
3025 cs = CPU(cpu);
3027 if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
3028 (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
3029 brdcast = true;
3032 if (cs->nr_threads == 1 || !brdcast) {
3033 ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
3034 return;
3038 * Why is bql needed for walking CPU list? Answer seems to be because ppc
3039 * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
3040 * so could this be removed?
3042 bql_lock();
3043 THREAD_SIBLING_FOREACH(cs, ccs) {
3044 ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
3046 bql_unlock();
3049 #ifdef TARGET_PPC64
3050 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
3052 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
3054 if (!dbell_type_server(rb)) {
3055 return;
3058 ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
3062 * sends a message to another thread on the same
3063 * multi-threaded processor
3065 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
3067 CPUState *cs = env_cpu(env);
3068 PowerPCCPU *cpu = env_archcpu(env);
3069 CPUState *ccs;
3070 uint32_t nr_threads = cs->nr_threads;
3071 int ttir = rb & PPC_BITMASK(57, 63);
3073 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
3075 if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
3076 nr_threads = 1; /* msgsndp behaves as 1-thread in LPAR-per-thread mode */
3079 if (!dbell_type_server(rb) || ttir >= nr_threads) {
3080 return;
3083 if (nr_threads == 1) {
3084 ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, 1);
3085 return;
3088 /* Does iothread need to be locked for walking CPU list? */
3089 bql_lock();
3090 THREAD_SIBLING_FOREACH(cs, ccs) {
3091 PowerPCCPU *ccpu = POWERPC_CPU(ccs);
3092 uint32_t thread_id = ppc_cpu_tir(ccpu);
3094 if (ttir == thread_id) {
3095 ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1);
3096 bql_unlock();
3097 return;
3101 g_assert_not_reached();
3103 #endif /* TARGET_PPC64 */
3105 /* Single-step tracing */
3106 void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
3108 uint32_t error_code = 0;
3109 if (env->insns_flags2 & PPC2_ISA207S) {
3110 /* Load/store reporting (SRR1[35, 36] and SDAR) is not implemented. */
3111 env->spr[SPR_POWER_SIAR] = prev_ip;
3112 error_code = PPC_BIT(33);
3114 raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
3117 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
3118 MMUAccessType access_type,
3119 int mmu_idx, uintptr_t retaddr)
3121 CPUPPCState *env = cpu_env(cs);
3122 uint32_t insn;
3124 /* Restore state and reload the insn we executed, for filling in DSISR. */
3125 cpu_restore_state(cs, retaddr);
3126 insn = ppc_ldl_code(env, env->nip);
3128 switch (env->mmu_model) {
3129 case POWERPC_MMU_SOFT_4xx:
3130 env->spr[SPR_40x_DEAR] = vaddr;
3131 break;
3132 case POWERPC_MMU_BOOKE:
3133 case POWERPC_MMU_BOOKE206:
3134 env->spr[SPR_BOOKE_DEAR] = vaddr;
3135 break;
3136 default:
3137 env->spr[SPR_DAR] = vaddr;
3138 break;
3141 cs->exception_index = POWERPC_EXCP_ALIGN;
3142 env->error_code = insn & 0x03FF0000;
3143 cpu_loop_exit(cs);
3146 void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
3147 vaddr vaddr, unsigned size,
3148 MMUAccessType access_type,
3149 int mmu_idx, MemTxAttrs attrs,
3150 MemTxResult response, uintptr_t retaddr)
3152 CPUPPCState *env = cpu_env(cs);
3154 switch (env->excp_model) {
3155 #if defined(TARGET_PPC64)
3156 case POWERPC_EXCP_POWER8:
3157 case POWERPC_EXCP_POWER9:
3158 case POWERPC_EXCP_POWER10:
3160 * Machine check codes can be found in processor User Manual or
3161 * Linux or skiboot source.
3163 if (access_type == MMU_DATA_LOAD) {
3164 env->spr[SPR_DAR] = vaddr;
3165 env->spr[SPR_DSISR] = PPC_BIT(57);
3166 env->error_code = PPC_BIT(42);
3168 } else if (access_type == MMU_DATA_STORE) {
3170 * MCE for stores in POWER is asynchronous so hardware does
3171 * not set DAR, but QEMU can do better.
3173 env->spr[SPR_DAR] = vaddr;
3174 env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
3175 env->error_code |= PPC_BIT(42);
3177 } else { /* Fetch */
3179 * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
3180 * the instruction, so that must always be clear for fetches.
3182 env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
3184 break;
3185 #endif
3186 default:
3188 * TODO: Check behaviour for other CPUs, for now do nothing.
3189 * Could add a basic MCE even if real hardware ignores it.
3191 return;
3194 cs->exception_index = POWERPC_EXCP_MCHECK;
3195 cpu_loop_exit_restore(cs, retaddr);
3198 void ppc_cpu_debug_excp_handler(CPUState *cs)
3200 #if defined(TARGET_PPC64)
3201 CPUPPCState *env = cpu_env(cs);
3203 if (env->insns_flags2 & PPC2_ISA207S) {
3204 if (cs->watchpoint_hit) {
3205 if (cs->watchpoint_hit->flags & BP_CPU) {
3206 env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
3207 env->spr[SPR_DSISR] = PPC_BIT(41);
3208 cs->watchpoint_hit = NULL;
3209 raise_exception(env, POWERPC_EXCP_DSI);
3211 cs->watchpoint_hit = NULL;
3212 } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
3213 raise_exception_err(env, POWERPC_EXCP_TRACE,
3214 PPC_BIT(33) | PPC_BIT(43));
3217 #endif
3220 bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
3222 #if defined(TARGET_PPC64)
3223 CPUPPCState *env = cpu_env(cs);
3225 if (env->insns_flags2 & PPC2_ISA207S) {
3226 target_ulong priv;
3228 priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
3229 switch (priv) {
3230 case 0x1: /* problem */
3231 return env->msr & ((target_ulong)1 << MSR_PR);
3232 case 0x2: /* supervisor */
3233 return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
3234 !(env->msr & ((target_ulong)1 << MSR_HV)));
3235 case 0x3: /* hypervisor */
3236 return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
3237 (env->msr & ((target_ulong)1 << MSR_HV)));
3238 default:
3239 g_assert_not_reached();
3242 #endif
3244 return false;
3247 bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
3249 #if defined(TARGET_PPC64)
3250 CPUPPCState *env = cpu_env(cs);
3252 if (env->insns_flags2 & PPC2_ISA207S) {
3253 if (wp == env->dawr0_watchpoint) {
3254 uint32_t dawrx = env->spr[SPR_DAWRX0];
3255 bool wt = extract32(dawrx, PPC_BIT_NR(59), 1);
3256 bool wti = extract32(dawrx, PPC_BIT_NR(60), 1);
3257 bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
3258 bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
3259 bool pr = extract32(dawrx, PPC_BIT_NR(63), 1); /* DAWRX PRIVM is HV/SV/PR in bits 61:63 */
3261 if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
3262 return false;
3263 } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
3264 return false;
3265 } else if (!sv) {
3266 return false;
3269 if (!wti) {
3270 if (env->msr & ((target_ulong)1 << MSR_DR)) {
3271 if (!wt) {
3272 return false;
3274 } else {
3275 if (wt) {
3276 return false;
3281 return true;
3284 #endif
3286 return false;
3289 #endif /* !CONFIG_USER_ONLY */
3290 #endif /* CONFIG_TCG */