/* target/ppc/excp_helper.c (qemu/kevin.git, "target/ppc: 405: External exception cleanup") */
/*
 * PowerPC exception emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"

#include "trace.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif

/*****************************************************************************/
/* Exception processing */
#if !defined(CONFIG_USER_ONLY)
static const char *powerpc_excp_name(int excp)
{
    switch (excp) {
    case POWERPC_EXCP_CRITICAL: return "CRITICAL";
    case POWERPC_EXCP_MCHECK: return "MCHECK";
    case POWERPC_EXCP_DSI: return "DSI";
    case POWERPC_EXCP_ISI: return "ISI";
    case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
    case POWERPC_EXCP_ALIGN: return "ALIGN";
    case POWERPC_EXCP_PROGRAM: return "PROGRAM";
    case POWERPC_EXCP_FPU: return "FPU";
    case POWERPC_EXCP_SYSCALL: return "SYSCALL";
    case POWERPC_EXCP_APU: return "APU";
    case POWERPC_EXCP_DECR: return "DECR";
    case POWERPC_EXCP_FIT: return "FIT";
    case POWERPC_EXCP_WDT: return "WDT";
    case POWERPC_EXCP_DTLB: return "DTLB";
    case POWERPC_EXCP_ITLB: return "ITLB";
    case POWERPC_EXCP_DEBUG: return "DEBUG";
    case POWERPC_EXCP_SPEU: return "SPEU";
    case POWERPC_EXCP_EFPDI: return "EFPDI";
    case POWERPC_EXCP_EFPRI: return "EFPRI";
    case POWERPC_EXCP_EPERFM: return "EPERFM";
    case POWERPC_EXCP_DOORI: return "DOORI";
    case POWERPC_EXCP_DOORCI: return "DOORCI";
    case POWERPC_EXCP_GDOORI: return "GDOORI";
    case POWERPC_EXCP_GDOORCI: return "GDOORCI";
    case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
    case POWERPC_EXCP_RESET: return "RESET";
    case POWERPC_EXCP_DSEG: return "DSEG";
    case POWERPC_EXCP_ISEG: return "ISEG";
    case POWERPC_EXCP_HDECR: return "HDECR";
    case POWERPC_EXCP_TRACE: return "TRACE";
    case POWERPC_EXCP_HDSI: return "HDSI";
    case POWERPC_EXCP_HISI: return "HISI";
    case POWERPC_EXCP_HDSEG: return "HDSEG";
    case POWERPC_EXCP_HISEG: return "HISEG";
    case POWERPC_EXCP_VPU: return "VPU";
    case POWERPC_EXCP_PIT: return "PIT";
    case POWERPC_EXCP_IO: return "IO";
    case POWERPC_EXCP_RUNM: return "RUNM";
    case POWERPC_EXCP_EMUL: return "EMUL";
    case POWERPC_EXCP_IFTLB: return "IFTLB";
    case POWERPC_EXCP_DLTLB: return "DLTLB";
    case POWERPC_EXCP_DSTLB: return "DSTLB";
    case POWERPC_EXCP_FPA: return "FPA";
    case POWERPC_EXCP_DABR: return "DABR";
    case POWERPC_EXCP_IABR: return "IABR";
    case POWERPC_EXCP_SMI: return "SMI";
    case POWERPC_EXCP_PERFM: return "PERFM";
    case POWERPC_EXCP_THERM: return "THERM";
    case POWERPC_EXCP_VPUA: return "VPUA";
    case POWERPC_EXCP_SOFTP: return "SOFTP";
    case POWERPC_EXCP_MAINT: return "MAINT";
    case POWERPC_EXCP_MEXTBR: return "MEXTBR";
    case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
    case POWERPC_EXCP_ITLBE: return "ITLBE";
    case POWERPC_EXCP_DTLBE: return "DTLBE";
    case POWERPC_EXCP_VSXU: return "VSXU";
    case POWERPC_EXCP_FU: return "FU";
    case POWERPC_EXCP_HV_EMU: return "HV_EMU";
    case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
    case POWERPC_EXCP_HV_FU: return "HV_FU";
    case POWERPC_EXCP_SDOOR: return "SDOOR";
    case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
    case POWERPC_EXCP_HVIRT: return "HVIRT";
    case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
    default:
        g_assert_not_reached();
    }
}
static void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

static void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}
static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
{
    const char *es;
    target_ulong *miss, *cmp;
    int en;

    if (!qemu_loglevel_mask(CPU_LOG_MMU)) {
        return;
    }

    if (excp == POWERPC_EXCP_IFTLB) {
        es = "I";
        en = 'I';
        miss = &env->spr[SPR_IMISS];
        cmp = &env->spr[SPR_ICMP];
    } else {
        if (excp == POWERPC_EXCP_DLTLB) {
            es = "DL";
        } else {
            es = "DS";
        }
        en = 'D';
        miss = &env->spr[SPR_DMISS];
        cmp = &env->spr[SPR_DCMP];
    }
    qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
             TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
             TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
             env->spr[SPR_HASH1], env->spr[SPR_HASH2],
             env->error_code);
}
static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We no longer are in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= SRR1_WS_NOLOSS;

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= SRR1_WAKERESET;
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= SRR1_WAKEEE;
        break;
    case POWERPC_EXCP_DECR:
        *msr |= SRR1_WAKEDEC;
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= SRR1_WAKEDBELL;
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= SRR1_WAKEHDBELL;
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= SRR1_WAKEHMI;
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= SRR1_WAKEHVI;
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}
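/*
 * Summary: apart from machine checks, every wakeup cause handled above is
 * funnelled through the 0x100 (system reset) vector, with the original cause
 * recorded in the SRR1 wake-reason bits.
 */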
/*
 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
 * taken with the MMU on, and which uses an alternate location (e.g., so the
 * kernel/hv can map the vectors there with an effective address).
 *
 * An interrupt is considered to be taken "with AIL" or "AIL applies" if it is
 * delivered in this way. AIL requires the LPCR to be set to enable this mode,
 * and then a number of conditions have to be true for AIL to apply.
 *
 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
 * they specifically want to be in real mode (e.g., the MCE might be signaling
 * an SLB multi-hit which requires an SLB flush before the MMU can be enabled).
 *
 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
 * radix mode (LPCR[HR]).
 *
 * POWER8, POWER9 with LPCR[HR]=0
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * |     a     |  00/01/10   |    x    |      x      |  0  |
 * |     a     |     11      |    0    |      1      |  0  |
 * |     a     |     11      |    1    |      1      |  a  |
 * |     a     |     11      |    0    |      0      |  a  |
 * +-------------------------------------------------------+
 *
 * POWER9 with LPCR[HR]=1
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * |     a     |  00/01/10   |    x    |      x      |  0  |
 * |     a     |     11      |    x    |      x      |  a  |
 * +-------------------------------------------------------+
 *
 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
 * the hypervisor in AIL mode if the guest is radix. This is good for
 * performance but allows the guest to influence the AIL of hypervisor
 * interrupts using its MSR, and also the hypervisor must disallow guest
 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
 * use AIL for its MSR[HV] 0->1 interrupts.
 *
 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
 * MSR[HV] 1->1).
 *
 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
 *
 * POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * |     a     |     h      |  00/01/10   |    0    |      0      |  0  |
 * |     a     |     h      |     11      |    0    |      0      |  a  |
 * |     a     |     h      |      x      |    0    |      1      |  h  |
 * |     a     |     h      |  00/01/10   |    1    |      1      |  0  |
 * |     a     |     h      |     11      |    1    |      1      |  h  |
 * +--------------------------------------------------------------------+
 */
static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
                               target_ulong msr,
                               target_ulong *new_msr,
                               target_ulong *vector)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }
    } else if (excp_model == POWERPC_EXCP_POWER10) {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    } else {
        /* Other processors do not support AIL */
        return;
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
        }
    }
#endif
}
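/*
 * Illustrative example (assuming env->excp_prefix == 0): with LPCR[AIL] = 3
 * on a book3s-64 CPU and MSR[IR] = MSR[DR] = 1, an external interrupt that
 * would normally vector to 0x500 is delivered at 0xc000000000004500 with
 * IR/DR set in the new MSR, while scv level "lev" enters at
 * 0xc000000000003000 + lev * 0x20 instead of 0x17000 + lev * 0x20.
 */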
static void powerpc_set_excp_state(PowerPCCPU *cpu,
                                   target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as we have already treated any
     * special case that could occur. Just store MSR and update hflags
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}
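/*
 * This is the common tail of every delivery path in this file: both
 * powerpc_excp_40x() and powerpc_excp_legacy() below, as well as the FWNMI
 * machine check path, end up here to commit the new MSR and NIP.
 */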
static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int excp_model = env->excp_model;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, lev = -1;

    if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
    }

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
                  excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    msr = env->msr & ~0x783f0000ULL;

    /*
     * new interrupt handler msr preserves existing ME unless
     * explicitly overridden.
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME));

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later.
     */
    if (excp == POWERPC_EXCP_HV_EMU) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

    switch (excp) {
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        srr0 = SPR_40x_SRR2;
        srr1 = SPR_40x_SRR3;
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        srr0 = SPR_40x_SRR2;
        srr1 = SPR_40x_SRR3;
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
        break;
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                trace_ppc_excp_fp_ignore();
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        trace_ppc_excp_print("WDT");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;

            env->spr[SPR_BOOKE_CSRR0] = env->nip;
            env->spr[SPR_BOOKE_CSRR1] = msr;

            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        trace_ppc_excp_print("PIT");
        break;
    default:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /* Save PC */
    env->spr[srr0] = env->nip;

    /* Save MSR */
    env->spr[srr1] = msr;

    powerpc_set_excp_state(cpu, vector, new_msr);
}
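/*
 * Note: compared with powerpc_excp_legacy() below, this 40x flavour omits the
 * AIL, interrupt-endianness and 64-bit MSR[CM]/MSR[SF] handling, none of
 * which exists on the 40x family.
 */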
/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp_legacy(PowerPCCPU *cpu, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int excp_model = env->excp_model;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, lev = -1;

    if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
    }

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
                  excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * P7/P8/P9
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later. We also don't want to generate it if we
     * don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */
    ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

#ifdef TARGET_PPC64
    /*
     * SPEU and VPU share the same IVOR but they exist in different
     * processors. SPEU is e500v1/2 only and VPU is e6500 only.
     */
    if (excp_model == POWERPC_EXCP_BOOKE && excp == POWERPC_EXCP_VPU) {
        excp = POWERPC_EXCP_SPEU;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

    switch (excp) {
    case POWERPC_EXCP_CRITICAL:    /* Critical input */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;

            env->spr[SPR_BOOKE_CSRR0] = env->nip;
            env->spr[SPR_BOOKE_CSRR1] = msr;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception */
        trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception */
        trace_ppc_excp_isi(msr, env->nip);
        msr |= env->error_code;
        break;
    case POWERPC_EXCP_EXTERNAL:  /* External input */
    {
        bool lpes0;

        cs = CPU(cpu);

        /*
         * Exception targeting modifiers
         *
         * LPES0 is supported on POWER7/8/9
         * LPES1 is not supported (old iSeries mode)
         *
         * On anything else, we behave as if LPES0 is 1
         * (externals don't alter MSR:HV)
         */
#if defined(TARGET_PPC64)
        if (excp_model == POWERPC_EXCP_POWER7 ||
            excp_model == POWERPC_EXCP_POWER8 ||
            excp_model == POWERPC_EXCP_POWER9 ||
            excp_model == POWERPC_EXCP_POWER10) {
            lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        } else
#endif /* defined(TARGET_PPC64) */
        {
            lpes0 = true;
        }

        if (!lpes0) {
            new_msr |= (target_ulong)MSR_HVB;
            new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
            srr0 = SPR_HSRR0;
            srr1 = SPR_HSRR1;
        }
        if (env->mpic_proxy) {
            /* IACK the IRQ on delivery */
            env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
        }
        break;
    }
    case POWERPC_EXCP_ALIGN:     /* Alignment exception */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                trace_ppc_excp_fp_ignore();
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            trace_ppc_excp_inval(env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
        lev = env->error_code;
        dump_syscall(env);
        env->nip += 4;
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);

        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
        break;
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable */
    case POWERPC_EXCP_DECR:      /* Decrementer exception */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt */
        /* FIT on 4xx */
        trace_ppc_excp_print("FIT");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt */
        trace_ppc_excp_print("WDT");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;

            env->spr[SPR_BOOKE_CSRR0] = env->nip;
            env->spr[SPR_BOOKE_CSRR1] = msr;

            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:   /* SPE/embedded floating-point unavailable/VPU */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception */
    case POWERPC_EXCP_TRACE:     /* Trace exception */
        break;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt */
        trace_ppc_excp_print("PIT");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error */
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss */
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_G2:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            /* fall through */
        case POWERPC_EXCP_7x5:
            ppc_excp_debug_sw_tlb(env, excp);

            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        default:
            cpu_abort(cs, "Invalid TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt */
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt */
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt */
    case POWERPC_EXCP_IO:        /* IO error exception */
    case POWERPC_EXCP_RUNM:      /* Run mode exception */
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception */
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception */
    case POWERPC_EXCP_DABR:      /* Data address breakpoint */
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint */
    case POWERPC_EXCP_SMI:       /* System management interrupt */
    case POWERPC_EXCP_THERM:     /* Thermal interrupt */
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt */
    case POWERPC_EXCP_VPUA:      /* Vector assist exception */
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception */
    case POWERPC_EXCP_MAINT:     /* Maintenance exception */
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint */
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint */
        cpu_abort(cs, "%s exception not implemented\n",
                  powerpc_excp_name(excp));
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }

    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
    if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* Save PC */
        env->spr[srr0] = env->nip;

        /* Save MSR */
        env->spr[srr1] = msr;
    }

    /* This can update new_msr and vector if AIL applies */
    ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);

    powerpc_set_excp_state(cpu, vector, new_msr);
}
static void powerpc_excp(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;

    switch (env->excp_model) {
    case POWERPC_EXCP_40x:
        powerpc_excp_40x(cpu, excp);
        break;
    default:
        powerpc_excp_legacy(cpu, excp);
    }
}

void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    powerpc_excp(cpu, cs->exception_index);
}
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100.
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in
         * the processor has_work implementation and the logic in this
         * function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}
void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    powerpc_excp(cpu, POWERPC_EXCP_RESET);
}

void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong msr = 0;

    /*
     * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
     * been set by KVM.
     */
    msr = (1ULL << MSR_ME);
    msr |= env->msr & (1ULL << MSR_SF);
    if (ppc_interrupts_little_endian(cpu, false)) {
        msr |= (1ULL << MSR_LE);
    }

    powerpc_set_excp_state(cpu, vector, msr);
}

bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */
/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

#ifdef CONFIG_TCG
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
#endif
#if !defined(CONFIG_USER_ONLY)
#ifdef CONFIG_TCG
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}
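/*
 * Note: when FSCR[SCV] is clear the facility-unavailable path above is taken;
 * otherwise the vectored syscall is delivered through the 0x17000 + lev * 0x20
 * entry (see the POWERPC_EXCP_SYSCALL_VECTORED case above), subject to the
 * scv-specific AIL adjustment in ppc_excp_apply_ail().
 */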
void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs;

    cs = env_cpu(env);
    cs->halted = 1;

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
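/*
 * resume_as_sreset set here is what lets ppc_hw_interrupt() deliver a pending
 * interrupt even with MSR[EE] clear, and what routes the wakeup through
 * powerpc_reset_wakeup() above.
 */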
static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

    /* MSR:TGPR cannot be set by any form of rfi */
    if (env->flags & POWERPC_FLAG_TGPR) {
        msr &= ~(1ULL << MSR_TGPR);
    }

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
    trace_ppc_excp_rfi(env->nip, env->msr);
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}

void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
void helper_rfebb(CPUPPCState *env, target_ulong s)
{
    target_ulong msr = env->msr;

    /*
     * Handling of BESCR bits 32:33 according to PowerISA v3.1:
     *
     * "If BESCR 32:33 != 0b00 the instruction is treated as if
     *  the instruction form were invalid."
     */
    if (env->spr[SPR_BESCR] & BESCR_INVALID) {
        raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                            POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    }

    env->nip = env->spr[SPR_EBBRR];

    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        env->nip = (uint32_t)env->spr[SPR_EBBRR];
    }

    if (s) {
        env->spr[SPR_BESCR] |= BESCR_GE;
    } else {
        env->spr[SPR_BESCR] &= ~BESCR_GE;
    }
}
#endif
/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif /* CONFIG_TCG */
#endif /* !defined(CONFIG_USER_ONLY) */
#ifdef CONFIG_TCG
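/*
 * The flags argument of helper_tw()/helper_td() mirrors the TO field of the
 * trap instructions: 0x10 = signed less than, 0x08 = signed greater than,
 * 0x04 = equal, 0x02 = unsigned less than, 0x01 = unsigned greater than.
 * A trap is taken if any selected condition holds.
 */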
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif
#endif
#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

#ifdef CONFIG_TCG
void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX implement */
    default:
        break;
    }

    return irq;
}
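/*
 * Guest doorbell types (DBELL_TYPE_G_DBELL*) fall through to irq = -1 above,
 * so helper_msgclr()/helper_msgsnd() below silently ignore them.
 */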
void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}
/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}
#if defined(TARGET_PPC64)
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */
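/*
 * The opcode bits stashed in error_code below are what the ALIGN case of the
 * powerpc_excp_*() functions later copies into DSISR (the rS/rD and rA fields
 * of the faulting instruction).
 */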
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_4xx:
        env->spr[SPR_40x_DEAR] = vaddr;
        break;
    case POWERPC_MMU_BOOKE:
    case POWERPC_MMU_BOOKE206:
        env->spr[SPR_BOOKE_DEAR] = vaddr;
        break;
    default:
        env->spr[SPR_DAR] = vaddr;
        break;
    }

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}
#endif /* CONFIG_TCG */
#endif /* !CONFIG_USER_ONLY */