/*
 * target/ppc/excp_helper.c
 * (git blob 7b6ac16eef720f2ef30829e0cb467b1bce03c8e2, from qemu.git)
 *
 * NOTE(review): the stray line "block/copy-before-write: relax permission
 * requirements when no parents" was the hosting page's latest-commit
 * subject captured during extraction; it is not part of this file.
 */
1 /*
2 * PowerPC exception emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/main-loop.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "internal.h"
24 #include "helper_regs.h"
26 #ifdef CONFIG_TCG
27 #include "exec/helper-proto.h"
28 #include "exec/cpu_ldst.h"
29 #endif
/* Local debug switches: enable while diagnosing exception delivery. */
/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */

/* LOG_EXCP() compiles to a qemu_log() call only when DEBUG_EXCEPTIONS is on. */
#ifdef DEBUG_EXCEPTIONS
#  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_EXCP(...) do { } while (0)
#endif
41 /*****************************************************************************/
42 /* Exception processing */
43 #if defined(CONFIG_USER_ONLY)
44 void ppc_cpu_do_interrupt(CPUState *cs)
46 PowerPCCPU *cpu = POWERPC_CPU(cs);
47 CPUPPCState *env = &cpu->env;
49 cs->exception_index = POWERPC_EXCP_NONE;
50 env->error_code = 0;
53 static void ppc_hw_interrupt(CPUPPCState *env)
55 CPUState *cs = env_cpu(env);
57 cs->exception_index = POWERPC_EXCP_NONE;
58 env->error_code = 0;
60 #else /* defined(CONFIG_USER_ONLY) */
61 static inline void dump_syscall(CPUPPCState *env)
63 qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
64 " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
65 " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
66 " nip=" TARGET_FMT_lx "\n",
67 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
68 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
69 ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
70 ppc_dump_gpr(env, 8), env->nip);
73 static inline void dump_hcall(CPUPPCState *env)
75 qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
76 " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
77 " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
78 " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
79 " nip=" TARGET_FMT_lx "\n",
80 ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
81 ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
82 ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
83 ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
84 ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
85 env->nip);
88 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
89 target_ulong *msr)
91 /* We no longer are in a PM state */
92 env->resume_as_sreset = false;
94 /* Pretend to be returning from doze always as we don't lose state */
95 *msr |= SRR1_WS_NOLOSS;
97 /* Machine checks are sent normally */
98 if (excp == POWERPC_EXCP_MCHECK) {
99 return excp;
101 switch (excp) {
102 case POWERPC_EXCP_RESET:
103 *msr |= SRR1_WAKERESET;
104 break;
105 case POWERPC_EXCP_EXTERNAL:
106 *msr |= SRR1_WAKEEE;
107 break;
108 case POWERPC_EXCP_DECR:
109 *msr |= SRR1_WAKEDEC;
110 break;
111 case POWERPC_EXCP_SDOOR:
112 *msr |= SRR1_WAKEDBELL;
113 break;
114 case POWERPC_EXCP_SDOOR_HV:
115 *msr |= SRR1_WAKEHDBELL;
116 break;
117 case POWERPC_EXCP_HV_MAINT:
118 *msr |= SRR1_WAKEHMI;
119 break;
120 case POWERPC_EXCP_HVIRT:
121 *msr |= SRR1_WAKEHVI;
122 break;
123 default:
124 cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
125 excp);
127 return POWERPC_EXCP_RESET;
131 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
132 * taken with the MMU on, and which uses an alternate location (e.g., so the
133 * kernel/hv can map the vectors there with an effective address).
135 * An interrupt is considered to be taken "with AIL" or "AIL applies" if they
136 * are delivered in this way. AIL requires the LPCR to be set to enable this
137 * mode, and then a number of conditions have to be true for AIL to apply.
139 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
140 * they specifically want to be in real mode (e.g., the MCE might be signaling
141 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
143 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
144 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
145 * radix mode (LPCR[HR]).
147 * POWER8, POWER9 with LPCR[HR]=0
148 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
149 * +-----------+-------------+---------+-------------+-----+
150 * | a | 00/01/10 | x | x | 0 |
151 * | a | 11 | 0 | 1 | 0 |
152 * | a | 11 | 1 | 1 | a |
153 * | a | 11 | 0 | 0 | a |
154 * +-------------------------------------------------------+
156 * POWER9 with LPCR[HR]=1
157 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
158 * +-----------+-------------+---------+-------------+-----+
159 * | a | 00/01/10 | x | x | 0 |
160 * | a | 11 | x | x | a |
161 * +-------------------------------------------------------+
163 * The difference with POWER9 being that MSR[HV] 0->1 interrupts can be sent to
164 * the hypervisor in AIL mode if the guest is radix. This is good for
165 * performance but allows the guest to influence the AIL of hypervisor
166 * interrupts using its MSR, and also the hypervisor must disallow guest
167 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
168 * use AIL for its MSR[HV] 0->1 interrupts.
170 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
171 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
172 * MSR[HV] 1->1).
174 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
176 * POWER10 behaviour is
177 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
178 * +-----------+------------+-------------+---------+-------------+-----+
179 * | a | h | 00/01/10 | 0 | 0 | 0 |
180 * | a | h | 11 | 0 | 0 | a |
181 * | a | h | x | 0 | 1 | h |
182 * | a | h | 00/01/10 | 1 | 1 | 0 |
183 * | a | h | 11 | 1 | 1 | h |
184 * +--------------------------------------------------------------------+
186 static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
187 target_ulong msr,
188 target_ulong *new_msr,
189 target_ulong *vector)
191 #if defined(TARGET_PPC64)
192 CPUPPCState *env = &cpu->env;
193 bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
194 bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
195 int ail = 0;
197 if (excp == POWERPC_EXCP_MCHECK ||
198 excp == POWERPC_EXCP_RESET ||
199 excp == POWERPC_EXCP_HV_MAINT) {
200 /* SRESET, MCE, HMI never apply AIL */
201 return;
204 if (excp_model == POWERPC_EXCP_POWER8 ||
205 excp_model == POWERPC_EXCP_POWER9) {
206 if (!mmu_all_on) {
207 /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
208 return;
210 if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
212 * AIL does not work if there is a MSR[HV] 0->1 transition and the
213 * partition is in HPT mode. For radix guests, such interrupts are
214 * allowed to be delivered to the hypervisor in ail mode.
216 return;
219 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
220 if (ail == 0) {
221 return;
223 if (ail == 1) {
224 /* AIL=1 is reserved, treat it like AIL=0 */
225 return;
228 } else if (excp_model == POWERPC_EXCP_POWER10) {
229 if (!mmu_all_on && !hv_escalation) {
231 * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
232 * Guest->guest and HV->HV interrupts do require MMU on.
234 return;
237 if (*new_msr & MSR_HVB) {
238 if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
239 /* HV interrupts depend on LPCR[HAIL] */
240 return;
242 ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
243 } else {
244 ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
246 if (ail == 0) {
247 return;
249 if (ail == 1 || ail == 2) {
250 /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
251 return;
253 } else {
254 /* Other processors do not support AIL */
255 return;
259 * AIL applies, so the new MSR gets IR and DR set, and an offset applied
260 * to the new IP.
262 *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
264 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
265 if (ail == 2) {
266 *vector |= 0x0000000000018000ull;
267 } else if (ail == 3) {
268 *vector |= 0xc000000000004000ull;
270 } else {
272 * scv AIL is a little different. AIL=2 does not change the address,
273 * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
275 if (ail == 3) {
276 *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
277 *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
280 #endif
283 static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
284 target_ulong vector, target_ulong msr)
286 CPUState *cs = CPU(cpu);
287 CPUPPCState *env = &cpu->env;
290 * We don't use hreg_store_msr here as already have treated any
291 * special case that could occur. Just store MSR and update hflags
293 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
294 * will prevent setting of the HV bit which some exceptions might need
295 * to do.
297 env->msr = msr & env->msr_mask;
298 hreg_compute_hflags(env);
299 env->nip = vector;
300 /* Reset exception state */
301 cs->exception_index = POWERPC_EXCP_NONE;
302 env->error_code = 0;
304 /* Reset the reservation */
305 env->reserve_addr = -1;
308 * Any interrupt is context synchronizing, check if TCG TLB needs
309 * a delayed flush on ppc64
311 check_tlb_flush(env, false);
315 * Note that this function should be greatly optimized when called
316 * with a constant excp, from ppc_hw_interrupt
318 static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
320 CPUState *cs = CPU(cpu);
321 CPUPPCState *env = &cpu->env;
322 target_ulong msr, new_msr, vector;
323 int srr0, srr1, asrr0, asrr1, lev = -1;
325 qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
326 " => %08x (%02x)\n", env->nip, excp, env->error_code);
328 /* new srr1 value excluding must-be-zero bits */
329 if (excp_model == POWERPC_EXCP_BOOKE) {
330 msr = env->msr;
331 } else {
332 msr = env->msr & ~0x783f0000ULL;
336 * new interrupt handler msr preserves existing HV and ME unless
337 * explicitly overriden
339 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
341 /* target registers */
342 srr0 = SPR_SRR0;
343 srr1 = SPR_SRR1;
344 asrr0 = -1;
345 asrr1 = -1;
348 * check for special resume at 0x100 from doze/nap/sleep/winkle on
349 * P7/P8/P9
351 if (env->resume_as_sreset) {
352 excp = powerpc_reset_wakeup(cs, env, excp, &msr);
356 * Hypervisor emulation assistance interrupt only exists on server
357 * arch 2.05 server or later. We also don't want to generate it if
358 * we don't have HVB in msr_mask (PAPR mode).
360 if (excp == POWERPC_EXCP_HV_EMU
361 #if defined(TARGET_PPC64)
362 && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
363 #endif /* defined(TARGET_PPC64) */
366 excp = POWERPC_EXCP_PROGRAM;
369 switch (excp) {
370 case POWERPC_EXCP_NONE:
371 /* Should never happen */
372 return;
373 case POWERPC_EXCP_CRITICAL: /* Critical input */
374 switch (excp_model) {
375 case POWERPC_EXCP_40x:
376 srr0 = SPR_40x_SRR2;
377 srr1 = SPR_40x_SRR3;
378 break;
379 case POWERPC_EXCP_BOOKE:
380 srr0 = SPR_BOOKE_CSRR0;
381 srr1 = SPR_BOOKE_CSRR1;
382 break;
383 case POWERPC_EXCP_G2:
384 break;
385 default:
386 goto excp_invalid;
388 break;
389 case POWERPC_EXCP_MCHECK: /* Machine check exception */
390 if (msr_me == 0) {
392 * Machine check exception is not enabled. Enter
393 * checkstop state.
395 fprintf(stderr, "Machine check while not allowed. "
396 "Entering checkstop state\n");
397 if (qemu_log_separate()) {
398 qemu_log("Machine check while not allowed. "
399 "Entering checkstop state\n");
401 cs->halted = 1;
402 cpu_interrupt_exittb(cs);
404 if (env->msr_mask & MSR_HVB) {
406 * ISA specifies HV, but can be delivered to guest with HV
407 * clear (e.g., see FWNMI in PAPR).
409 new_msr |= (target_ulong)MSR_HVB;
412 /* machine check exceptions don't have ME set */
413 new_msr &= ~((target_ulong)1 << MSR_ME);
415 /* XXX: should also have something loaded in DAR / DSISR */
416 switch (excp_model) {
417 case POWERPC_EXCP_40x:
418 srr0 = SPR_40x_SRR2;
419 srr1 = SPR_40x_SRR3;
420 break;
421 case POWERPC_EXCP_BOOKE:
422 /* FIXME: choose one or the other based on CPU type */
423 srr0 = SPR_BOOKE_MCSRR0;
424 srr1 = SPR_BOOKE_MCSRR1;
425 asrr0 = SPR_BOOKE_CSRR0;
426 asrr1 = SPR_BOOKE_CSRR1;
427 break;
428 default:
429 break;
431 break;
432 case POWERPC_EXCP_DSI: /* Data storage exception */
433 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
434 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
435 break;
436 case POWERPC_EXCP_ISI: /* Instruction storage exception */
437 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
438 "\n", msr, env->nip);
439 msr |= env->error_code;
440 break;
441 case POWERPC_EXCP_EXTERNAL: /* External input */
443 bool lpes0;
445 cs = CPU(cpu);
448 * Exception targeting modifiers
450 * LPES0 is supported on POWER7/8/9
451 * LPES1 is not supported (old iSeries mode)
453 * On anything else, we behave as if LPES0 is 1
454 * (externals don't alter MSR:HV)
456 #if defined(TARGET_PPC64)
457 if (excp_model == POWERPC_EXCP_POWER7 ||
458 excp_model == POWERPC_EXCP_POWER8 ||
459 excp_model == POWERPC_EXCP_POWER9 ||
460 excp_model == POWERPC_EXCP_POWER10) {
461 lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
462 } else
463 #endif /* defined(TARGET_PPC64) */
465 lpes0 = true;
468 if (!lpes0) {
469 new_msr |= (target_ulong)MSR_HVB;
470 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
471 srr0 = SPR_HSRR0;
472 srr1 = SPR_HSRR1;
474 if (env->mpic_proxy) {
475 /* IACK the IRQ on delivery */
476 env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
478 break;
480 case POWERPC_EXCP_ALIGN: /* Alignment exception */
481 /* Get rS/rD and rA from faulting opcode */
483 * Note: the opcode fields will not be set properly for a
484 * direct store load/store, but nobody cares as nobody
485 * actually uses direct store segments.
487 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
488 break;
489 case POWERPC_EXCP_PROGRAM: /* Program exception */
490 switch (env->error_code & ~0xF) {
491 case POWERPC_EXCP_FP:
492 if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
493 LOG_EXCP("Ignore floating point exception\n");
494 cs->exception_index = POWERPC_EXCP_NONE;
495 env->error_code = 0;
496 return;
500 * FP exceptions always have NIP pointing to the faulting
501 * instruction, so always use store_next and claim we are
502 * precise in the MSR.
504 msr |= 0x00100000;
505 env->spr[SPR_BOOKE_ESR] = ESR_FP;
506 break;
507 case POWERPC_EXCP_INVAL:
508 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
509 msr |= 0x00080000;
510 env->spr[SPR_BOOKE_ESR] = ESR_PIL;
511 break;
512 case POWERPC_EXCP_PRIV:
513 msr |= 0x00040000;
514 env->spr[SPR_BOOKE_ESR] = ESR_PPR;
515 break;
516 case POWERPC_EXCP_TRAP:
517 msr |= 0x00020000;
518 env->spr[SPR_BOOKE_ESR] = ESR_PTR;
519 break;
520 default:
521 /* Should never occur */
522 cpu_abort(cs, "Invalid program exception %d. Aborting\n",
523 env->error_code);
524 break;
526 break;
527 case POWERPC_EXCP_SYSCALL: /* System call exception */
528 lev = env->error_code;
530 if ((lev == 1) && cpu->vhyp) {
531 dump_hcall(env);
532 } else {
533 dump_syscall(env);
537 * We need to correct the NIP which in this case is supposed
538 * to point to the next instruction
540 env->nip += 4;
542 /* "PAPR mode" built-in hypercall emulation */
543 if ((lev == 1) && cpu->vhyp) {
544 PPCVirtualHypervisorClass *vhc =
545 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
546 vhc->hypercall(cpu->vhyp, cpu);
547 return;
549 if (lev == 1) {
550 new_msr |= (target_ulong)MSR_HVB;
552 break;
553 case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
554 lev = env->error_code;
555 dump_syscall(env);
556 env->nip += 4;
557 new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
558 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
559 break;
560 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
561 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
562 case POWERPC_EXCP_DECR: /* Decrementer exception */
563 break;
564 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
565 /* FIT on 4xx */
566 LOG_EXCP("FIT exception\n");
567 break;
568 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
569 LOG_EXCP("WDT exception\n");
570 switch (excp_model) {
571 case POWERPC_EXCP_BOOKE:
572 srr0 = SPR_BOOKE_CSRR0;
573 srr1 = SPR_BOOKE_CSRR1;
574 break;
575 default:
576 break;
578 break;
579 case POWERPC_EXCP_DTLB: /* Data TLB error */
580 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
581 break;
582 case POWERPC_EXCP_DEBUG: /* Debug interrupt */
583 if (env->flags & POWERPC_FLAG_DE) {
584 /* FIXME: choose one or the other based on CPU type */
585 srr0 = SPR_BOOKE_DSRR0;
586 srr1 = SPR_BOOKE_DSRR1;
587 asrr0 = SPR_BOOKE_CSRR0;
588 asrr1 = SPR_BOOKE_CSRR1;
589 /* DBSR already modified by caller */
590 } else {
591 cpu_abort(cs, "Debug exception triggered on unsupported model\n");
593 break;
594 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
595 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
596 break;
597 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
598 /* XXX: TODO */
599 cpu_abort(cs, "Embedded floating point data exception "
600 "is not implemented yet !\n");
601 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
602 break;
603 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
604 /* XXX: TODO */
605 cpu_abort(cs, "Embedded floating point round exception "
606 "is not implemented yet !\n");
607 env->spr[SPR_BOOKE_ESR] = ESR_SPV;
608 break;
609 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
610 /* XXX: TODO */
611 cpu_abort(cs,
612 "Performance counter exception is not implemented yet !\n");
613 break;
614 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
615 break;
616 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
617 srr0 = SPR_BOOKE_CSRR0;
618 srr1 = SPR_BOOKE_CSRR1;
619 break;
620 case POWERPC_EXCP_RESET: /* System reset exception */
621 /* A power-saving exception sets ME, otherwise it is unchanged */
622 if (msr_pow) {
623 /* indicate that we resumed from power save mode */
624 msr |= 0x10000;
625 new_msr |= ((target_ulong)1 << MSR_ME);
627 if (env->msr_mask & MSR_HVB) {
629 * ISA specifies HV, but can be delivered to guest with HV
630 * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
632 new_msr |= (target_ulong)MSR_HVB;
633 } else {
634 if (msr_pow) {
635 cpu_abort(cs, "Trying to deliver power-saving system reset "
636 "exception %d with no HV support\n", excp);
639 break;
640 case POWERPC_EXCP_DSEG: /* Data segment exception */
641 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
642 case POWERPC_EXCP_TRACE: /* Trace exception */
643 break;
644 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
645 msr |= env->error_code;
646 /* fall through */
647 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
648 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
649 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
650 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
651 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
652 case POWERPC_EXCP_HV_EMU:
653 case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
654 srr0 = SPR_HSRR0;
655 srr1 = SPR_HSRR1;
656 new_msr |= (target_ulong)MSR_HVB;
657 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
658 break;
659 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
660 case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
661 case POWERPC_EXCP_FU: /* Facility unavailable exception */
662 #ifdef TARGET_PPC64
663 env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
664 #endif
665 break;
666 case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */
667 #ifdef TARGET_PPC64
668 env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
669 srr0 = SPR_HSRR0;
670 srr1 = SPR_HSRR1;
671 new_msr |= (target_ulong)MSR_HVB;
672 new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
673 #endif
674 break;
675 case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
676 LOG_EXCP("PIT exception\n");
677 break;
678 case POWERPC_EXCP_IO: /* IO error exception */
679 /* XXX: TODO */
680 cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
681 break;
682 case POWERPC_EXCP_RUNM: /* Run mode exception */
683 /* XXX: TODO */
684 cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
685 break;
686 case POWERPC_EXCP_EMUL: /* Emulation trap exception */
687 /* XXX: TODO */
688 cpu_abort(cs, "602 emulation trap exception "
689 "is not implemented yet !\n");
690 break;
691 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
692 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
693 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
694 switch (excp_model) {
695 case POWERPC_EXCP_602:
696 case POWERPC_EXCP_603:
697 case POWERPC_EXCP_603E:
698 case POWERPC_EXCP_G2:
699 /* Swap temporary saved registers with GPRs */
700 if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
701 new_msr |= (target_ulong)1 << MSR_TGPR;
702 hreg_swap_gpr_tgpr(env);
704 /* fall through */
705 case POWERPC_EXCP_7x5:
706 #if defined(DEBUG_SOFTWARE_TLB)
707 if (qemu_log_enabled()) {
708 const char *es;
709 target_ulong *miss, *cmp;
710 int en;
712 if (excp == POWERPC_EXCP_IFTLB) {
713 es = "I";
714 en = 'I';
715 miss = &env->spr[SPR_IMISS];
716 cmp = &env->spr[SPR_ICMP];
717 } else {
718 if (excp == POWERPC_EXCP_DLTLB) {
719 es = "DL";
720 } else {
721 es = "DS";
723 en = 'D';
724 miss = &env->spr[SPR_DMISS];
725 cmp = &env->spr[SPR_DCMP];
727 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
728 TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
729 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
730 env->spr[SPR_HASH1], env->spr[SPR_HASH2],
731 env->error_code);
733 #endif
734 msr |= env->crf[0] << 28;
735 msr |= env->error_code; /* key, D/I, S/L bits */
736 /* Set way using a LRU mechanism */
737 msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
738 break;
739 case POWERPC_EXCP_74xx:
740 #if defined(DEBUG_SOFTWARE_TLB)
741 if (qemu_log_enabled()) {
742 const char *es;
743 target_ulong *miss, *cmp;
744 int en;
746 if (excp == POWERPC_EXCP_IFTLB) {
747 es = "I";
748 en = 'I';
749 miss = &env->spr[SPR_TLBMISS];
750 cmp = &env->spr[SPR_PTEHI];
751 } else {
752 if (excp == POWERPC_EXCP_DLTLB) {
753 es = "DL";
754 } else {
755 es = "DS";
757 en = 'D';
758 miss = &env->spr[SPR_TLBMISS];
759 cmp = &env->spr[SPR_PTEHI];
761 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
762 TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
763 env->error_code);
765 #endif
766 msr |= env->error_code; /* key bit */
767 break;
768 default:
769 cpu_abort(cs, "Invalid TLB miss exception\n");
770 break;
772 break;
773 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
774 /* XXX: TODO */
775 cpu_abort(cs, "Floating point assist exception "
776 "is not implemented yet !\n");
777 break;
778 case POWERPC_EXCP_DABR: /* Data address breakpoint */
779 /* XXX: TODO */
780 cpu_abort(cs, "DABR exception is not implemented yet !\n");
781 break;
782 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
783 /* XXX: TODO */
784 cpu_abort(cs, "IABR exception is not implemented yet !\n");
785 break;
786 case POWERPC_EXCP_SMI: /* System management interrupt */
787 /* XXX: TODO */
788 cpu_abort(cs, "SMI exception is not implemented yet !\n");
789 break;
790 case POWERPC_EXCP_THERM: /* Thermal interrupt */
791 /* XXX: TODO */
792 cpu_abort(cs, "Thermal management exception "
793 "is not implemented yet !\n");
794 break;
795 case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
796 /* XXX: TODO */
797 cpu_abort(cs,
798 "Performance counter exception is not implemented yet !\n");
799 break;
800 case POWERPC_EXCP_VPUA: /* Vector assist exception */
801 /* XXX: TODO */
802 cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
803 break;
804 case POWERPC_EXCP_SOFTP: /* Soft patch exception */
805 /* XXX: TODO */
806 cpu_abort(cs,
807 "970 soft-patch exception is not implemented yet !\n");
808 break;
809 case POWERPC_EXCP_MAINT: /* Maintenance exception */
810 /* XXX: TODO */
811 cpu_abort(cs,
812 "970 maintenance exception is not implemented yet !\n");
813 break;
814 case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
815 /* XXX: TODO */
816 cpu_abort(cs, "Maskable external exception "
817 "is not implemented yet !\n");
818 break;
819 case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
820 /* XXX: TODO */
821 cpu_abort(cs, "Non maskable external exception "
822 "is not implemented yet !\n");
823 break;
824 default:
825 excp_invalid:
826 cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
827 break;
830 /* Sanity check */
831 if (!(env->msr_mask & MSR_HVB)) {
832 if (new_msr & MSR_HVB) {
833 cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
834 "no HV support\n", excp);
836 if (srr0 == SPR_HSRR0) {
837 cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
838 "no HV support\n", excp);
843 * Sort out endianness of interrupt, this differs depending on the
844 * CPU, the HV mode, etc...
846 #ifdef TARGET_PPC64
847 if (excp_model == POWERPC_EXCP_POWER7) {
848 if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
849 new_msr |= (target_ulong)1 << MSR_LE;
851 } else if (excp_model == POWERPC_EXCP_POWER8) {
852 if (new_msr & MSR_HVB) {
853 if (env->spr[SPR_HID0] & HID0_HILE) {
854 new_msr |= (target_ulong)1 << MSR_LE;
856 } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
857 new_msr |= (target_ulong)1 << MSR_LE;
859 } else if (excp_model == POWERPC_EXCP_POWER9 ||
860 excp_model == POWERPC_EXCP_POWER10) {
861 if (new_msr & MSR_HVB) {
862 if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
863 new_msr |= (target_ulong)1 << MSR_LE;
865 } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
866 new_msr |= (target_ulong)1 << MSR_LE;
868 } else if (msr_ile) {
869 new_msr |= (target_ulong)1 << MSR_LE;
871 #else
872 if (msr_ile) {
873 new_msr |= (target_ulong)1 << MSR_LE;
875 #endif
877 vector = env->excp_vectors[excp];
878 if (vector == (target_ulong)-1ULL) {
879 cpu_abort(cs, "Raised an exception without defined vector %d\n",
880 excp);
883 vector |= env->excp_prefix;
885 /* If any alternate SRR register are defined, duplicate saved values */
886 if (asrr0 != -1) {
887 env->spr[asrr0] = env->nip;
889 if (asrr1 != -1) {
890 env->spr[asrr1] = msr;
893 #if defined(TARGET_PPC64)
894 if (excp_model == POWERPC_EXCP_BOOKE) {
895 if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
896 /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
897 new_msr |= (target_ulong)1 << MSR_CM;
898 } else {
899 vector = (uint32_t)vector;
901 } else {
902 if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
903 vector = (uint32_t)vector;
904 } else {
905 new_msr |= (target_ulong)1 << MSR_SF;
908 #endif
910 if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
911 /* Save PC */
912 env->spr[srr0] = env->nip;
914 /* Save MSR */
915 env->spr[srr1] = msr;
917 #if defined(TARGET_PPC64)
918 } else {
919 vector += lev * 0x20;
921 env->lr = env->nip;
922 env->ctr = msr;
923 #endif
926 /* This can update new_msr and vector if AIL applies */
927 ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);
929 powerpc_set_excp_state(cpu, vector, new_msr);
932 void ppc_cpu_do_interrupt(CPUState *cs)
934 PowerPCCPU *cpu = POWERPC_CPU(cs);
935 CPUPPCState *env = &cpu->env;
937 powerpc_excp(cpu, env->excp_model, cs->exception_index);
940 static void ppc_hw_interrupt(CPUPPCState *env)
942 PowerPCCPU *cpu = env_archcpu(env);
943 bool async_deliver;
945 /* External reset */
946 if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
947 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
948 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
949 return;
951 /* Machine check exception */
952 if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
953 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
954 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
955 return;
957 #if 0 /* TODO */
958 /* External debug exception */
959 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
960 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
961 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
962 return;
964 #endif
967 * For interrupts that gate on MSR:EE, we need to do something a
968 * bit more subtle, as we need to let them through even when EE is
969 * clear when coming out of some power management states (in order
970 * for them to become a 0x100).
972 async_deliver = (msr_ee != 0) || env->resume_as_sreset;
974 /* Hypervisor decrementer exception */
975 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
976 /* LPCR will be clear when not supported so this will work */
977 bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
978 if ((async_deliver || msr_hv == 0) && hdice) {
979 /* HDEC clears on delivery */
980 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
981 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
982 return;
986 /* Hypervisor virtualization interrupt */
987 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
988 /* LPCR will be clear when not supported so this will work */
989 bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
990 if ((async_deliver || msr_hv == 0) && hvice) {
991 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
992 return;
996 /* External interrupt can ignore MSR:EE under some circumstances */
997 if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
998 bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
999 bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
1000 /* HEIC blocks delivery to the hypervisor */
1001 if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
1002 (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
1003 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
1004 return;
1007 if (msr_ce != 0) {
1008 /* External critical interrupt */
1009 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
1010 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
1011 return;
1014 if (async_deliver != 0) {
1015 /* Watchdog timer on embedded PowerPC */
1016 if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
1017 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
1018 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
1019 return;
1021 if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
1022 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
1023 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
1024 return;
1026 /* Fixed interval timer on embedded PowerPC */
1027 if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
1028 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
1029 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
1030 return;
1032 /* Programmable interval timer on embedded PowerPC */
1033 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
1034 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
1035 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
1036 return;
1038 /* Decrementer exception */
1039 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
1040 if (ppc_decr_clear_on_delivery(env)) {
1041 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
1043 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
1044 return;
1046 if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
1047 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1048 if (is_book3s_arch2x(env)) {
1049 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
1050 } else {
1051 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
1053 return;
1055 if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
1056 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1057 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
1058 return;
1060 if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
1061 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
1062 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
1063 return;
1065 /* Thermal interrupt */
1066 if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
1067 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
1068 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
1069 return;
1073 if (env->resume_as_sreset) {
1075 * This is a bug ! It means that has_work took us out of halt without
1076 * anything to deliver while in a PM state that requires getting
1077 * out via a 0x100
1079 * This means we will incorrectly execute past the power management
1080 * instruction instead of triggering a reset.
1082 * It generally means a discrepancy between the wakeup conditions in the
1083 * processor has_work implementation and the logic in this function.
1085 cpu_abort(env_cpu(env),
1086 "Wakeup from PM state but interrupt Undelivered");
1090 void ppc_cpu_do_system_reset(CPUState *cs)
1092 PowerPCCPU *cpu = POWERPC_CPU(cs);
1093 CPUPPCState *env = &cpu->env;
1095 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
1098 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
1100 PowerPCCPU *cpu = POWERPC_CPU(cs);
1101 CPUPPCState *env = &cpu->env;
1102 target_ulong msr = 0;
1105 * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
1106 * been set by KVM.
1108 msr = (1ULL << MSR_ME);
1109 msr |= env->msr & (1ULL << MSR_SF);
1110 if (ppc_interrupts_little_endian(cpu)) {
1111 msr |= (1ULL << MSR_LE);
1114 powerpc_set_excp_state(cpu, vector, msr);
1116 #endif /* !CONFIG_USER_ONLY */
1118 bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1120 PowerPCCPU *cpu = POWERPC_CPU(cs);
1121 CPUPPCState *env = &cpu->env;
1123 if (interrupt_request & CPU_INTERRUPT_HARD) {
1124 ppc_hw_interrupt(env);
1125 if (env->pending_interrupts == 0) {
1126 cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
1128 return true;
1130 return false;
1133 #if defined(DEBUG_OP)
1134 static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
1136 qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
1137 TARGET_FMT_lx "\n", RA, msr);
1139 #endif
1141 /*****************************************************************************/
1142 /* Exceptions processing helpers */
1144 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
1145 uint32_t error_code, uintptr_t raddr)
1147 CPUState *cs = env_cpu(env);
1149 cs->exception_index = exception;
1150 env->error_code = error_code;
1151 cpu_loop_exit_restore(cs, raddr);
1154 void raise_exception_err(CPUPPCState *env, uint32_t exception,
1155 uint32_t error_code)
1157 raise_exception_err_ra(env, exception, error_code, 0);
1160 void raise_exception(CPUPPCState *env, uint32_t exception)
1162 raise_exception_err_ra(env, exception, 0, 0);
1165 void raise_exception_ra(CPUPPCState *env, uint32_t exception,
1166 uintptr_t raddr)
1168 raise_exception_err_ra(env, exception, 0, raddr);
1171 #ifdef CONFIG_TCG
1172 void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
1173 uint32_t error_code)
1175 raise_exception_err_ra(env, exception, error_code, 0);
1178 void helper_raise_exception(CPUPPCState *env, uint32_t exception)
1180 raise_exception_err_ra(env, exception, 0, 0);
1182 #endif
1184 #if !defined(CONFIG_USER_ONLY)
1185 #ifdef CONFIG_TCG
1186 void helper_store_msr(CPUPPCState *env, target_ulong val)
1188 uint32_t excp = hreg_store_msr(env, val, 0);
1190 if (excp != 0) {
1191 CPUState *cs = env_cpu(env);
1192 cpu_interrupt_exittb(cs);
1193 raise_exception(env, excp);
1197 #if defined(TARGET_PPC64)
1198 void helper_scv(CPUPPCState *env, uint32_t lev)
1200 if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
1201 raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
1202 } else {
1203 raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
1207 void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
1209 CPUState *cs;
1211 cs = env_cpu(env);
1212 cs->halted = 1;
1214 /* Condition for waking up at 0x100 */
1215 env->resume_as_sreset = (insn != PPC_PM_STOP) ||
1216 (env->spr[SPR_PSSCR] & PSSCR_EC);
1218 #endif /* defined(TARGET_PPC64) */
1219 #endif /* CONFIG_TCG */
1221 static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
1223 CPUState *cs = env_cpu(env);
1225 /* MSR:POW cannot be set by any form of rfi */
1226 msr &= ~(1ULL << MSR_POW);
1228 #if defined(TARGET_PPC64)
1229 /* Switching to 32-bit ? Crop the nip */
1230 if (!msr_is_64bit(env, msr)) {
1231 nip = (uint32_t)nip;
1233 #else
1234 nip = (uint32_t)nip;
1235 #endif
1236 /* XXX: beware: this is false if VLE is supported */
1237 env->nip = nip & ~((target_ulong)0x00000003);
1238 hreg_store_msr(env, msr, 1);
1239 #if defined(DEBUG_OP)
1240 cpu_dump_rfi(env->nip, env->msr);
1241 #endif
1243 * No need to raise an exception here, as rfi is always the last
1244 * insn of a TB
1246 cpu_interrupt_exittb(cs);
1247 /* Reset the reservation */
1248 env->reserve_addr = -1;
1250 /* Context synchronizing: check if TCG TLB needs flush */
1251 check_tlb_flush(env, false);
1254 #ifdef CONFIG_TCG
1255 void helper_rfi(CPUPPCState *env)
1257 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
1260 #define MSR_BOOK3S_MASK
1261 #if defined(TARGET_PPC64)
1262 void helper_rfid(CPUPPCState *env)
1265 * The architecture defines a number of rules for which bits can
1266 * change but in practice, we handle this in hreg_store_msr()
1267 * which will be called by do_rfi(), so there is no need to filter
1268 * here
1270 do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
1273 void helper_rfscv(CPUPPCState *env)
1275 do_rfi(env, env->lr, env->ctr);
1278 void helper_hrfid(CPUPPCState *env)
1280 do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
1282 #endif
1284 /*****************************************************************************/
1285 /* Embedded PowerPC specific helpers */
1286 void helper_40x_rfci(CPUPPCState *env)
1288 do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
1291 void helper_rfci(CPUPPCState *env)
1293 do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
1296 void helper_rfdi(CPUPPCState *env)
1298 /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
1299 do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
1302 void helper_rfmci(CPUPPCState *env)
1304 /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
1305 do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
1307 #endif /* CONFIG_TCG */
1308 #endif /* !defined(CONFIG_USER_ONLY) */
1310 #ifdef CONFIG_TCG
1311 void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1312 uint32_t flags)
1314 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1315 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1316 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1317 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1318 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1319 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1320 POWERPC_EXCP_TRAP, GETPC());
1324 #if defined(TARGET_PPC64)
1325 void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
1326 uint32_t flags)
1328 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1329 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1330 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1331 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1332 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
1333 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
1334 POWERPC_EXCP_TRAP, GETPC());
1337 #endif
1338 #endif
1340 #if !defined(CONFIG_USER_ONLY)
1341 /*****************************************************************************/
1342 /* PowerPC 601 specific instructions (POWER bridge) */
1344 #ifdef CONFIG_TCG
1345 void helper_rfsvc(CPUPPCState *env)
1347 do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
1350 /* Embedded.Processor Control */
1351 static int dbell2irq(target_ulong rb)
1353 int msg = rb & DBELL_TYPE_MASK;
1354 int irq = -1;
1356 switch (msg) {
1357 case DBELL_TYPE_DBELL:
1358 irq = PPC_INTERRUPT_DOORBELL;
1359 break;
1360 case DBELL_TYPE_DBELL_CRIT:
1361 irq = PPC_INTERRUPT_CDOORBELL;
1362 break;
1363 case DBELL_TYPE_G_DBELL:
1364 case DBELL_TYPE_G_DBELL_CRIT:
1365 case DBELL_TYPE_G_DBELL_MC:
1366 /* XXX implement */
1367 default:
1368 break;
1371 return irq;
1374 void helper_msgclr(CPUPPCState *env, target_ulong rb)
1376 int irq = dbell2irq(rb);
1378 if (irq < 0) {
1379 return;
1382 env->pending_interrupts &= ~(1 << irq);
1385 void helper_msgsnd(target_ulong rb)
1387 int irq = dbell2irq(rb);
1388 int pir = rb & DBELL_PIRTAG_MASK;
1389 CPUState *cs;
1391 if (irq < 0) {
1392 return;
1395 qemu_mutex_lock_iothread();
1396 CPU_FOREACH(cs) {
1397 PowerPCCPU *cpu = POWERPC_CPU(cs);
1398 CPUPPCState *cenv = &cpu->env;
1400 if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
1401 cenv->pending_interrupts |= 1 << irq;
1402 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1405 qemu_mutex_unlock_iothread();
1408 /* Server Processor Control */
1410 static bool dbell_type_server(target_ulong rb)
1413 * A Directed Hypervisor Doorbell message is sent only if the
1414 * message type is 5. All other types are reserved and the
1415 * instruction is a no-op
1417 return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
1420 void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
1422 if (!dbell_type_server(rb)) {
1423 return;
1426 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
1429 static void book3s_msgsnd_common(int pir, int irq)
1431 CPUState *cs;
1433 qemu_mutex_lock_iothread();
1434 CPU_FOREACH(cs) {
1435 PowerPCCPU *cpu = POWERPC_CPU(cs);
1436 CPUPPCState *cenv = &cpu->env;
1438 /* TODO: broadcast message to all threads of the same processor */
1439 if (cenv->spr_cb[SPR_PIR].default_value == pir) {
1440 cenv->pending_interrupts |= 1 << irq;
1441 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
1444 qemu_mutex_unlock_iothread();
1447 void helper_book3s_msgsnd(target_ulong rb)
1449 int pir = rb & DBELL_PROCIDTAG_MASK;
1451 if (!dbell_type_server(rb)) {
1452 return;
1455 book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
1458 #if defined(TARGET_PPC64)
1459 void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
1461 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
1463 if (!dbell_type_server(rb)) {
1464 return;
1467 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
1471 * sends a message to other threads that are on the same
1472 * multi-threaded processor
1474 void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
1476 int pir = env->spr_cb[SPR_PIR].default_value;
1478 helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
1480 if (!dbell_type_server(rb)) {
1481 return;
1484 /* TODO: TCG supports only one thread */
1486 book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
1488 #endif
1489 #endif /* CONFIG_TCG */
1490 #endif
1492 #ifdef CONFIG_TCG
1493 void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
1494 MMUAccessType access_type,
1495 int mmu_idx, uintptr_t retaddr)
1497 CPUPPCState *env = cs->env_ptr;
1498 uint32_t insn;
1500 /* Restore state and reload the insn we executed, for filling in DSISR. */
1501 cpu_restore_state(cs, retaddr, true);
1502 insn = cpu_ldl_code(env, env->nip);
1504 cs->exception_index = POWERPC_EXCP_ALIGN;
1505 env->error_code = insn & 0x03FF0000;
1506 cpu_loop_exit(cs);
1508 #endif