/*
 *  PowerPC exception emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"

#ifdef CONFIG_TCG
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif
/* #define DEBUG_OP */
/* #define DEBUG_SOFTWARE_TLB */
/* #define DEBUG_EXCEPTIONS */

#ifdef DEBUG_EXCEPTIONS
#  define LOG_EXCP(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_EXCP(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exception processing */
#if defined(CONFIG_USER_ONLY)
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}

static void ppc_hw_interrupt(CPUPPCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
static inline void dump_syscall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
                  " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
                  " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7),
                  ppc_dump_gpr(env, 8), env->nip);
}

static inline void dump_hcall(CPUPPCState *env)
{
    qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64
                  " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4),
                  ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6),
                  ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8),
                  ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10),
                  ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12),
                  env->nip);
}
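/*
 * Both dump helpers above log through the CPU_LOG_INT mask, so their output
 * is only produced when interrupt logging is enabled (e.g. with "-d int" on
 * the QEMU command line).
 */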
static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
                                target_ulong *msr)
{
    /* We are no longer in a PM state */
    env->resume_as_sreset = false;

    /* Pretend to be returning from doze always as we don't lose state */
    *msr |= SRR1_WS_NOLOSS;

    /* Machine checks are sent normally */
    if (excp == POWERPC_EXCP_MCHECK) {
        return excp;
    }
    switch (excp) {
    case POWERPC_EXCP_RESET:
        *msr |= SRR1_WAKERESET;
        break;
    case POWERPC_EXCP_EXTERNAL:
        *msr |= SRR1_WAKEEE;
        break;
    case POWERPC_EXCP_DECR:
        *msr |= SRR1_WAKEDEC;
        break;
    case POWERPC_EXCP_SDOOR:
        *msr |= SRR1_WAKEDBELL;
        break;
    case POWERPC_EXCP_SDOOR_HV:
        *msr |= SRR1_WAKEHDBELL;
        break;
    case POWERPC_EXCP_HV_MAINT:
        *msr |= SRR1_WAKEHMI;
        break;
    case POWERPC_EXCP_HVIRT:
        *msr |= SRR1_WAKEHVI;
        break;
    default:
        cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
                  excp);
    }
    return POWERPC_EXCP_RESET;
}
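/*
 * Note: powerpc_reset_wakeup() converts whatever interrupt woke the core
 * from a power-saving state into a system reset (vector 0x100), encoding
 * the wake reason into the SRR1 value the handler will observe.
 */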
/*
 * AIL - Alternate Interrupt Location, a mode that allows interrupts to be
 * taken with the MMU on, and which uses an alternate location (e.g., so the
 * kernel/hv can map the vectors there with an effective address).
 *
 * An interrupt is considered to be taken "with AIL" or "AIL applies" if it
 * is delivered in this way. AIL requires the LPCR to be set to enable this
 * mode, and then a number of conditions have to be true for AIL to apply.
 *
 * First of all, SRESET, MCE, and HMI are always delivered without AIL, because
 * they specifically want to be in real mode (e.g., the MCE might be signaling
 * a SLB multi-hit which requires SLB flush before the MMU can be enabled).
 *
 * After that, behaviour depends on the current MSR[IR], MSR[DR], MSR[HV],
 * whether or not the interrupt changes MSR[HV] from 0 to 1, and the current
 * radix mode (LPCR[HR]).
 *
 * POWER8, POWER9 with LPCR[HR]=0
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | 0       | 1           | 0   |
 * | a         | 11          | 1       | 1           | a   |
 * | a         | 11          | 0       | 0           | a   |
 * +-------------------------------------------------------+
 *
 * POWER9 with LPCR[HR]=1
 * | LPCR[AIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+-------------+---------+-------------+-----+
 * | a         | 00/01/10    | x       | x           | 0   |
 * | a         | 11          | x       | x           | a   |
 * +-------------------------------------------------------+
 *
 * The difference with POWER9 is that MSR[HV] 0->1 interrupts can be sent to
 * the hypervisor in AIL mode if the guest is radix. This is good for
 * performance but allows the guest to influence the AIL of hypervisor
 * interrupts using its MSR, and also the hypervisor must disallow guest
 * interrupts (MSR[HV] 0->0) from using AIL if the hypervisor does not want to
 * use AIL for its MSR[HV] 0->1 interrupts.
 *
 * POWER10 addresses those issues with a new LPCR[HAIL] bit that is applied to
 * interrupts that begin execution with MSR[HV]=1 (so both MSR[HV] 0->1 and
 * MSR[HV] 1->1).
 * HAIL=1 is equivalent to AIL=3, for interrupts delivered with MSR[HV]=1.
 *
 * POWER10 behaviour is
 * | LPCR[AIL] | LPCR[HAIL] | MSR[IR||DR] | MSR[HV] | new MSR[HV] | AIL |
 * +-----------+------------+-------------+---------+-------------+-----+
 * | a         | h          | 00/01/10    | 0       | 0           | 0   |
 * | a         | h          | 11          | 0       | 0           | a   |
 * | a         | h          | x           | 0       | 1           | h   |
 * | a         | h          | 00/01/10    | 1       | 1           | 0   |
 * | a         | h          | 11          | 1       | 1           | h   |
 * +--------------------------------------------------------------------+
 */
static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
                                      target_ulong msr,
                                      target_ulong *new_msr,
                                      target_ulong *vector)
{
#if defined(TARGET_PPC64)
    CPUPPCState *env = &cpu->env;
    bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
    bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
    int ail = 0;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_HV_MAINT) {
        /* SRESET, MCE, HMI never apply AIL */
        return;
    }

    if (excp_model == POWERPC_EXCP_POWER8 ||
        excp_model == POWERPC_EXCP_POWER9) {
        if (!mmu_all_on) {
            /* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
            return;
        }
        if (hv_escalation && !(env->spr[SPR_LPCR] & LPCR_HR)) {
            /*
             * AIL does not work if there is a MSR[HV] 0->1 transition and the
             * partition is in HPT mode. For radix guests, such interrupts are
             * allowed to be delivered to the hypervisor in ail mode.
             */
            return;
        }

        ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        if (ail == 0) {
            return;
        }
        if (ail == 1) {
            /* AIL=1 is reserved, treat it like AIL=0 */
            return;
        }

    } else if (excp_model == POWERPC_EXCP_POWER10) {
        if (!mmu_all_on && !hv_escalation) {
            /*
             * AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
             * Guest->guest and HV->HV interrupts do require MMU on.
             */
            return;
        }

        if (*new_msr & MSR_HVB) {
            if (!(env->spr[SPR_LPCR] & LPCR_HAIL)) {
                /* HV interrupts depend on LPCR[HAIL] */
                return;
            }
            ail = 3; /* HAIL=1 gives AIL=3 behaviour for HV interrupts */
        } else {
            ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
        }
        if (ail == 0) {
            return;
        }
        if (ail == 1 || ail == 2) {
            /* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
            return;
        }
    } else {
        /* Other processors do not support AIL */
        return;
    }

    /*
     * AIL applies, so the new MSR gets IR and DR set, and an offset applied
     * to the new IP.
     */
    *new_msr |= (1 << MSR_IR) | (1 << MSR_DR);

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        if (ail == 2) {
            *vector |= 0x0000000000018000ull;
        } else if (ail == 3) {
            *vector |= 0xc000000000004000ull;
        }
    } else {
        /*
         * scv AIL is a little different. AIL=2 does not change the address,
         * only the MSR. AIL=3 replaces the 0x17000 base with 0xc...3000.
         */
        if (ail == 3) {
            *vector &= ~0x0000000000017000ull; /* Un-apply the base offset */
            *vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
        }
    }
#endif
}
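/*
 * ppc_excp_apply_ail() is only called at the end of powerpc_excp(), once the
 * base vector and new MSR have been computed; it may rewrite both values
 * through the new_msr/vector pointers before the state is committed.
 */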
static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
                                          target_ulong vector, target_ulong msr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    /*
     * We don't use hreg_store_msr here as we have already treated any
     * special case that could occur. Just store MSR and update hflags.
     *
     * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
     * will prevent setting of the HV bit which some exceptions might need
     * to do.
     */
    env->msr = msr & env->msr_mask;
    hreg_compute_hflags(env);
    env->nip = vector;
    /* Reset exception state */
    cs->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;

    /* Reset the reservation */
    env->reserve_addr = -1;

    /*
     * Any interrupt is context synchronizing, check if TCG TLB needs
     * a delayed flush on ppc64
     */
    check_tlb_flush(env, false);
}
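/*
 * powerpc_set_excp_state() is the single point where an interrupt actually
 * takes effect: it commits the new MSR and NIP, clears the pending exception
 * state and the reservation, and performs any delayed TLB flush.
 */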
/*
 * Note that this function should be greatly optimized when called
 * with a constant excp, from ppc_hw_interrupt
 */
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong msr, new_msr, vector;
    int srr0, srr1, asrr0, asrr1, lev = -1;

    qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
                  " => %08x (%02x)\n", env->nip, excp, env->error_code);

    /* new srr1 value excluding must-be-zero bits */
    if (excp_model == POWERPC_EXCP_BOOKE) {
        msr = env->msr;
    } else {
        msr = env->msr & ~0x783f0000ULL;
    }

    /*
     * new interrupt handler msr preserves existing HV and ME unless
     * explicitly overridden
     */
    new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);

    /* target registers */
    srr0 = SPR_SRR0;
    srr1 = SPR_SRR1;
    asrr0 = -1;
    asrr1 = -1;

    /*
     * check for special resume at 0x100 from doze/nap/sleep/winkle on
     * pseries or PowerNV
     */
    if (env->resume_as_sreset) {
        excp = powerpc_reset_wakeup(cs, env, excp, &msr);
    }

    /*
     * Hypervisor emulation assistance interrupt only exists on server
     * arch 2.05 or later. We also don't want to generate it if we
     * don't have HVB in msr_mask (PAPR mode).
     */
    if (excp == POWERPC_EXCP_HV_EMU
#if defined(TARGET_PPC64)
        && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
#endif /* defined(TARGET_PPC64) */
        ) {
        excp = POWERPC_EXCP_PROGRAM;
    }

    switch (excp) {
    case POWERPC_EXCP_NONE:
        /* Should never happen */
        return;
    case POWERPC_EXCP_CRITICAL:    /* Critical input                         */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        case POWERPC_EXCP_G2:
            break;
        default:
            goto excp_invalid;
        }
        break;
    case POWERPC_EXCP_MCHECK:    /* Machine check exception                  */
        if (msr_me == 0) {
            /*
             * Machine check exception is not enabled. Enter
             * checkstop state.
             */
            fprintf(stderr, "Machine check while not allowed. "
                    "Entering checkstop state\n");
            if (qemu_log_separate()) {
                qemu_log("Machine check while not allowed. "
                         "Entering checkstop state\n");
            }
            cs->halted = 1;
            cpu_interrupt_exittb(cs);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR).
             */
            new_msr |= (target_ulong)MSR_HVB;
        }

        /* machine check exceptions don't have ME set */
        new_msr &= ~((target_ulong)1 << MSR_ME);

        /* XXX: should also have something loaded in DAR / DSISR */
        switch (excp_model) {
        case POWERPC_EXCP_40x:
            srr0 = SPR_40x_SRR2;
            srr1 = SPR_40x_SRR3;
            break;
        case POWERPC_EXCP_BOOKE:
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_MCSRR0;
            srr1 = SPR_BOOKE_MCSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DSI:       /* Data storage exception                   */
        LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx " DAR=" TARGET_FMT_lx
                 "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
        break;
    case POWERPC_EXCP_ISI:       /* Instruction storage exception            */
        LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
                 "\n", msr, env->nip);
        msr |= env->error_code;
        break;
: /* External input */
448 * Exception targeting modifiers
450 * LPES0 is supported on POWER7/8/9
451 * LPES1 is not supported (old iSeries mode)
453 * On anything else, we behave as if LPES0 is 1
454 * (externals don't alter MSR:HV)
456 #if defined(TARGET_PPC64)
457 if (excp_model
== POWERPC_EXCP_POWER7
||
458 excp_model
== POWERPC_EXCP_POWER8
||
459 excp_model
== POWERPC_EXCP_POWER9
||
460 excp_model
== POWERPC_EXCP_POWER10
) {
461 lpes0
= !!(env
->spr
[SPR_LPCR
] & LPCR_LPES0
);
463 #endif /* defined(TARGET_PPC64) */
469 new_msr
|= (target_ulong
)MSR_HVB
;
470 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
474 if (env
->mpic_proxy
) {
475 /* IACK the IRQ on delivery */
476 env
->spr
[SPR_BOOKE_EPR
] = ldl_phys(cs
->as
, env
->mpic_iack
);
    case POWERPC_EXCP_ALIGN:     /* Alignment exception                      */
        /* Get rS/rD and rA from faulting opcode */
        /*
         * Note: the opcode fields will not be set properly for a
         * direct store load/store, but nobody cares as nobody
         * actually uses direct store segments.
         */
        env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
        break;
    case POWERPC_EXCP_PROGRAM:   /* Program exception                        */
        switch (env->error_code & ~0xF) {
        case POWERPC_EXCP_FP:
            if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
                LOG_EXCP("Ignore floating point exception\n");
                cs->exception_index = POWERPC_EXCP_NONE;
                env->error_code = 0;
                return;
            }

            /*
             * FP exceptions always have NIP pointing to the faulting
             * instruction, so always use store_next and claim we are
             * precise in the MSR.
             */
            msr |= 0x00100000;
            env->spr[SPR_BOOKE_ESR] = ESR_FP;
            break;
        case POWERPC_EXCP_INVAL:
            LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
            msr |= 0x00080000;
            env->spr[SPR_BOOKE_ESR] = ESR_PIL;
            break;
        case POWERPC_EXCP_PRIV:
            msr |= 0x00040000;
            env->spr[SPR_BOOKE_ESR] = ESR_PPR;
            break;
        case POWERPC_EXCP_TRAP:
            msr |= 0x00020000;
            env->spr[SPR_BOOKE_ESR] = ESR_PTR;
            break;
        default:
            /* Should never occur */
            cpu_abort(cs, "Invalid program exception %d. Aborting\n",
                      env->error_code);
            break;
        }
        break;
    case POWERPC_EXCP_SYSCALL:   /* System call exception                    */
        lev = env->error_code;

        if ((lev == 1) && cpu->vhyp) {
            dump_hcall(env);
        } else {
            dump_syscall(env);
        }

        /*
         * We need to correct the NIP which in this case is supposed
         * to point to the next instruction
         */
        env->nip += 4;

        /* "PAPR mode" built-in hypercall emulation */
        if ((lev == 1) && cpu->vhyp) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
            vhc->hypercall(cpu->vhyp, cpu);
            return;
        }
        if (lev == 1) {
            new_msr |= (target_ulong)MSR_HVB;
        }
        break;
    case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception                     */
        lev = env->error_code;
        dump_syscall(env);
        env->nip += 4;
        new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
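    /*
     * Note on the scv case above: unlike sc, scv does not clear MSR[EE] or
     * MSR[RI] (both are carried over from the caller's MSR), and the return
     * state is kept in LR/CTR rather than SRR0/SRR1 (see the scv handling
     * further down).
     */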
    case POWERPC_EXCP_FPU:       /* Floating-point unavailable exception     */
    case POWERPC_EXCP_APU:       /* Auxiliary processor unavailable          */
    case POWERPC_EXCP_DECR:      /* Decrementer exception                    */
        break;
    case POWERPC_EXCP_FIT:       /* Fixed-interval timer interrupt           */
        /* FIT on 4xx */
        LOG_EXCP("FIT exception\n");
        break;
    case POWERPC_EXCP_WDT:       /* Watchdog timer interrupt                 */
        LOG_EXCP("WDT exception\n");
        switch (excp_model) {
        case POWERPC_EXCP_BOOKE:
            srr0 = SPR_BOOKE_CSRR0;
            srr1 = SPR_BOOKE_CSRR1;
            break;
        default:
            break;
        }
        break;
    case POWERPC_EXCP_DTLB:      /* Data TLB error                           */
    case POWERPC_EXCP_ITLB:      /* Instruction TLB error                    */
        break;
    case POWERPC_EXCP_DEBUG:     /* Debug interrupt                          */
        if (env->flags & POWERPC_FLAG_DE) {
            /* FIXME: choose one or the other based on CPU type */
            srr0 = SPR_BOOKE_DSRR0;
            srr1 = SPR_BOOKE_DSRR1;
            asrr0 = SPR_BOOKE_CSRR0;
            asrr1 = SPR_BOOKE_CSRR1;
            /* DBSR already modified by caller */
        } else {
            cpu_abort(cs, "Debug exception triggered on unsupported model\n");
        }
        break;
    case POWERPC_EXCP_SPEU:      /* SPE/embedded floating-point unavailable  */
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPDI:     /* Embedded floating-point data interrupt   */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point data exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EFPRI:     /* Embedded floating-point round interrupt  */
        /* XXX: TODO */
        cpu_abort(cs, "Embedded floating point round exception "
                  "is not implemented yet !\n");
        env->spr[SPR_BOOKE_ESR] = ESR_SPV;
        break;
    case POWERPC_EXCP_EPERFM:    /* Embedded performance monitor interrupt   */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DOORI:     /* Embedded doorbell interrupt              */
        break;
    case POWERPC_EXCP_DOORCI:    /* Embedded doorbell critical interrupt     */
        srr0 = SPR_BOOKE_CSRR0;
        srr1 = SPR_BOOKE_CSRR1;
        break;
    case POWERPC_EXCP_RESET:     /* System reset exception                   */
        /* A power-saving exception sets ME, otherwise it is unchanged */
        if (msr_pow) {
            /* indicate that we resumed from power save mode */
            msr |= 0x10000;
            new_msr |= ((target_ulong)1 << MSR_ME);
        }
        if (env->msr_mask & MSR_HVB) {
            /*
             * ISA specifies HV, but can be delivered to guest with HV
             * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU).
             */
            new_msr |= (target_ulong)MSR_HVB;
        } else {
            if (msr_pow) {
                cpu_abort(cs, "Trying to deliver power-saving system reset "
                          "exception %d with no HV support\n", excp);
            }
        }
        break;
    case POWERPC_EXCP_DSEG:      /* Data segment exception                   */
    case POWERPC_EXCP_ISEG:      /* Instruction segment exception            */
    case POWERPC_EXCP_TRACE:     /* Trace exception                          */
        break;
    case POWERPC_EXCP_HISI:      /* Hypervisor instruction storage exception */
        msr |= env->error_code;
        /* fall through */
    case POWERPC_EXCP_HDECR:     /* Hypervisor decrementer exception         */
    case POWERPC_EXCP_HDSI:      /* Hypervisor data storage exception        */
    case POWERPC_EXCP_HDSEG:     /* Hypervisor data segment exception        */
    case POWERPC_EXCP_HISEG:     /* Hypervisor instruction segment exception */
    case POWERPC_EXCP_SDOOR_HV:  /* Hypervisor Doorbell interrupt            */
    case POWERPC_EXCP_HV_EMU:
    case POWERPC_EXCP_HVIRT:     /* Hypervisor virtualization                */
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
        break;
    case POWERPC_EXCP_VPU:       /* Vector unavailable exception             */
    case POWERPC_EXCP_VSXU:      /* VSX unavailable exception                */
    case POWERPC_EXCP_FU:        /* Facility unavailable exception           */
#ifdef TARGET_PPC64
        env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
#endif
        break;
    case POWERPC_EXCP_HV_FU:     /* Hypervisor Facility Unavailable Exception */
#ifdef TARGET_PPC64
        env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
        srr0 = SPR_HSRR0;
        srr1 = SPR_HSRR1;
        new_msr |= (target_ulong)MSR_HVB;
        new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
#endif
        break;
    case POWERPC_EXCP_PIT:       /* Programmable interval timer interrupt    */
        LOG_EXCP("PIT exception\n");
        break;
    case POWERPC_EXCP_IO:        /* IO error exception                       */
        /* XXX: TODO */
        cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_RUNM:      /* Run mode exception                       */
        /* XXX: TODO */
        cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_EMUL:      /* Emulation trap exception                 */
        /* XXX: TODO */
        cpu_abort(cs, "602 emulation trap exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IFTLB:     /* Instruction fetch TLB error              */
    case POWERPC_EXCP_DLTLB:     /* Data load TLB miss                       */
    case POWERPC_EXCP_DSTLB:     /* Data store TLB miss                      */
        switch (excp_model) {
        case POWERPC_EXCP_602:
        case POWERPC_EXCP_603:
        case POWERPC_EXCP_603E:
        case POWERPC_EXCP_G2:
            /* Swap temporary saved registers with GPRs */
            if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
                new_msr |= (target_ulong)1 << MSR_TGPR;
                hreg_swap_gpr_tgpr(env);
            }
            /* fall through */
        case POWERPC_EXCP_7x5:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_IMISS];
                    cmp = &env->spr[SPR_ICMP];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_DMISS];
                    cmp = &env->spr[SPR_DCMP];
                }
                qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->spr[SPR_HASH1], env->spr[SPR_HASH2],
                         env->error_code);
            }
#endif
            msr |= env->crf[0] << 28;
            msr |= env->error_code; /* key, D/I, S/L bits */
            /* Set way using a LRU mechanism */
            msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
            break;
        case POWERPC_EXCP_74xx:
#if defined(DEBUG_SOFTWARE_TLB)
            if (qemu_log_enabled()) {
                const char *es;
                target_ulong *miss, *cmp;
                int en;

                if (excp == POWERPC_EXCP_IFTLB) {
                    es = "I";
                    en = 'I';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                } else {
                    if (excp == POWERPC_EXCP_DLTLB) {
                        es = "DL";
                    } else {
                        es = "DS";
                    }
                    en = 'D';
                    miss = &env->spr[SPR_TLBMISS];
                    cmp = &env->spr[SPR_PTEHI];
                }
                qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
                         TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
                         env->error_code);
            }
#endif
            msr |= env->error_code; /* key bit */
            break;
        default:
            cpu_abort(cs, "Invalid TLB miss exception\n");
            break;
        }
        break;
    case POWERPC_EXCP_FPA:       /* Floating-point assist exception          */
        /* XXX: TODO */
        cpu_abort(cs, "Floating point assist exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_DABR:      /* Data address breakpoint                  */
        /* XXX: TODO */
        cpu_abort(cs, "DABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_IABR:      /* Instruction address breakpoint           */
        /* XXX: TODO */
        cpu_abort(cs, "IABR exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SMI:       /* System management interrupt              */
        /* XXX: TODO */
        cpu_abort(cs, "SMI exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_THERM:     /* Thermal interrupt                        */
        /* XXX: TODO */
        cpu_abort(cs, "Thermal management exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_PERFM:     /* Embedded performance monitor interrupt   */
        /* XXX: TODO */
        cpu_abort(cs,
                  "Performance counter exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_VPUA:      /* Vector assist exception                  */
        /* XXX: TODO */
        cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_SOFTP:     /* Soft patch exception                     */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 soft-patch exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MAINT:     /* Maintenance exception                    */
        /* XXX: TODO */
        cpu_abort(cs,
                  "970 maintenance exception is not implemented yet !\n");
        break;
    case POWERPC_EXCP_MEXTBR:    /* Maskable external breakpoint             */
        /* XXX: TODO */
        cpu_abort(cs, "Maskable external exception "
                  "is not implemented yet !\n");
        break;
    case POWERPC_EXCP_NMEXTBR:   /* Non maskable external breakpoint         */
        /* XXX: TODO */
        cpu_abort(cs, "Non maskable external exception "
                  "is not implemented yet !\n");
        break;
    default:
    excp_invalid:
        cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
        break;
    }
    /* Sanity check */
    if (!(env->msr_mask & MSR_HVB)) {
        if (new_msr & MSR_HVB) {
            cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
                      "no HV support\n", excp);
        }
        if (srr0 == SPR_HSRR0) {
            cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
                      "no HV support\n", excp);
        }
    }

    /*
     * Sort out endianness of interrupt, this differs depending on the
     * CPU, the HV mode, etc...
     */
#ifdef TARGET_PPC64
    if (excp_model == POWERPC_EXCP_POWER7) {
        if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER8) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (excp_model == POWERPC_EXCP_POWER9 ||
               excp_model == POWERPC_EXCP_POWER10) {
        if (new_msr & MSR_HVB) {
            if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
                new_msr |= (target_ulong)1 << MSR_LE;
            }
        } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
            new_msr |= (target_ulong)1 << MSR_LE;
        }
    } else if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#else
    if (msr_ile) {
        new_msr |= (target_ulong)1 << MSR_LE;
    }
#endif

    vector = env->excp_vectors[excp];
    if (vector == (target_ulong)-1ULL) {
        cpu_abort(cs, "Raised an exception without defined vector %d\n",
                  excp);
    }

    vector |= env->excp_prefix;

    /* If any alternate SRR registers are defined, duplicate saved values */
    if (asrr0 != -1) {
        env->spr[asrr0] = env->nip;
    }
    if (asrr1 != -1) {
        env->spr[asrr1] = msr;
    }

#if defined(TARGET_PPC64)
    if (excp_model == POWERPC_EXCP_BOOKE) {
        if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
            /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
            new_msr |= (target_ulong)1 << MSR_CM;
        } else {
            vector = (uint32_t)vector;
        }
    } else {
        if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
            vector = (uint32_t)vector;
        } else {
            new_msr |= (target_ulong)1 << MSR_SF;
        }
    }
#endif

    if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
        /* Save PC */
        env->spr[srr0] = env->nip;

        /* Save MSR */
        env->spr[srr1] = msr;
    }
#if defined(TARGET_PPC64)
    if (excp == POWERPC_EXCP_SYSCALL_VECTORED) {
        vector += lev * 0x20;

        env->lr = env->nip;
        env->ctr = msr;
    }
#endif

    /* This can update new_msr and vector if AIL applies */
    ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);

    powerpc_set_excp_state(cpu, vector, new_msr);
}
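/*
 * Interrupt delivery thus happens in three steps: powerpc_excp() picks the
 * save registers and builds msr/new_msr, ppc_excp_apply_ail() optionally
 * relocates the vector, and powerpc_set_excp_state() commits the new
 * machine state.
 */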
void ppc_cpu_do_interrupt(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, cs->exception_index);
}
static void ppc_hw_interrupt(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    bool async_deliver;

    /* External reset */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
        return;
    }
    /* Machine check exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
        return;
    }
#if 0 /* TODO */
    /* External debug exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
        env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
        powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
        return;
    }
#endif

    /*
     * For interrupts that gate on MSR:EE, we need to do something a
     * bit more subtle, as we need to let them through even when EE is
     * clear when coming out of some power management states (in order
     * for them to become a 0x100).
     */
    async_deliver = (msr_ee != 0) || env->resume_as_sreset;

    /* Hypervisor decrementer exception */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
        /* LPCR will be clear when not supported so this will work */
        bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
        if ((async_deliver || msr_hv == 0) && hdice) {
            /* HDEC clears on delivery */
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
            return;
        }
    }

    /* Hypervisor virtualization interrupt */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
        /* LPCR will be clear when not supported so this will work */
        bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
        if ((async_deliver || msr_hv == 0) && hvice) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
            return;
        }
    }

    /* External interrupt can ignore MSR:EE under some circumstances */
    if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
        bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
        bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
        /* HEIC blocks delivery to the hypervisor */
        if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
            (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
            return;
        }
    }
    if (msr_ce != 0) {
        /* External critical interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
            return;
        }
    }
    if (async_deliver != 0) {
        /* Watchdog timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
            return;
        }
        /* Fixed interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
            return;
        }
        /* Programmable interval timer on embedded PowerPC */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
            return;
        }
        /* Decrementer exception */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
            if (ppc_decr_clear_on_delivery(env)) {
                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
            }
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
            if (is_book3s_arch2x(env)) {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
            } else {
                powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
            }
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
            return;
        }
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
            return;
        }
        /* Thermal interrupt */
        if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
            powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
            return;
        }
    }

    if (env->resume_as_sreset) {
        /*
         * This is a bug ! It means that has_work took us out of halt without
         * anything to deliver while in a PM state that requires getting
         * out via a 0x100
         *
         * This means we will incorrectly execute past the power management
         * instruction instead of triggering a reset.
         *
         * It generally means a discrepancy between the wakeup conditions in
         * the processor has_work implementation and the logic in this
         * function.
         */
        cpu_abort(env_cpu(env),
                  "Wakeup from PM state but interrupt Undelivered");
    }
}
void ppc_cpu_do_system_reset(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
}

void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong msr = 0;

    /*
     * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
     * been set by KVM.
     */
    msr = (1ULL << MSR_ME);
    msr |= env->msr & (1ULL << MSR_SF);
    if (ppc_interrupts_little_endian(cpu)) {
        msr |= (1ULL << MSR_LE);
    }

    powerpc_set_excp_state(cpu, vector, msr);
}
#endif /* !CONFIG_USER_ONLY */
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        ppc_hw_interrupt(env);
        if (env->pending_interrupts == 0) {
            cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
        return true;
    }
    return false;
}

#if defined(DEBUG_OP)
static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
{
    qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
             TARGET_FMT_lx "\n", RA, msr);
}
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
                            uint32_t error_code, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit_restore(cs, raddr);
}

void raise_exception_err(CPUPPCState *env, uint32_t exception,
                         uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}

void raise_exception_ra(CPUPPCState *env, uint32_t exception,
                        uintptr_t raddr)
{
    raise_exception_err_ra(env, exception, 0, raddr);
}

#ifdef CONFIG_TCG
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
                                uint32_t error_code)
{
    raise_exception_err_ra(env, exception, error_code, 0);
}

void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
    raise_exception_err_ra(env, exception, 0, 0);
}
#endif
#if !defined(CONFIG_USER_ONLY)
#ifdef CONFIG_TCG
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
    uint32_t excp = hreg_store_msr(env, val, 0);

    if (excp != 0) {
        CPUState *cs = env_cpu(env);
        cpu_interrupt_exittb(cs);
        raise_exception(env, excp);
    }
}

#if defined(TARGET_PPC64)
void helper_scv(CPUPPCState *env, uint32_t lev)
{
    if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
        raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
    } else {
        raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
    }
}

void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;

    /*
     * The architecture specifies that HDEC interrupts are discarded
     * in PM states
     */
    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);

    /* Condition for waking up at 0x100 */
    env->resume_as_sreset = (insn != PPC_PM_STOP) ||
        (env->spr[SPR_PSSCR] & PSSCR_EC);
}
#endif /* defined(TARGET_PPC64) */
#endif /* CONFIG_TCG */
#ifdef CONFIG_TCG
static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
    CPUState *cs = env_cpu(env);

    /* MSR:POW cannot be set by any form of rfi */
    msr &= ~(1ULL << MSR_POW);

#if defined(TARGET_PPC64)
    /* Switching to 32-bit ? Crop the nip */
    if (!msr_is_64bit(env, msr)) {
        nip = (uint32_t)nip;
    }
#else
    nip = (uint32_t)nip;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /*
     * No need to raise an exception here, as rfi is always the last
     * insn of a TB
     */
    cpu_interrupt_exittb(cs);
    /* Reset the reservation */
    env->reserve_addr = -1;

    /* Context synchronizing: check if TCG TLB needs flush */
    check_tlb_flush(env, false);
}
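/*
 * All of the rfi-family helpers below funnel into do_rfi() and differ only
 * in which SPR pair (or LR/CTR for rfscv) supplies the return NIP and MSR.
 */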
void helper_rfi(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}

#define MSR_BOOK3S_MASK
#if defined(TARGET_PPC64)
void helper_rfid(CPUPPCState *env)
{
    /*
     * The architecture defines a number of rules for which bits can
     * change but in practice, we handle this in hreg_store_msr()
     * which will be called by do_rfi(), so there is no need to filter
     * here
     */
    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
}

void helper_rfscv(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr);
}

void helper_hrfid(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
#endif

/*****************************************************************************/
/* Embedded PowerPC specific helpers */
void helper_40x_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
}

void helper_rfci(CPUPPCState *env)
{
    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
}

void helper_rfdi(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
}

void helper_rfmci(CPUPPCState *env)
{
    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
#endif /* CONFIG_TCG */
#endif /* !defined(CONFIG_USER_ONLY) */
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}

#if defined(TARGET_PPC64)
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
               uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_TRAP, GETPC());
    }
}
#endif
#if !defined(CONFIG_USER_ONLY)
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

#ifdef CONFIG_TCG
void helper_rfsvc(CPUPPCState *env)
{
    do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
}

/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
        /* XXX: this doorbell type is not implemented */
    default:
        break;
    }

    return irq;
}
void helper_msgclr(CPUPPCState *env, target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUState *cs;

    if (irq < 0) {
        return;
    }

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}
/* Server Processor Control */

static bool dbell_type_server(target_ulong rb)
{
    /*
     * A Directed Hypervisor Doorbell message is sent only if the
     * message type is 5. All other types are reserved and the
     * instruction is a no-op
     */
    return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
}

void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
{
    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
}

static void book3s_msgsnd_common(int pir, int irq)
{
    CPUState *cs;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *cenv = &cpu->env;

        /* TODO: broadcast message to all threads of the same processor */
        if (cenv->spr_cb[SPR_PIR].default_value == pir) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
    qemu_mutex_unlock_iothread();
}

void helper_book3s_msgsnd(target_ulong rb)
{
    int pir = rb & DBELL_PROCIDTAG_MASK;

    if (!dbell_type_server(rb)) {
        return;
    }

    book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}
#if defined(TARGET_PPC64)
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
}

/*
 * sends a message to other threads that are on the same
 * multi-threaded processor
 */
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
    int pir = env->spr_cb[SPR_PIR].default_value;

    helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);

    if (!dbell_type_server(rb)) {
        return;
    }

    /* TODO: TCG supports only one thread */

    book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
}
#endif /* TARGET_PPC64 */
#endif /* CONFIG_TCG */
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    CPUPPCState *env = cs->env_ptr;
    uint32_t insn;

    /* Restore state and reload the insn we executed, for filling in DSISR. */
    cpu_restore_state(cs, retaddr, true);
    insn = cpu_ldl_code(env, env->nip);

    cs->exception_index = POWERPC_EXCP_ALIGN;
    env->error_code = insn & 0x03FF0000;
    cpu_loop_exit(cs);
}