/*
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}
static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val = 0;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;

    table = (uint64_t *)&env->vfp.regs[rn];
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
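/* Worked example (illustrative, not from the original source): with
 * ireg = 0x04010003 and maxindex = 4, the four selector bytes are 0x03,
 * 0x00, 0x01 and 0x04, so the result packs table bytes T[3], T[0], T[1]
 * and then the corresponding byte of 'def', because index 4 is out of
 * range for this table.
 */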
#if !defined(CONFIG_USER_ONLY)
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}
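/* Illustration (assumed typical values, not part of the original source):
 * a single-register load that faults with ISV set in its template and is
 * routed to EL2 with s1ptw == 0 takes the with-ISS path, so the SAS/SSE/
 * SRT/SF fields recorded at translation time survive into the runtime
 * syndrome; the same fault routed to EL1 takes the no-ISS path and reports
 * only EA, S1PTW, WnR and the FSC.
 */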
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          uint32_t fsr, uint32_t fsc, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc;

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (fsc == 0x3f) {
        /* Caller doesn't have a long-format fault status code. This
         * should only happen if this fault will never actually be reported
         * to an EL that uses a syndrome register. Check that here.
         * 0x3f is a (currently) reserved FSC code, in case the constructed
         * syndrome does leak into the guest somehow.
         */
        assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        uint32_t fsc;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        if (fsr & (1 << 9)) {
            /* LPAE format fault status register : bottom 6 bits are
             * status code in the same form as needed for syndrome
             */
            fsc = extract32(fsr, 0, 6);
        } else {
            /* Short format FSR : this fault will never actually be reported
             * to an EL that uses a syndrome register. Use a (currently)
             * reserved FSR code in case the constructed syndrome does leak
             * into the guest somehow. deliver_fault will assert that
             * we don't target an EL using the syndrome.
             */
            fsc = 0x3f;
        }

        deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
    }
}
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t fsr, fsc;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        fsr = (1 << 9) | 0x21;
    } else {
        fsr = 0x1;
    }
    fsc = 0x21;

    deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
}
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t fsr, fsc;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    fi.ea = (response != MEMTX_DECODE_ERROR);

    /* The fault status register format depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format.
     */
    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* long descriptor form, STATUS 0b010000: synchronous ext abort */
        fsr = (fi.ea << 12) | (1 << 9) | 0x10;
    } else {
        /* short descriptor form, FSR 0b01000 : synchronous ext abort */
        fsr = (fi.ea << 12) | 0x8;
    }
    fsc = 0x10;

    deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
}
#endif /* !defined(CONFIG_USER_ONLY) */
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}
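/* Worked example (illustrative, not from the original source): for
 * a = 0x7fffffff, b = 1, res = 0x80000000. (res ^ a) has the sign bit set
 * (the result changed sign) while (a ^ b) does not (the operands shared a
 * sign), so signed overflow is detected and the Q flag is set.
 */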
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
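/* The saturation-value trick, worked through (illustrative):
 * (int32_t)a >> 31 is 0 for non-negative a and 0xffffffff for negative a,
 * so the expression yields ~SIGNBIT = 0x7fffffff (INT32_MAX) on positive
 * overflow and 0x80000000 (INT32_MIN) on negative overflow.
 */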
uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}
uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
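/* Worked example (illustrative): unsigned overflow is detected by
 * wraparound. For the add, a = 0xffffffff, b = 2 gives res = 1 < a, so it
 * saturates to ~0 = 0xffffffff; for the subtract, a = 1, b = 2 gives
 * res = 0xffffffff > a, so it saturates to 0.
 */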
/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}
/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
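/* Worked examples (illustrative): do_ssat(env, 300, 7) has
 * top = 300 >> 7 = 2 > 0, so it saturates to mask = 127, the signed 8-bit
 * maximum; do_usat(env, -5, 8) saturates to 0 and do_usat(env, 300, 8) to
 * 255. In each case QF records that saturation occurred.
 */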
/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}
/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}
/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}
void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}
void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    g_assert(!parallel_cpus);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}
/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}
/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}
void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}
uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}
void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}
uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
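/* Worked example (illustrative): MSR DAIFSet, #0x3 arrives here with
 * imm = 3; (imm << 6) & PSTATE_DAIF sets bits 6 and 7, masking FIQs (F)
 * and IRQs (I) while leaving SError (A, bit 8) and Debug (D, bit 9)
 * untouched.
 */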
void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* If PSCI is enabled and this looks like a valid PSCI call then
     * suppress the UNDEF -- we'll catch the SMC exception and
     * implement the PSCI call behaviour there.
     */
    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
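/* Worked example (illustrative): an AArch64 SPSR with M[3:0] = 0b0101 (EL1h)
 * has nRW clear, M[1] clear and M[3:0] != 1, so this returns
 * extract32(spsr, 2, 2) = 1, i.e. a return to EL1 using SP_EL1.
 */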
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}
static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}
)(CPUARMState
*env
)
1326 ARMCPU
*cpu
= arm_env_get_cpu(env
);
1328 if (check_breakpoints(cpu
)) {
1329 HELPER(exception_internal(env
, EXCP_DEBUG
));
1333 bool arm_debug_check_watchpoint(CPUState
*cs
, CPUWatchpoint
*wp
)
1335 /* Called by core code when a CPU watchpoint fires; need to check if this
1336 * is also an architectural watchpoint match.
1338 ARMCPU
*cpu
= ARM_CPU(cs
);
1340 return check_watchpoints(cpu
);
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}
void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }

        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
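/* Worked example (illustrative): x = 0x80000001, i = 1 shifts bit 31 out
 * into the carry, so CF = 1 and the result is 0x00000002; i = 0 leaves
 * both x and CF unchanged, matching ARM LSL-by-register flag semantics.
 */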
uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}
uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}
uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
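/* Worked example (illustrative): x = 0x00000001, i = 1 gives shift = 1,
 * CF = bit 0 of x = 1 and a result of 0x80000000; i = 32 gives shift = 0
 * with shift1 != 0, so x is returned unchanged and CF becomes bit 31,
 * matching the ARM ROR-by-register special case.
 */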