/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

/* Sign-bit masks used by the saturating arithmetic helpers below */
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

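/* Return the exception level to which an exception should be routed from
 * the current state: EL1, or the current EL if that is higher.
 */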
static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

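/* Neon table lookup (VTBL/VTBX): each byte of ireg selects a byte from the
 * table starting at D register rn; indexes of maxindex or above take the
 * corresponding byte of def instead.
 */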
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        unsigned int target_el;
        bool same_el;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        target_el = exception_target_el(env);
        if (fi.stage2) {
            target_el = 2;
            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
        }
        same_el = arm_current_el(env) == target_el;
        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (is_write == 2) {
            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = syn_data_abort(same_el, 0, 0, fi.s1ptw, is_write == 1, syn);
            if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, target_el);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
                                 int is_user, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    target_el = exception_target_el(env);
    same_el = (arm_current_el(env) == target_el);

    env->exception.vaddress = vaddr;

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
        env->exception.fsr = 0x21;
    } else {
        env->exception.fsr = 0x1;
    }

    if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
        env->exception.fsr |= (1 << 11);
    }

    raise_exception(env, EXCP_DATA_ABORT,
                    syn_data_abort(same_el, 0, 0, 0, is_write == 1, 0x21),
                    target_el);
}

#endif /* !defined(CONFIG_USER_ONLY) */

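/* Saturating arithmetic helpers. Signed overflow of a + b is detected with
 * the usual identity: it occurred iff the operands have the same sign but
 * the result's sign differs, i.e. ((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT).
 * For example, 0x7fffffff + 1 wraps to 0x80000000: res ^ a has the sign bit
 * set while a ^ b does not, so the sticky saturation flag QF is set. On
 * saturation, ~(((int32_t)a >> 31) ^ SIGNBIT) yields 0x7fffffff when a is
 * non-negative and 0x80000000 when it is negative.
 */
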
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

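/* Saturating doubling, used when translating QDADD/QDSUB: clamp 2 * val
 * to the int32 range, setting QF on saturation.
 */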
uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        /* Wind the PC back so that ELR points at the trapped WFI insn */
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

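/* Check access permissions for a coprocessor/system register access and
 * raise the appropriate exception if the register's access function
 * denies it.
 */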
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

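/* Read and write system registers via the read/write callbacks registered
 * in their ARMCPRegInfo; used for registers whose accesses are too complex
 * to handle inline in generated code.
 */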
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

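/* Perform the architectural checks for an HVC instruction, raising UNDEF
 * if HVC is not enabled from the current state.
 */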
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 AArch32, SMD only applies to NS state.
     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
     * the EL2 condition here.
     */
    bool undef = is_a64(env) ? smd : (!secure && smd);

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}

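/* Implement the AArch64 ERET instruction: restore PSTATE/CPSR from the
 * banked SPSR and the PC from ELR_ELx, taking the architecturally mandated
 * illegal-return path when the SPSR requests an invalid destination.
 */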
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        env->uncached_cpsr = spsr & CPSR_M;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}

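/* Return true if breakpoint/watchpoint n (a watchpoint when is_wp is true)
 * fires given the current CPU state and debug register contents.
 */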
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

*cpu
)
951 CPUARMState
*env
= &cpu
->env
;
954 /* If breakpoints are disabled globally or we can't take debug
955 * exceptions here then breakpoint firings are ignored.
957 if (extract32(env
->cp15
.mdscr_el1
, 15, 1) == 0
958 || !arm_generate_debug_exceptions(env
)) {
962 for (n
= 0; n
< ARRAY_SIZE(env
->cpu_breakpoint
); n
++) {
963 if (bp_wp_matches(cpu
, n
, false)) {
970 void HELPER(check_breakpoints
)(CPUARMState
*env
)
972 ARMCPU
*cpu
= arm_env_get_cpu(env
);
974 if (check_breakpoints(cpu
)) {
975 HELPER(exception_internal(env
, EXCP_DEBUG
));
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }

        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

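/* For example, shl_cc(env, 0x80000001, 1) shifts bit 31 out into the carry,
 * so it sets CF to 1 and returns 0x00000002; a zero shift count leaves CF
 * unchanged.
 */
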
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}