/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H
#include "hw/registerfields.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}
/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
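
/*
 * Worked numbers: one tick every GTIMER_SCALE (16) ns is
 * 10^9 / 16 = 62,500,000 ticks per second, i.e. the 62.5MHz above.
 */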
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)
/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);
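
/*
 * Illustrative sketch (not a real QEMU helper): a helper that traps an
 * unallocated encoding might raise an UNDEF-style exception as
 *
 *     raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                     exception_target_el(env));
 *
 * where syn_uncategorized() and exception_target_el() are defined later
 * in this header.
 */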
/* For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}
/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
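
/*
 * For example (values purely illustrative): update_spsel(env, 1) at EL1
 * with PSTATE.SP currently 0 saves xregs[31] back into sp_el[0], sets
 * PSTATE.SP, and reloads xregs[31] from sp_el[1], so the working stack
 * pointer switches from SP_EL0 to SP_EL1.
 */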
/**
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.  */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
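
/*
 * For instance, a CPU reporting ID_AA64MMFR0.PARANGE == 5 (an
 * illustrative value) maps to pamax_map[5] == 48, i.e. a 48-bit
 * physical address space.
 */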
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}
/* Valid Syndrome Register EC field values */
enum arm_exception_class {
    EC_UNCATEGORIZED          = 0x00,
    EC_WFX_TRAP               = 0x01,
    EC_CP15RTTRAP             = 0x03,
    EC_CP15RRTTRAP            = 0x04,
    EC_CP14RTTRAP             = 0x05,
    EC_CP14DTTRAP             = 0x06,
    EC_ADVSIMDFPACCESSTRAP    = 0x07,
    EC_FPIDTRAP               = 0x08,
    EC_PACTRAP                = 0x09,
    EC_CP14RRTTRAP            = 0x0c,
    EC_BTITRAP                = 0x0d,
    EC_ILLEGALSTATE           = 0x0e,
    EC_AA32_SVC               = 0x11,
    EC_AA32_HVC               = 0x12,
    EC_AA32_SMC               = 0x13,
    EC_AA64_SVC               = 0x15,
    EC_AA64_HVC               = 0x16,
    EC_AA64_SMC               = 0x17,
    EC_SYSTEMREGISTERTRAP     = 0x18,
    EC_SVEACCESSTRAP          = 0x19,
    EC_INSNABORT              = 0x20,
    EC_INSNABORT_SAME_EL      = 0x21,
    EC_PCALIGNMENT            = 0x22,
    EC_DATAABORT              = 0x24,
    EC_DATAABORT_SAME_EL      = 0x25,
    EC_SPALIGNMENT            = 0x26,
    EC_AA32_FPTRAP            = 0x28,
    EC_AA64_FPTRAP            = 0x2c,
    EC_SERROR                 = 0x2f,
    EC_BREAKPOINT             = 0x30,
    EC_BREAKPOINT_SAME_EL     = 0x31,
    EC_SOFTWARESTEP           = 0x32,
    EC_SOFTWARESTEP_SAME_EL   = 0x33,
    EC_WATCHPOINT             = 0x34,
    EC_WATCHPOINT_SAME_EL     = 0x35,
    EC_AA32_BKPT              = 0x38,
    EC_VECTORCATCH            = 0x3a,
    EC_AA64_BKPT              = 0x3c,
};
#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)

static inline uint32_t syn_get_ec(uint32_t syn)
{
    return syn >> ARM_EL_EC_SHIFT;
}
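
/*
 * A syndrome value therefore decomposes as:
 *   bits [31:26]: EC (exception class), extracted by syn_get_ec()
 *   bit  [25]:    IL (32-bit instruction length)
 *   bits [24:0]:  ISS, encoding information specific to the EC
 * which is what the ARM_EL_* definitions above describe.
 */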
/* Utility functions for constructing various kinds of syndrome value.
 * Note that in general we follow the AArch64 syndrome values; in a
 * few cases the value in HSR for exceptions taken to AArch32 Hyp
 * mode differs slightly, and we fix this up when populating HSR in
 * arm_cpu_do_interrupt_aarch32_hyp().
 * The exception is FP/SIMD access traps -- these report extra information
 * when taking an exception to AArch32. For those we include the extra coproc
 * and TA fields, and mask them out when taking the exception to AArch64.
 */
static inline uint32_t syn_uncategorized(void)
{
    return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}
static inline uint32_t syn_aa64_svc(uint32_t imm16)
{
    return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_hvc(uint32_t imm16)
{
    return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_smc(uint32_t imm16)
{
    return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa32_hvc(uint32_t imm16)
{
    return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_smc(void)
{
    return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
    return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}
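
/*
 * Worked example: syn_aa64_bkpt(0) evaluates to
 * (0x3c << 26) | (1 << 25) = 0xf2000000, i.e. EC_AA64_BKPT with the
 * IL bit set and a zero comment field in the ISS.
 */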
static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
                                           int crn, int crm, int rt,
                                           int isread)
{
    return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
        | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
        | (crm << 1) | isread;
}
static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
    /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | 0xa;
}

static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
{
    /* AArch32 SIMD trap: TA == 1 coproc == 0 */
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (1 << 5);
}

static inline uint32_t syn_sve_access_trap(void)
{
    return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
}
static inline uint32_t syn_pactrap(void)
{
    return EC_PACTRAP << ARM_EL_EC_SHIFT;
}

static inline uint32_t syn_btitrap(int btype)
{
    return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
}
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
{
    return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
}

static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
                                             int ea, int cm, int s1ptw,
                                             int wnr, int fsc)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
           | ARM_EL_IL
           | (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
           | (wnr << 6) | fsc;
}

static inline uint32_t syn_data_abort_with_iss(int same_el,
                                               int sas, int sse, int srt,
                                               int sf, int ar,
                                               int ea, int cm, int s1ptw,
                                               int wnr, int fsc,
                                               bool is_16bit)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
           | (is_16bit ? 0 : ARM_EL_IL)
           | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
           | (sf << 15) | (ar << 14)
           | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}
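
/*
 * Note the (same_el << ARM_EL_EC_SHIFT) term above: the "same EL"
 * variant of each abort EC is the base EC plus one (EC_INSNABORT 0x20
 * vs EC_INSNABORT_SAME_EL 0x21, EC_DATAABORT 0x24 vs 0x25), so OR-ing
 * same_el into the EC field selects between them. For instance, with
 * same_el = 1, syn_get_ec(syn_insn_abort(1, 0, 0, fsc)) is
 * EC_INSNABORT_SAME_EL.
 */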
static inline uint32_t syn_swstep(int same_el, int isv, int ex)
{
    return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
}

static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
{
    return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
}

static inline uint32_t syn_breakpoint(int same_el)
{
    return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | 0x22;
}

static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
{
    return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
           (is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
           (cv << 24) | (cond << 20) | ti;
}
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif
/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
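
/*
 * Worked example: a level-2 translation fault (fsc 0x7) in domain 3
 * encodes as 0x7 | (3 << 4) = 0x37 in the short-format FSR.
 */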
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9; /* LPAE */
    return fsc;
}
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    }
    return mmu_idx | ARM_MMU_IDX_A;
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
/* Return true if the stage 1 translation regime is using LPAE format page
 * tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Return true if this address translation regime has two ranges.  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}
static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}
/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
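
/*
 * Illustrative use from a logging path (a sketch, not a fixed API):
 *
 *     qemu_log("entering mode %s\n",
 *              aarch32_mode_name(env->uncached_cpsr));
 */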
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);
/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality*/
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }

    return valid;
}
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);
static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */
/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS  6

/* We associate one allocation tag per 16 bytes, the minimum.  */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
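
/*
 * Worked numbers: 2^GMID_EL1_BS = 64 four-byte words is 256 bytes, and
 * with one 4-bit tag per TAG_GRANULE (16 bytes) such a block carries
 * 256 / 16 = 16 tags, i.e. 64 bits of tags, as described above.
 */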
/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ESIZE, 9, 5)
FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra);
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
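
/*
 * Illustrative round trip (hypothetical pointer values):
 *
 *     uint64_t p = address_with_allocation_tag(0x1000, 0x5);
 *     // p == 0x0500000000001000ull: tag 0x5 deposited into bits [59:56]
 *     int tag = allocation_tag_from_addr(p);  // tag == 0x5
 */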
/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
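
/*
 * For example (hypothetical inputs): with bit55 = 1 and ptr_tag = 0xf,
 * ((0xf + 1) & 0xf) == 0, so bits <59:55> of the pointer are all ones
 * and the access is unchecked whenever the corresponding TCMA bit is
 * set in desc.
 */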
/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
    /* TBI is known to be enabled. */
#ifdef CONFIG_USER_ONLY
    ptr = sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}