/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"
/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC 1
#define BANK_ABT 2
#define BANK_UND 3
#define BANK_IRQ 4
#define BANK_FIQ 5
#define BANK_HYP 6
#define BANK_MON 7
static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}
/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);
/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}
/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif
/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);
#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);
/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz : 8;
    unsigned ps : 3;
    unsigned sh : 2;
    unsigned select : 1;
    bool tbi : 1;
    bool epd : 1;
    bool hpd : 1;
    bool tsz_oob : 1; /* tsz has been clamped to legal range */
    bool ds : 1;
    bool ha : 1;
    bool hd : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 * (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);
int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;
/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */
/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
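/*
 * As a worked illustration: the allocation tag lives in bits [59:56] of
 * the dirty pointer, so allocation_tag_from_addr(0x5A00123456789ABCull)
 * is 0xA, and address_with_allocation_tag(ptr, 0x3) rewrites only that
 * nibble, leaving bits [63:60] and the low 56 address bits untouched.
 */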
/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
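/*
 * For example, with bit55 == 0 the fold matches only ptr_tag == 0x0
 * ((0x0 + 0) & 0xf == 0, i.e. ptr<59:55> == 00000), and with bit55 == 1
 * it matches only ptr_tag == 0xf ((0xf + 1) & 0xf == 0, i.e.
 * ptr<59:55> == 11111); any other tag value leaves the access checked.
 */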
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}
/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};
/* Definitions for the PMU registers */
#define PMCRN_MASK 0xf800
#define PMCRN_SHIFT 11
#define PMCRLP 0x80
#define PMCRLC 0x40
#define PMCRDP 0x20
#define PMCRX 0x10
#define PMCRD 0x8
#define PMCRC 0x4
#define PMCRP 0x2
#define PMCRE 0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P 0x80000000
#define PMXEVTYPER_U 0x40000000
#define PMXEVTYPER_NSK 0x20000000
#define PMXEVTYPER_NSU 0x10000000
#define PMXEVTYPER_NSH 0x08000000
#define PMXEVTYPER_M 0x04000000
#define PMXEVTYPER_MT 0x02000000
#define PMXEVTYPER_EVTCOUNT 0x0000ffff
#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                         PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                         PMXEVTYPER_M | PMXEVTYPER_MT | \
                         PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR 0xf8000000
#define PMCCFILTR_M PMXEVTYPER_M
#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
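/*
 * For example, on a CPU whose PMCR.N reports four event counters this
 * evaluates to (1ULL << 31) | 0xf == 0x8000000f: bit 31 for the cycle
 * counter plus one bit per implemented event counter.
 */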
#ifdef TARGET_AARCH64
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif
/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);
/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
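/*
 * Worked example: with a 48-bit VA (param.tsz == 16) and TBI enabled
 * (param.tbi true), bot_pac_bit is 48 and top_pac_bit is 56, so the
 * returned mask is 0x00ff000000000000: the PAC occupies bits [55:48],
 * with bits [63:56] left for the ignored top byte. With TBI disabled
 * the mask extends up to bit 63.
 */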
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP \
    ((1 << (1 - 1)) | (1 << (2 - 1)) | \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
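/*
 * That is, bits 0, 1, 3, 7 and 15 are set (the value is 0x808b),
 * marking the power-of-two vector lengths VQ = 1, 2, 4, 8 and 16.
 */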
/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the EL0 is AArch32 case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);
/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
#endif