1 #include "qemu/osdep.h"
2 #include "target/arm/idau.h"
6 #include "exec/gdbstub.h"
7 #include "exec/helper-proto.h"
8 #include "qemu/host-utils.h"
9 #include "sysemu/arch_init.h"
10 #include "sysemu/sysemu.h"
11 #include "qemu/bitops.h"
12 #include "qemu/crc32c.h"
13 #include "exec/exec-all.h"
14 #include "exec/cpu_ldst.h"
16 #include <zlib.h> /* For crc32 */
17 #include "exec/semihost.h"
18 #include "sysemu/cpus.h"
19 #include "sysemu/kvm.h"
20 #include "fpu/softfloat.h"
21 #include "qemu/range.h"
23 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}
static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
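
/* Minimal sketch of the intended check-by-readback usage (this is exactly
 * what write_list_to_cpustate() below does):
 *
 *     write_raw_cp_reg(env, ri, v);
 *     if (read_raw_cp_reg(env, ri) != v) {
 *         ... the write was ignored, e.g. a constant register ...
 *     }
 */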
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}
/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
443 /* Check for traps to "powerdown debug" registers, which are controlled
446 static CPAccessResult
access_tdosa(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
449 int el
= arm_current_el(env
);
450 bool mdcr_el2_tdosa
= (env
->cp15
.mdcr_el2
& MDCR_TDOSA
) ||
451 (env
->cp15
.mdcr_el2
& MDCR_TDE
) ||
452 (arm_hcr_el2_eff(env
) & HCR_TGE
);
454 if (el
< 2 && mdcr_el2_tdosa
&& !arm_is_secure_below_el3(env
)) {
455 return CP_ACCESS_TRAP_EL2
;
457 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDOSA
)) {
458 return CP_ACCESS_TRAP_EL3
;
463 /* Check for traps to "debug ROM" registers, which are controlled
464 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
466 static CPAccessResult
access_tdra(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
469 int el
= arm_current_el(env
);
470 bool mdcr_el2_tdra
= (env
->cp15
.mdcr_el2
& MDCR_TDRA
) ||
471 (env
->cp15
.mdcr_el2
& MDCR_TDE
) ||
472 (arm_hcr_el2_eff(env
) & HCR_TGE
);
474 if (el
< 2 && mdcr_el2_tdra
&& !arm_is_secure_below_el3(env
)) {
475 return CP_ACCESS_TRAP_EL2
;
477 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDA
)) {
478 return CP_ACCESS_TRAP_EL3
;
/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
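
/* Worked example for the extraction above: the TLBIIPAS2 value carries
 * IPA[39:12] in bits [27:0], so (value << 12) rebuilds the byte address and
 * sextract64(..., 0, 40) keeps the 40-bit range; e.g. value 0x12345 selects
 * IPA 0x12345000.
 */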
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRDP  0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
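
/* For reference, the composed masks above evaluate to:
 *   PMXEVTYPER_MASK = 0xfe00ffff
 *   PMCCFILTR_EL0   = 0xfc000000
 */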
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}
/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
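
/* Worked example: with PMCR.N == 4 event counters this returns 0x8000000f,
 * i.e. bit 31 for the cycle counter plus bits [3:0] for the event counters.
 */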
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     * which is returned when the counter is read.
     */
    uint64_t (*get_count)(CPUARMState *);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}
static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}
/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}
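
/* Worked example: with ARM_CPU_FREQ fixed at 1 GHz the scale factor is 1:1,
 * so 2500000 ns of QEMU_CLOCK_VIRTUAL time reads back as 2500000 cycles;
 * muldiv64() only changes the result once the frequency becomes configurable.
 */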
#ifndef CONFIG_USER_ONLY
static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}
#endif
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
    },
#endif
};
/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1 << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        env->cp15.c15_ccnt = eff_cycles - env->cp15.c15_ccnt_delta;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;

        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }

        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
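
/* Sketch of the start/finish bookkeeping used by the accessors below:
 * between the two calls c15_ccnt holds the guest-visible count (raw cycles
 * minus c15_ccnt_delta); afterwards only the delta is kept, so the counter
 * keeps "running" without being recomputed on every tick:
 *
 *     pmccntr_op_start(env);        // c15_ccnt = cycles - delta
 *     env->cp15.c15_ccnt = value;   // operate on the guest-visible count
 *     pmccntr_op_finish(env);       // delta = cycles - c15_ccnt
 */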
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        env->cp15.c14_pmevcntr[counter] =
            count - env->cp15.c14_pmevcntr_delta[counter];
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);
            env->cp15.c14_pmevcntr[i]++;
            pmevcntr_op_finish(env, i);
        }
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}
static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
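
/* The counter index decode above follows the PMEVTYPER<n>/PMEVCNTR<n>
 * encoding, which splits n across CRm[1:0] and opc2[2:0]: e.g. counter 11
 * (0b01011) is reached via crm = 0b1001, opc2 = 0b011.
 */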
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}
*env
, const ARMCPRegInfo
*ri
)
1603 uint8_t counter
= ((ri
->crm
& 3) << 3) | (ri
->opc2
& 7);
1604 return pmevtyper_read(env
, ri
, counter
);
1607 static void pmxevtyper_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1610 pmevtyper_write(env
, ri
, value
, env
->cp15
.c9_pmselr
& 31);
1613 static uint64_t pmxevtyper_read(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1615 return pmevtyper_read(env
, ri
, env
->cp15
.c9_pmselr
& 31);
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}
static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
1810 static const ARMCPRegInfo v7_cp_reginfo
[] = {
1811 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1812 { .name
= "NOP", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
1813 .access
= PL1_W
, .type
= ARM_CP_NOP
},
1814 /* Performance monitors are implementation defined in v7,
1815 * but with an ARM recommended set of registers, which we
1818 * Performance registers fall into three categories:
1819 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1820 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1821 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1822 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1823 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1825 { .name
= "PMCNTENSET", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 1,
1826 .access
= PL0_RW
, .type
= ARM_CP_ALIAS
,
1827 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcnten
),
1828 .writefn
= pmcntenset_write
,
1829 .accessfn
= pmreg_access
,
1830 .raw_writefn
= raw_write
},
1831 { .name
= "PMCNTENSET_EL0", .state
= ARM_CP_STATE_AA64
,
1832 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 1,
1833 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1834 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
), .resetvalue
= 0,
1835 .writefn
= pmcntenset_write
, .raw_writefn
= raw_write
},
1836 { .name
= "PMCNTENCLR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 2,
1838 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcnten
),
1839 .accessfn
= pmreg_access
,
1840 .writefn
= pmcntenclr_write
,
1841 .type
= ARM_CP_ALIAS
},
1842 { .name
= "PMCNTENCLR_EL0", .state
= ARM_CP_STATE_AA64
,
1843 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 2,
1844 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1845 .type
= ARM_CP_ALIAS
,
1846 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
),
1847 .writefn
= pmcntenclr_write
},
1848 { .name
= "PMOVSR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 3,
1850 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmovsr
),
1851 .accessfn
= pmreg_access
,
1852 .writefn
= pmovsr_write
,
1853 .raw_writefn
= raw_write
},
1854 { .name
= "PMOVSCLR_EL0", .state
= ARM_CP_STATE_AA64
,
1855 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 3,
1856 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1857 .type
= ARM_CP_ALIAS
,
1858 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmovsr
),
1859 .writefn
= pmovsr_write
,
1860 .raw_writefn
= raw_write
},
1861 { .name
= "PMSWINC", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 4,
1862 .access
= PL0_W
, .accessfn
= pmreg_access_swinc
, .type
= ARM_CP_NO_RAW
,
1863 .writefn
= pmswinc_write
},
1864 { .name
= "PMSWINC_EL0", .state
= ARM_CP_STATE_AA64
,
1865 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 4,
1866 .access
= PL0_W
, .accessfn
= pmreg_access_swinc
, .type
= ARM_CP_NO_RAW
,
1867 .writefn
= pmswinc_write
},
1868 { .name
= "PMSELR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 5,
1869 .access
= PL0_RW
, .type
= ARM_CP_ALIAS
,
1870 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmselr
),
1871 .accessfn
= pmreg_access_selr
, .writefn
= pmselr_write
,
1872 .raw_writefn
= raw_write
},
1873 { .name
= "PMSELR_EL0", .state
= ARM_CP_STATE_AA64
,
1874 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 5,
1875 .access
= PL0_RW
, .accessfn
= pmreg_access_selr
,
1876 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmselr
),
1877 .writefn
= pmselr_write
, .raw_writefn
= raw_write
, },
1878 { .name
= "PMCCNTR", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 0,
1879 .access
= PL0_RW
, .resetvalue
= 0, .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
1880 .readfn
= pmccntr_read
, .writefn
= pmccntr_write32
,
1881 .accessfn
= pmreg_access_ccntr
},
1882 { .name
= "PMCCNTR_EL0", .state
= ARM_CP_STATE_AA64
,
1883 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 13, .opc2
= 0,
1884 .access
= PL0_RW
, .accessfn
= pmreg_access_ccntr
,
1886 .fieldoffset
= offsetof(CPUARMState
, cp15
.c15_ccnt
),
1887 .readfn
= pmccntr_read
, .writefn
= pmccntr_write
,
1888 .raw_readfn
= raw_read
, .raw_writefn
= raw_write
, },
1889 { .name
= "PMCCFILTR", .cp
= 15, .opc1
= 0, .crn
= 14, .crm
= 15, .opc2
= 7,
1890 .writefn
= pmccfiltr_write_a32
, .readfn
= pmccfiltr_read_a32
,
1891 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1892 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
1894 { .name
= "PMCCFILTR_EL0", .state
= ARM_CP_STATE_AA64
,
1895 .opc0
= 3, .opc1
= 3, .crn
= 14, .crm
= 15, .opc2
= 7,
1896 .writefn
= pmccfiltr_write
, .raw_writefn
= raw_write
,
1897 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1899 .fieldoffset
= offsetof(CPUARMState
, cp15
.pmccfiltr_el0
),
1901 { .name
= "PMXEVTYPER", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 1,
1902 .access
= PL0_RW
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
1903 .accessfn
= pmreg_access
,
1904 .writefn
= pmxevtyper_write
, .readfn
= pmxevtyper_read
},
1905 { .name
= "PMXEVTYPER_EL0", .state
= ARM_CP_STATE_AA64
,
1906 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 13, .opc2
= 1,
1907 .access
= PL0_RW
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
1908 .accessfn
= pmreg_access
,
1909 .writefn
= pmxevtyper_write
, .readfn
= pmxevtyper_read
},
1910 { .name
= "PMXEVCNTR", .cp
= 15, .crn
= 9, .crm
= 13, .opc1
= 0, .opc2
= 2,
1911 .access
= PL0_RW
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
1912 .accessfn
= pmreg_access_xevcntr
,
1913 .writefn
= pmxevcntr_write
, .readfn
= pmxevcntr_read
},
1914 { .name
= "PMXEVCNTR_EL0", .state
= ARM_CP_STATE_AA64
,
1915 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 13, .opc2
= 2,
1916 .access
= PL0_RW
, .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
1917 .accessfn
= pmreg_access_xevcntr
,
1918 .writefn
= pmxevcntr_write
, .readfn
= pmxevcntr_read
},
1919 { .name
= "PMUSERENR", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 0,
1920 .access
= PL0_R
| PL1_RW
, .accessfn
= access_tpm
,
1921 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmuserenr
),
1923 .writefn
= pmuserenr_write
, .raw_writefn
= raw_write
},
1924 { .name
= "PMUSERENR_EL0", .state
= ARM_CP_STATE_AA64
,
1925 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 14, .opc2
= 0,
1926 .access
= PL0_R
| PL1_RW
, .accessfn
= access_tpm
, .type
= ARM_CP_ALIAS
,
1927 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmuserenr
),
1929 .writefn
= pmuserenr_write
, .raw_writefn
= raw_write
},
1930 { .name
= "PMINTENSET", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 1,
1931 .access
= PL1_RW
, .accessfn
= access_tpm
,
1932 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
1933 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pminten
),
1935 .writefn
= pmintenset_write
, .raw_writefn
= raw_write
},
1936 { .name
= "PMINTENSET_EL1", .state
= ARM_CP_STATE_AA64
,
1937 .opc0
= 3, .opc1
= 0, .crn
= 9, .crm
= 14, .opc2
= 1,
1938 .access
= PL1_RW
, .accessfn
= access_tpm
,
1940 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1941 .writefn
= pmintenset_write
, .raw_writefn
= raw_write
,
1942 .resetvalue
= 0x0 },
1943 { .name
= "PMINTENCLR", .cp
= 15, .crn
= 9, .crm
= 14, .opc1
= 0, .opc2
= 2,
1944 .access
= PL1_RW
, .accessfn
= access_tpm
,
1945 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
1946 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1947 .writefn
= pmintenclr_write
, },
1948 { .name
= "PMINTENCLR_EL1", .state
= ARM_CP_STATE_AA64
,
1949 .opc0
= 3, .opc1
= 0, .crn
= 9, .crm
= 14, .opc2
= 2,
1950 .access
= PL1_RW
, .accessfn
= access_tpm
,
1951 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
1952 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pminten
),
1953 .writefn
= pmintenclr_write
},
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
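
/* Note for the access check above: timeridx selects the CNTKCTL enable
 * bit directly, since GTIMER_PHYS is 0 and GTIMER_VIRT is 1, matching
 * the EL0PCTEN (bit 0) and EL0VCTEN (bit 1) positions in CNTKCTL.
 */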
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
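
/* The "9 - timeridx" expression above relies on the same index mapping:
 * CNTKCTL.EL0PTEN is bit 9 and CNTKCTL.EL0VTEN is bit 8, so with
 * GTIMER_PHYS == 0 and GTIMER_VIRT == 1 it picks the right per-timer
 * enable bit.
 */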
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
2229 static CPAccessResult
gt_stimer_access(CPUARMState
*env
,
2230 const ARMCPRegInfo
*ri
,
2233 /* The AArch64 register view of the secure physical timer is
2234 * always accessible from EL3, and configurably accessible from
2237 switch (arm_current_el(env
)) {
2239 if (!arm_is_secure(env
)) {
2240 return CP_ACCESS_TRAP
;
2242 if (!(env
->cp15
.scr_el3
& SCR_ST
)) {
2243 return CP_ACCESS_TRAP_EL3
;
2245 return CP_ACCESS_OK
;
2248 return CP_ACCESS_TRAP
;
2250 return CP_ACCESS_OK
;
2252 g_assert_not_reached();
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
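
/* With GTIMER_SCALE defined as 16 (one tick per 16ns of the virtual
 * clock) the system counter advances at 62.5MHz, which is also the
 * CNTFRQ reset value programmed in generic_timer_cp_reginfo below.
 */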
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}
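
/* As the architecture specifies, the virtual count read above is simply
 * the physical count minus the virtual offset CNTVOFF_EL2.
 */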
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
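
/* In the CNT*_CTL handling above only bits [1:0] (ENABLE and IMASK) are
 * guest-writable; bit 2 (ISTATUS) is read-only and is recomputed by
 * gt_recalc_timer() whenever the enable bit changes.
 */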
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
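
/* These QEMUTimer callbacks simply re-run the recalculation: when the
 * deadline programmed by gt_recalc_timer() fires, the recalc either
 * raises the interrupt line (ISTATUS newly set) or re-arms the timer
 * for any remaining period.
 */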
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
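
/* The masks above drop PAR bits that are reserved in the 32-bit format
 * for the respective architecture versions; notably the v7 mask keeps
 * the NS bit (bit 9) writable while the pre-v7 one does not.
 */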
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
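
/* A note on the 64-bit PAR assembled above: bit 11 is always set to
 * mark the long format; on success the physical address is inserted
 * from bit 12 upwards, with NS at bit 9, SH at bits [8:7] and the
 * MAIR-style ATTR field at bits [63:56]; on a fault, F (bit 0) is set
 * with the fault status in bits [6:1], stage-2 "S" at bit 9 and PTW
 * at bit 8.
 */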
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
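
/* Worked example for the two helpers above: the extended format holds
 * one 4-bit access-permission field per region, of which only the low
 * two bits matter here; simple_mpu_ap_bits() packs those low two bits
 * of each nibble into a 2-bits-per-region value, and
 * extended_mpu_ap_bits() spreads them back out, e.g. 0x21 <-> 0x0201.
 */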
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
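
/* DRBAR, DRSR and DRACR are banked per-region: the reginfo fieldoffset
 * below points at the base of the relevant pmsav7 array, and the
 * helpers above index it with the current region number from RGNR
 * (pmsav7.rnr).
 */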
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
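
/* For example, with TTBCR.N == 2 the code above yields
 * mask == 0xc0000000 (the VA bits that select TTBR1 over TTBR0) and
 * base_mask == 0xfffff000 (the valid TTBR0 table base bits), while the
 * reset case of N == 0 gives the base_mask of 0xffffc000 used in
 * vmsa_ttbcr_reset() below.
 */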
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = arm_env_get_cpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
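
/* The VTTBR flush above is conservative: the VMID proper lives in bits
 * [55:48] of VTTBR_EL2, but rather than comparing just that field the
 * code drops the TLB entries for the NS EL1&0 and stage-2 regimes on
 * any change of the register's value.
 */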
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}
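
/* i.e. with EL2 present, non-secure EL1 reads of MIDR are satisfied
 * from VPIDR_EL2, letting a hypervisor present a different CPU
 * identity to its guest; mpidr_read() below does the same with
 * VMPIDR_EL2.
 */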
static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vmalle1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}
static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vae1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
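
/* Note the differing field widths above: the IPA invalidations extract
 * only bits [47:12] of the written value (sextract64(..., 0, 48)),
 * whereas the VA-based handlers use a 56-bit field to cover the larger
 * virtual address space.
 */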
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
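
/* The value assembled above is the DCZID_EL0 format: the low bits hold
 * dcz_blocksize (log2 of the DC ZVA block size in words) and bit 4 is
 * DZP, which reads as 1 when DC ZVA is prohibited.
 */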
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
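
/* SPSel has no storage of its own here: it is simply the PSTATE_SP bit
 * of env->pstate, and update_spsel() handles switching the banked stack
 * pointers when the bit actually changes.
 */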
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}
static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
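
/* SDCR is the AArch32 counterpart of MDCR_EL3, so the write above lands
 * in cp15.mdcr_el3 with only the architecturally valid bits
 * (SDCR_VALID_MASK) kept; the reginfo entry for SDCR below aliases the
 * low half of that same field.
 */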
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage 1 and enables stage 2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS == 1 || SCR_EL3.EEL2 == 1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
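
/* The two encodings above reflect the architecture: the AArch32 OS Lock
 * Access Register takes the 0xC5ACCE55 key to set the lock, while
 * AArch64's OSLAR_EL1 defines only bit 0; either way the result is
 * deposited into the OSLK bit (bit 1) of OSLSR_EL1.
 */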
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
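/* Worked example for the CPACR_EL1.ZEN decode above (illustrative, not an
 * exhaustive truth table): with ZEN == 0b01, bit 16 is set and bit 17 is
 * clear, so "disabled" becomes true only when el == 0. An EL0 SVE access
 * then traps to EL1, or to EL2 when EL2 exists and HCR_EL2.TGE routes the
 * trap there, while an EL1 access falls through to the CPACR.FPEN check.
 */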
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }
    return zcr_len;
}
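/* Worked example with illustrative values: on a CPU with sve_max_vq == 16
 * (2048-bit vectors), ZCR_EL1 == 3 and ZCR_EL2 == 5, a call with el == 1
 * computes MIN(15, 3) == 3 at the EL1 step and MIN(3, 5) == 3 at the EL2
 * step, i.e. an effective vector length of (3 + 1) * 128 == 512 bits.
 */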
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
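/* Note that only the narrowing case needs work here: when a write shrinks
 * the effective vector length, aarch64_sve_narrow_vq() zeroes the bits of
 * the Z and P registers beyond the new length, so a later write that grows
 * the length again only re-exposes bits that are already zero.
 */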
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
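/* Worked example for the BAS decode above: with wcr.BAS == 0b00111100 and
 * a doubleword-aligned WVR, basstart == ctz32(0x3c) == 2 and
 * len == cto32(0x3c >> 2) == 4, so the watchpoint covers the four bytes
 * wvr + 2 .. wvr + 5. A non-contiguous value such as 0b0101 is treated as
 * if it were 0b0001, since set bits after the first run of ones are
 * ignored.
 */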
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
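/* Worked example for the sextract64() above: a guest write of
 * 0x0001000000000003 (bit 48 set, bits [1:0] set) is stored as
 * 0xffff000000000000 -- bits [63:49] copy bit 48 and the two RES0 low
 * bits are cleared -- which is also what a subsequent read of DBGWVR<n>
 * returns.
 */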
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;

        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
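/* Worked example for the BAS constraint above: a guest write with
 * BAS == 0b0110 (register bits [8:5]) is rewritten so that bit 6 copies
 * bit 5 and bit 8 copies bit 7, storing BAS == 0b1100; only the values
 * 0b0000, 0b0011, 0b1100 and 0b1111 can ever be stored, which is what
 * lets hw_breakpoint_update() enumerate just four cases.
 */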
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
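/* Worked example for the "minus 1" encoding above: a DBGDIDR value whose
 * BRPs field reads 5 and whose WRPs field reads 3 describes 6 breakpoints
 * and 4 watchpoints, so the loops above define DBGBVR0..DBGBVR5 and
 * DBGWVR0..DBGWVR3 (plus the matching control registers).
 */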
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode.  */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
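/* Worked example for the routing above: an EL0 or EL1 access to one of the
 * key registers below first traps to EL2 if EL2 exists and HCR_EL2.APK is
 * clear, and otherwise traps to EL3 if EL3 exists and SCR_EL3.APK is
 * clear; only when both gates are open does the access succeed.
 */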
static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.hi) },
    REGINFO_SENTINEL
};
#endif
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        unsigned int i, pmcrn = 4;
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 15,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 15, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 15,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 15, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr1},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              /* At present, only SVEver == 0 is defined anyway.  */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-A32HPD.  */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        /*
         * A trivial implementation of ARMv8.1-LOR leaves all of these
         * registers fixed at 0, which indicates that there are zero
         * supported Limited Ordering regions.
         */
        static const ARMCPRegInfo lor_reginfo[] = {
            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
              .access = PL1_R, .accessfn = access_lorid,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
#endif
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n",
                      name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
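
/*
 * Worked example (illustrative): the AArch32 SCTLR is cp15, opc1=0,
 * crn=1, crm=0, opc2=0, so its 32-bit non-secure instance would be
 * keyed as
 *   ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0)
 * matching the (cp, is64, ns, crn, crm, opc1, opc2) order used above.
 */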
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            /* Register is declared as common to both
                             * security states: define an instance for each.
                             */
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of cp15 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
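
/*
 * Example definition (illustrative only, not one of the real cpreg
 * tables in this file): a fully wildcarded constant RAZ register.
 * It expands to one hashtable entry per (crm, opc1, opc2) combination;
 * only the entry with zero in each wildcarded field stays migratable,
 * the rest become ARM_CP_ALIAS | ARM_CP_NO_GDB in
 * add_cpreg_to_hashtable():
 *
 *   static const ARMCPRegInfo example = {
 *       .name = "EXAMPLE", .cp = 15, .crn = 9, .crm = CP_ANY,
 *       .opc1 = CP_ANY, .opc2 = CP_ANY,
 *       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
 *   };
 */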
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
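
/*
 * These three helpers exist so that a cpreg definition can say
 * (illustratively) .writefn = arm_cp_write_ignore,
 * .readfn = arm_cp_read_zero or .resetfn = arm_cp_reset_ignore to get
 * WI / RAZ / no-reset behaviour without a per-register stub function.
 */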
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
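
/*
 * Note on the lazy flag representation read back above: env->NF and
 * env->VF hold their flag value in bit 31, env->CF holds it in bit 0,
 * and env->ZF is "zero means Z is set" -- which is why cpsr_read()
 * computes ZF = (env->ZF == 0) before shifting it into place.
 */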
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                env->uncached_cpsr |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
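
/*
 * The division helpers above implement the Arm architectural rules for
 * UDIV/SDIV: division by zero returns 0 rather than trapping, and the
 * one signed-overflow case, INT_MIN / -1, returns INT_MIN.
 */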
#if defined(CONFIG_USER_ONLY)

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}
static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
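
/*
 * The 5-word memcpy()s above cover r8-r12: FIQ mode banks those five
 * registers in addition to the r13/r14/SPSR banking handled after them.
 */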
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
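
/*
 * Worked example of the lookup: with an AArch32 EL3 (first index 0),
 * SCR.IRQ clear, HCR.IMO set (row "0 0 0 1"), an IRQ taken from
 * non-secure EL0 reads entry { 2, 2, 2, -1 }[0] == 2, i.e. the
 * exception is routed to EL2.
 */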
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}
/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
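
/*
 * The four cases above decode as: the current security state keeps its
 * active SP in regs[13] and its inactive one in v7m.other_sp, while the
 * opposite security state's MSP and PSP are always parked in
 * other_ss_msp/other_ss_psp.
 */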
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
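
/*
 * Layout of the 0x28-byte callee-saves frame pushed above: the
 * integrity signature 0xfefa125b at offset 0, a reserved word at
 * offset 4, then r4-r11 at offsets 0x8 through 0x24.
 */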
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            return true;
        }
    }

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed. */
    env->regs[13] = frameptr;

    return !stacked_ok;
}
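
/*
 * This is the standard v7M/v8M 8-word exception frame: r0-r3, r12, lr,
 * the return address and xPSR, at offsets 0 through 28.
 */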
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = true;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /* we returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /* We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    /*
     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     * We must do this before we do any kind of tailchaining, including
     * for the derived exceptions on integrity check failures, or we will
     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    /*
     * Tailchaining: if there is currently a pending exception that
     * is high enough priority to preempt execution at the level we're
     * about to return to, then just directly take that exception now,
     * avoiding an unstack-and-then-stack. Note that now we have
     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
     * our current execution priority is already the execution priority we are
     * returning to -- none of the state we would unstack or set based on
     * the EXCRET value affects it.
     */
    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* Skip the SG insn */
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
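/*
 * Summary of how the EXC_RETURN value in lr is assembled above (this
 * restates the code, not the architecture manual):
 *   v8M:  RES1 | DCRS | FTYPE, plus S if we are currently Secure; ES and
 *         SPSEL are filled in later by v7m_exception_taken().
 *   v7M:  RES1 | S | DCRS | FTYPE | ES, plus SPSEL from CONTROL_NS.
 *   Both: MODE is set if we were in Thread mode when the exception was
 *         taken.
 */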
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
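/*
 * For convenience, the xregs <-> AArch32 register mapping implemented by
 * aarch64_sync_32_to_64() above and aarch64_sync_64_to_32() below
 * (derived directly from the code):
 *
 *   x0-x7    r0-r7
 *   x8-x12   r8-r12 (usr bank if currently in FIQ mode)
 *   x13/x14  SP/LR of usr (or of the current mode, if usr/sys)
 *   x15      SP_hyp
 *   x16/x17  LR_irq/SP_irq
 *   x18/x19  LR_svc/SP_svc
 *   x20/x21  LR_abt/SP_abt
 *   x22/x23  LR_und/SP_und
 *   x24-x30  r8_fiq-r14_fiq
 */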
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
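/*
 * Recap of the vector offsets applied in arm_cpu_do_interrupt_aarch64()
 * above (a restatement of the code, matching the AArch64 vector table
 * layout): +0x200 current EL with SP_ELx, +0x400 lower EL using AArch64,
 * +0x600 lower EL using AArch32; within the chosen block, synchronous
 * exceptions use the block base itself, IRQ/vIRQ add 0x80 and FIQ/vFIQ
 * add 0x100. (The SError slot at +0x180 is not generated by this path.)
 */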
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}
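/*
 * For example, stage_1_mmu_idx(ARMMMUIdx_S12NSE0) yields ARMMMUIdx_S1NSE0:
 * the S12 indexes sit at a fixed enum offset from their stage 1
 * counterparts, which is what the arithmetic above relies on.
 */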
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}
/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
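/*
 * The simple AP mapping implemented above, in tabular form:
 *   AP[2:1] = 0 : privileged RW, user no access
 *   AP[2:1] = 1 : RW at any privilege
 *   AP[2:1] = 2 : privileged RO, user no access
 *   AP[2:1] = 3 : RO at any privilege
 */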
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
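/*
 * S2AP decoding used above: bit 0 grants read and bit 1 grants write;
 * execute is allowed when XN is clear, with the extra restriction that
 * when EL2 is AArch32 the page must also be readable.
 */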
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
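/*
 * Worked example: for address 0x12345678 the level 1 index is
 * VA[31:20] = 0x123, so the code above ORs 0x123 * 4 = 0x48c into the
 * table base ((0x12345678 >> 18) & 0x3ffc == 0x48c).
 */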
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
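/*
 * Worked example: for a 4KB granule (stride 9, grainsize 12) with
 * inputsize 40 and suggested starting level 1, the code above computes
 * startsizecheck = 40 - (2 * 9 + 12) = 10, which lies within [1, 13],
 * so the configuration passes the start-size check.
 */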
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (el > 1) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        ttbr1_valid = (el < 2);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
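        /*
         * Worked example (illustrative): with 4KB granules (stride 9) and
         * a 48-bit input range this computes level = 4 - 44 / 9 = 0,
         * i.e. a full four-level walk (levels 0 through 3).
         */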
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        if (hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
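/*
 * Illustrative sketch (not part of the original source): for a 4KB granule
 * the stride is 9, so at level 3 the walk above shifts the address right by
 * stride * (4 - level) = 9 and masks with indexmask_grainsize = 0xfff.
 * Clearing the low 3 bits, as the loop does with "descaddr &= ~7ULL", leaves
 * VA[20:12] * 8, i.e. the byte offset of the 8-byte descriptor in the table.
 */
static inline uint64_t example_lpae_l3_desc_offset(uint64_t address)
{
    /* Same arithmetic as the loop body, specialised to stride 9, level 3 */
    return ((address >> 9) & ((1ULL << 12) - 1)) & ~7ULL;
}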
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}
static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
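/*
 * Illustrative sketch (not part of the original source): testing the top
 * three address bits for 0b111 is exactly an "address >= 0xe0000000" check,
 * since 0xe0000000 >> 29 == 0x7.
 */
static inline bool example_in_m_system_region(uint32_t address)
{
    /* Equivalent to the extract32(address, 29, 3) == 0x7 test above */
    return address >= 0xe0000000;
}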
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
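/*
 * Illustrative sketch (not part of the original source): the subregion
 * coalescing loop above accepts a group of 2, 4 or 8 subregions as one
 * homogeneous region only while all of the group's disable bits agree with
 * the bit for the subregion the address falls in. Since the group always
 * contains that subregion, the test is equivalent to "all bits equal":
 */
static inline bool example_pmsav7_subregions_consistent(uint32_t drsr,
                                                        int snd, int i)
{
    int snd_rounded = snd & ~(i - 1);
    uint32_t bits = extract32(drsr, snd_rounded + 8, i);

    /* All-zeroes or all-ones means the group acts as a single region */
    return bits == 0 || bits == (1u << i) - 1;
}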
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
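/*
 * Illustrative sketch (not part of the original source): SAU regions have
 * 32-byte granularity. With SAU_RBAR = 0x20000000 and SAU_RLAR = 0x2000ffe1
 * (enable bit set), the decode above yields base = 0x20000000 and
 * limit = 0x2000ffff, so the region spans exactly 64KB.
 */
static inline void example_sau_region_decode(uint32_t rbar, uint32_t rlar,
                                             uint32_t *base, uint32_t *limit)
{
    *base = rbar & ~0x1f;   /* bits [31:5], low bits read as zeroes */
    *limit = rlar | 0x1f;   /* bits [31:5], low bits read as ones */
}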
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
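/*
 * Worked example (illustrative, not upstream code): combining a stage 1
 * inner attribute of 0xf (write-back, read/write-allocate) with a stage 2
 * attribute of 0x8 (write-through, no allocation hints) takes the
 * "stage 2 write-through" branch above and yields (2 << 2) | 0x3 = 0xb:
 * write-through, keeping the stage 1 allocation hints.
 */
static inline uint8_t example_combine_wb_with_wt(void)
{
    /* Expected result: 0xb */
    return combine_cacheattr_nibble(0xf, 0x8);
}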
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
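/*
 * Worked example (illustrative, not upstream code): if stage 1 maps a page
 * as Normal write-back cacheable (attrs 0xff, non-shareable) and stage 2
 * maps it as Device-nGnRE (attrs 0x04), the device type wins: the combined
 * result is Device-nGnRE and, as the architecture requires, Outer Shareable.
 */
static inline ARMCacheAttrs example_combine_normal_with_device(void)
{
    ARMCacheAttrs s1 = { .attrs = 0xff, .shareability = 0 };
    ARMCacheAttrs s2 = { .attrs = 0x04, .shareability = 0 };

    /* Expected: .attrs == 0x04 (Device-nGnRE), .shareability == 2 */
    return combine_cacheattrs(s1, s2);
}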
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return 0;
    }

    return ret;
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}
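/*
 * Illustrative sketch (not part of the original source): the mask built for
 * SYSm values 0..7 above composes the xPSR view. For example an MRS of IAPSR
 * (SYSm == 1) from unprivileged code has el == 0, so the XPSR_EXCP part stays
 * clear and only the APSR flags are returned.
 */
static inline uint32_t example_v7m_xpsr_read_mask(uint32_t sysm, unsigned el)
{
    uint32_t mask = 0;

    if ((sysm & 1) && el) {
        mask |= XPSR_EXCP;          /* IPSR field, privileged reads only */
    }
    if (!(sysm & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR field */
    }
    return mask;
}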
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            if (val < limit) {
                CPUState *cs = CPU(arm_env_get_cpu(env));

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
<< 8) |
12105 void HELPER(dc_zva
)(CPUARMState
*env
, uint64_t vaddr_in
)
12107 /* Implement DC ZVA, which zeroes a fixed-length block of memory.
12108 * Note that we do not implement the (architecturally mandated)
12109 * alignment fault for attempts to use this on Device memory
12110 * (which matches the usual QEMU behaviour of not implementing either
12111 * alignment faults or any memory attribute handling).
12114 ARMCPU
*cpu
= arm_env_get_cpu(env
);
12115 uint64_t blocklen
= 4 << cpu
->dcz_blocksize
;
12116 uint64_t vaddr
= vaddr_in
& ~(blocklen
- 1);
12118 #ifndef CONFIG_USER_ONLY
12120 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
12121 * the block size so we might have to do more than one TLB lookup.
12122 * We know that in fact for any v8 CPU the page size is at least 4K
12123 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
12124 * 1K as an artefact of legacy v5 subpage support being present in the
12125 * same QEMU executable.
12127 int maxidx
= DIV_ROUND_UP(blocklen
, TARGET_PAGE_SIZE
);
12128 void *hostaddr
[maxidx
];
12130 unsigned mmu_idx
= cpu_mmu_index(env
, false);
12131 TCGMemOpIdx oi
= make_memop_idx(MO_UB
, mmu_idx
);
12133 for (try = 0; try < 2; try++) {
12135 for (i
= 0; i
< maxidx
; i
++) {
12136 hostaddr
[i
] = tlb_vaddr_to_host(env
,
12137 vaddr
+ TARGET_PAGE_SIZE
* i
,
12139 if (!hostaddr
[i
]) {
12144 /* If it's all in the TLB it's fair game for just writing to;
12145 * we know we don't need to update dirty status, etc.
12147 for (i
= 0; i
< maxidx
- 1; i
++) {
12148 memset(hostaddr
[i
], 0, TARGET_PAGE_SIZE
);
12150 memset(hostaddr
[i
], 0, blocklen
- (i
* TARGET_PAGE_SIZE
));
12153 /* OK, try a store and see if we can populate the tlb. This
12154 * might cause an exception if the memory isn't writable,
12155 * in which case we will longjmp out of here. We must for
12156 * this purpose use the actual register value passed to us
12157 * so that we get the fault address right.
12159 helper_ret_stb_mmu(env
, vaddr_in
, 0, oi
, GETPC());
12160 /* Now we can populate the other TLB entries, if any */
12161 for (i
= 0; i
< maxidx
; i
++) {
12162 uint64_t va
= vaddr
+ TARGET_PAGE_SIZE
* i
;
12163 if (va
!= (vaddr_in
& TARGET_PAGE_MASK
)) {
12164 helper_ret_stb_mmu(env
, va
, 0, oi
, GETPC());
12169 /* Slow path (probably attempt to do this to an I/O device or
12170 * similar, or clearing of a block of code we have translations
12171 * cached for). Just do a series of byte writes as the architecture
12172 * demands. It's not worth trying to use a cpu_physical_memory_map(),
12173 * memset(), unmap() sequence here because:
12174 * + we'd need to account for the blocksize being larger than a page
12175 * + the direct-RAM access case is almost always going to be dealt
12176 * with in the fastpath code above, so there's no speed benefit
12177 * + we would have to deal with the map returning NULL because the
12178 * bounce buffer was in use
12180 for (i
= 0; i
< blocklen
; i
++) {
12181 helper_ret_stb_mmu(env
, vaddr
+ i
, 0, oi
, GETPC());
12185 memset(g2h(vaddr
), 0, blocklen
);
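/*
 * Illustrative sketch (not part of the original source): dcz_blocksize is
 * the DCZID_EL0.BS field, which encodes the block size as log2 of the number
 * of words. A typical value of 4 gives 4 << 4 = 64 bytes, and the input
 * address is rounded down to that boundary before zeroing.
 */
static inline uint64_t example_dc_zva_base(uint64_t vaddr_in,
                                           unsigned dcz_blocksize)
{
    uint64_t blocklen = 4 << dcz_blocksize; /* bytes per DC ZVA block */

    return vaddr_in & ~(blocklen - 1);      /* align down to the block */
}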
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
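/*
 * Worked example (illustrative, not upstream code): the overflow test in
 * add16_sat() flags saturation when the operands have the same sign but the
 * raw result's sign differs. E.g. 0x7fff + 0x0001: the operands agree in
 * sign ((a ^ b) & 0x8000 == 0) while the raw sum 0x8000 differs from a, so
 * the result saturates to 0x7fff.
 */
static inline bool example_signed_add_overflows16(uint16_t a, uint16_t b)
{
    uint16_t res = a + b;

    /* Same predicate as add16_sat() above */
    return ((res ^ a) & 0x8000) && !((a ^ b) & 0x8000);
}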
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
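/*
 * Worked example (illustrative, not upstream code): USAD8 of 0x01020304
 * against 0x04030201 compares byte lanes |4-1|, |3-2|, |2-3|, |1-4| =
 * 3 + 1 + 1 + 3, so the helper returns 8.
 */
static inline uint32_t example_usad8(void)
{
    /* Expected result: 8 */
    return helper_usad8(0x01020304, 0x04030201);
}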
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    uint32_t i, fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);

    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    /* FZ16 does not generate an input denormal exception.  */
    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
          & ~float_flag_input_denormal);

    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
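/*
 * Illustrative sketch (not part of the original source): the target bit
 * values above are the FPSCR cumulative exception flags:
 *   0x01 IOC (invalid)    0x02 DZC (divide by zero)  0x04 OFC (overflow)
 *   0x08 UFC (underflow)  0x10 IXC (inexact)         0x80 IDC (input denormal)
 * so a host status of overflow plus inexact maps to OFC | IXC:
 */
static inline int example_fpscr_bits_for_overflow_and_inexact(void)
{
    /* Expected result: 0x14 */
    return vfp_exceptbits_from_host(float_flag_overflow | float_flag_inexact);
}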
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    /* When ARMv8.2-FP16 is not supported, FZ16 is RES0.  */
    if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
        val &= ~FPCR_FZ16;
    }

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /* The exception flags are ORed together when we read fpscr so we
     * only need to preserve the current state in one of our
     * float_status values.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, ftype, fsz, sign) \
ftype HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, ftype, fsz, sign, round) \
sign##int32_t HELPER(name)(ftype x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, ftype, fsz, sign) \
    CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \
    CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \
    CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \
uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
                                            void *fpst) \
{ \
    if (unlikely(float##fsz##_is_any_nan(x))) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
                         float_round_to_zero, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
                         get_float_rounding_mode(fpst), )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \
                         get_float_rounding_mode(fpst), )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64
uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return int32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return uint32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return int64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return uint64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
{
    float_status *fp_status = fpstp;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
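/*
 * Usage sketch (illustrative only, not code from this file): callers
 * typically bracket an explicitly-rounded operation with a save/restore
 * pair, e.g.
 *
 *   uint32_t old = helper_set_rmode(float_round_to_zero, fpst);
 *   ... perform the operation ...
 *   helper_set_rmode(old, fpst);
 *
 * (in generated code the translator emits this via gen_helper_set_rmode).
 */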
/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
/* Half precision conversions.  */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float32 r = float16_to_float32(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float32_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float64 r = float16_to_float64(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float64_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}
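/*
 * Note (added for clarity): ahp_mode selects the ARM Alternative Half
 * Precision format, which replaces the IEEE infinity/NaN encodings with
 * extra finite range.  The softfloat conversion routines take an "ieee"
 * flag, hence the !ahp_mode in the calls above.
 */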
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)
float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
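/*
 * Background (added for clarity): these are the Newton-Raphson steps used
 * by VRECPS and VRSQRTS.  For the reciprocal of d, x' = x * (2 - d * x),
 * so the helper returns (2 - a * b); for the reciprocal square root,
 * x' = x * (3 - d * x * x) / 2, so the helper returns (3 - a * b) / 2.
 */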
/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */
static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}
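/*
 * Worked example (added for illustration): input = 256, i.e. x = 0.5,
 * gives a = 513, b = (1 << 19) / 513 = 1022 and r = 511, an estimate
 * of 511/256 ~= 1.996 for 1/0.5.
 */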
/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns a
 * float64 which can then be rounded to the appropriate size by the
 * callee.
 */

static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;
    return result_frac;
}
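/*
 * Note (added for clarity): on return *exp holds the result exponent;
 * results that would be sub-normal are encoded with exponent 0 and a
 * fraction that has already been shifted right by one or two bits.
 */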
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}
uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0>; */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */

static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        a = a * 2 + 1;
    } else {
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}
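/*
 * Worked example (added for illustration): a = 256, representing
 * x = 0.5, takes the even branch and becomes a = 514; the loop exits
 * with b = 722, so estimate = 361, i.e. 361/256 ~= 1.41 ~= 1/sqrt(0.5).
 */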
static uint64_t recip_sqrt_estimate(int *exp, int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}
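/*
 * Note (added for clarity): the parity test on *exp selects a 9-bit
 * input of 128..255 (odd exponent, value in [0.25, 0.5)) or 256..511
 * (even exponent, value in [0.5, 1.0)), matching the 128..511 domain
 * asserted by do_recip_sqrt_estimate().
 */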
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}
float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
    val = deposit64(0, 63, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* float_status *s = fpstp; */
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}
uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    new_flags = get_float_exception_flags(fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
/* The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
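/*
 * Usage sketch (illustrative only, not code from this file): a CRC32W
 * step over a little-endian word w is
 *
 *   acc = helper_crc32(acc, w, 4);
 *
 * while a CRC32B step passes the byte in the low 8 bits of val with the
 * upper bytes already zeroed, as the comment above requires.
 */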
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}
ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    int el;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    el = arm_current_el(env);
    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    }
    return ARMMMUIdx_S12NSE0 + el;
}
int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    int current_el = arm_current_el(env);
    int fp_el = fp_exception_el(env, current_el);
    uint32_t flags = 0;

    if (is_a64(env)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        *pc = env->pc;
        flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

#ifndef CONFIG_USER_ONLY
        /*
         * Get control bits for tagged addresses.  Note that the
         * translator only uses this for instruction addresses.
         */
        {
            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
            int tbii, tbid;

            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
            if (regime_el(env, stage1) < 2) {
                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
                tbid = (p1.tbi << 1) | p0.tbi;
                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
            } else {
                tbid = p0.tbi;
                tbii = tbid & !p0.tbid;
            }

            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
        }
#endif

        if (cpu_isar_feature(aa64_sve, cpu)) {
            int sve_el = sve_exception_el(env, current_el);
            uint32_t zcr_len;

            /* If SVE is disabled, but FP is enabled,
             * then the effective len is 0.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                zcr_len = sve_zcr_len_for_el(env, current_el);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
        }

        if (cpu_isar_feature(aa64_pauth, cpu)) {
            /*
             * In order to save space in flags, we record only whether
             * pauth is "inactive", meaning all insns are implemented as
             * a nop, or "active" when some action must be performed.
             * The decision of which action to take is left to a helper.
             */
            uint64_t sctlr;
            if (current_el == 0) {
                /* FIXME: ARMv8.1-VHE S2 translation regime.  */
                sctlr = env->cp15.sctlr_el[1];
            } else {
                sctlr = env->cp15.sctlr_el[current_el];
            }
            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
            }
        }
    } else {
        *pc = env->regs[15];
        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
        flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
        }
        flags = FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar);
    }

    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }

    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
     * suppressing them because the requested execution priority is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_M) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    *pflags = flags;
    *cs_base = 0;
}
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= arm_env_get_cpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
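/*
 * Worked example (added for illustration): for vq = 3, vq & 3 == 3, so
 * pmask = ~(-1ULL << 48) keeps the low 48 predicate bits of lane j = 0
 * and, because pmask is reset to 0 afterwards, all higher lanes are
 * cleared entirely.
 */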
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}