#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-target.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
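/*
 * Illustrative numbering (derived from the code above) for a VFP3+NEON
 * core: regs 0..31 map to D0..D31, regs 32..47 alias Q0..Q15, and with
 * nregs == 48 at the switch, regs 48..50 are FPSID, FPSCR and FPEXC.
 */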
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
   /* Return true if the regdef would cause an assertion if you called
    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
    * program bug for it not to have the NO_RAW flag).
    * NB that returning false here doesn't necessarily mean that calling
    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
    * read/write access functions which are safe for raw use" from "has
    * read/write access functions which have side effects but has forgotten
    * to provide raw access functions".
    * The tests here line up with the conditions in read/write_raw_cp_reg()
    * and assertions in raw_read()/raw_write().
    */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
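/*
 * For example, if an incoming migration stream supplies a value for a
 * read-only register, write_raw_cp_reg() above leaves the state unchanged,
 * the readback returns the old value, and the mismatch is reported to the
 * caller instead of being silently accepted.
 */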
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}
/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
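/*
 * Worked example of the address reconstruction above: a register value
 * of 0x00123456 carries IPA[39:12] in its low bits, so "value << 12"
 * rebuilds the byte address 0x123456000 and sextract64(..., 0, 40)
 * sign-extends it from the 40-bit IPA space.
 */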
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
    },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
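/*
 * For example, with PMCR.N == 4 event counters this evaluates to
 * (1 << 31) | 0xf == 0x8000000f: bit 31 covers PMCCNTR and bits [3:0]
 * cover the four event counters.
 */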
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    /* There is no overflow limit for SW_INCR */
    return -1;
}
/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
};
/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
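/*
 * For example, with the pm_events table above on a system emulation build,
 * SW_INCR (0x000) sets bit 0 and INST_RETIRED (0x008) sets bit 8 of PMCEID0,
 * while CPU_CYCLES (0x011) sets bit 17 of PMCEID0; event numbers of 0x20 and
 * above would land in PMCEID1 instead.
 */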
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
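/*
 * The PMUSERENR bits tested by the accessfns above follow the v8
 * PMUSERENR_EL0 layout: EN is bit 0, SW bit 1, CR bit 2 and ER bit 3.
 */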
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
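/*
 * Overflow example for the bit test above: with PMCRLC clear the overflow
 * bit is bit 31, so a step from c15_ccnt == 0xffffffff to new_pmccntr == 0
 * gives 0xffffffff & ~0x0 & (1ull << 31) != 0 and raises PMOVSR bit 31;
 * a step that does not clear bit 31 leaves the test false.
 */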
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
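/*
 * For example, if the guest-visible count is 0xfffffff0 in 32-bit mode
 * (PMCRLC clear), -c15_ccnt is 0xffffffff00000010 and the uint32_t cast
 * truncates it to 0x10, so the overflow timer is programmed 16 cycles'
 * worth of nanoseconds into the future.
 */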
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
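/*
 * The "old & ~new & INT32_MIN" test above is the same wraparound check used
 * in pmccntr_op_start(): it fires only when bit 31 falls, which for a +1
 * increment means the counter stepped from 0xffffffff to 0.
 */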
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
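/*
 * For example, an AArch32 write of 0x1234 to PMCCNTR when the 64-bit
 * counter holds 0xdeadbeef00000000 produces 0xdeadbeef00001234:
 * deposit64() replaces only bits [31:0].
 */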
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}
static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
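/*
 * The counter index is recovered from the encoding as crm[1:0]:opc2[2:0].
 * For example, PMEVTYPER10 has crm = 0b1101 and opc2 = 0b010, so
 * ((crm & 3) << 3) | (opc2 & 7) == (1 << 3) | 2 == 10.
 */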
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}
static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}
static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
1921 static const ARMCPRegInfo v7_cp_reginfo
[] = {
1922 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1923 { .name
= "NOP", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
1924 .access
= PL1_W
, .type
= ARM_CP_NOP
},
1925 /* Performance monitors are implementation defined in v7,
1926 * but with an ARM recommended set of registers, which we
1929 * Performance registers fall into three categories:
1930 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1931 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1932 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1933 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1934 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1936 { .name
= "PMCNTENSET", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 1,
1937 .access
= PL0_RW
, .type
= ARM_CP_ALIAS
,
1938 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcnten
),
1939 .writefn
= pmcntenset_write
,
1940 .accessfn
= pmreg_access
,
1941 .raw_writefn
= raw_write
},
1942 { .name
= "PMCNTENSET_EL0", .state
= ARM_CP_STATE_AA64
,
1943 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 1,
1944 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1945 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
), .resetvalue
= 0,
1946 .writefn
= pmcntenset_write
, .raw_writefn
= raw_write
},
1947 { .name
= "PMCNTENCLR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 2,
1949 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmcnten
),
1950 .accessfn
= pmreg_access
,
1951 .writefn
= pmcntenclr_write
,
1952 .type
= ARM_CP_ALIAS
},
1953 { .name
= "PMCNTENCLR_EL0", .state
= ARM_CP_STATE_AA64
,
1954 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 2,
1955 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1956 .type
= ARM_CP_ALIAS
,
1957 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmcnten
),
1958 .writefn
= pmcntenclr_write
},
1959 { .name
= "PMOVSR", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 3,
1960 .access
= PL0_RW
, .type
= ARM_CP_IO
,
1961 .fieldoffset
= offsetoflow32(CPUARMState
, cp15
.c9_pmovsr
),
1962 .accessfn
= pmreg_access
,
1963 .writefn
= pmovsr_write
,
1964 .raw_writefn
= raw_write
},
1965 { .name
= "PMOVSCLR_EL0", .state
= ARM_CP_STATE_AA64
,
1966 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 3,
1967 .access
= PL0_RW
, .accessfn
= pmreg_access
,
1968 .type
= ARM_CP_ALIAS
| ARM_CP_IO
,
1969 .fieldoffset
= offsetof(CPUARMState
, cp15
.c9_pmovsr
),
1970 .writefn
= pmovsr_write
,
1971 .raw_writefn
= raw_write
},
1972 { .name
= "PMSWINC", .cp
= 15, .crn
= 9, .crm
= 12, .opc1
= 0, .opc2
= 4,
1973 .access
= PL0_W
, .accessfn
= pmreg_access_swinc
,
1974 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
1975 .writefn
= pmswinc_write
},
1976 { .name
= "PMSWINC_EL0", .state
= ARM_CP_STATE_AA64
,
1977 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 4,
1978 .access
= PL0_W
, .accessfn
= pmreg_access_swinc
,
1979 .type
= ARM_CP_NO_RAW
| ARM_CP_IO
,
1980 .writefn
= pmswinc_write
},
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
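
/* A note on the CNTKCTL bit positions used by the two functions above:
 * EL0PCTEN and EL0VCTEN live in bits [1:0], and EL0VTEN/EL0PTEN in
 * bits [9:8]. Since GTIMER_PHYS is 0 and GTIMER_VIRT is 1, "timeridx"
 * indexes the counter enable bit directly, while "9 - timeridx" picks
 * the matching timer enable bit.
 */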

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
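
/* The system counter advances once per GTIMER_SCALE nanoseconds of
 * QEMU_CLOCK_VIRTUAL time, so its nominal frequency is
 * NANOSECONDS_PER_SECOND / GTIMER_SCALE: the same expression used as
 * the CNTFRQ reset value in the reginfo list below.
 */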

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
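
/* gt_recalc_timer() is both the register-write slow path and the
 * QEMUTimer expiry handler (via the arm_gt_*_cb callbacks below). When
 * the deadline had to be clamped to INT64_MAX / GTIMER_SCALE, re-running
 * the calculation on expiry re-arms the timer for the remaining period.
 */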

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
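
/* TVAL is a signed 32-bit downcounter view of the comparator: reading
 * returns CVAL - (counter - offset) truncated to 32 bits, and writing a
 * value V sets CVAL = (counter - offset) + sign_extend(V), so for
 * example writing 100 makes the timer fire 100 ticks from now.
 */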

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
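
/* Only CNT*_CTL bits [1:0] (ENABLE and IMASK) are writable here; bit 2
 * (ISTATUS) is read-only status maintained by gt_recalc_timer(). An
 * IMASK flip on its own cannot change ISTATUS, which is why that path
 * only updates the interrupt line instead of doing a full recalculation.
 */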

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
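
/* These four callbacks are attached to the per-CPU QEMUTimers (one per
 * GTIMER_* index); each expiry simply re-runs gt_recalc_timer() to
 * update ISTATUS, drive the interrupt output, and re-arm the timer.
 */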

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
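
/* The masks above drop write data in bits that are not writable in the
 * respective 32-bit PAR layouts: v7 cores keep bits 8 and 11 clear
 * (0xfffff6ff), older cores keep bits [11:9] clear (0xfffff1ff); with
 * LPAE the full 64-bit value is simply read-as-written.
 */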

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}

static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
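
/* On success the 64-bit PAR assembled above carries F=0, the LPAE
 * format bit (11), PA[47:12], NS at bit 9, SH at bits [8:7] and the
 * MAIR-style attributes at bits [63:56]. On a fault it instead carries
 * F=1, the long-descriptor fault status code at bits [6:1], plus the S
 * (bit 9) and PTW (bit 8) flags for stage 2 and page-table-walk faults.
 */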

static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
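
/* Decoding note: for these ATS operations opc2 bit 0 selects the
 * read/write probe variant (mapped to MMU_DATA_LOAD/STORE above) and
 * opc2 bits [2:1] select the translation regime, which is why the
 * switch dispatches on (ri->opc2 & 6).
 */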

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};

/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
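
/* The "simple" (PMSAv5) view packs eight 2-bit AP fields contiguously
 * in bits [15:0]; the "extended" view gives each region a 4-bit field
 * with the AP bits in its low half. The loops above shuttle region r's
 * field between bit position 2*r (simple) and 4*r (extended).
 */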

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
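
/* A worked example of the short-descriptor masks: TTBCR.N = 2 gives
 * mask = 0xc0000000 (VAs with either of the top two bits set translate
 * via TTBR1) and base_mask = 0xfffff000 (the TTBR0 table shrinks from
 * 16KB to 4KB and must be 4KB aligned).
 */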

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = arm_env_get_cpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
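
/* VTTBR holds both the stage 2 table base and the VMID, so any change
 * may retag all Non-secure EL0/EL1 translations; flushing the combined
 * S12NSE* indexes together with the S2NS index covers every TLB entry
 * the VMID qualifies.
 */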

static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

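/* The Inner Shareable ("IS") TLBI variants must affect all PEs in the
 * Inner Shareable domain, so they flush the TLBs of every vCPU via the
 * _all_cpus_synced helpers; the non-IS variants below only touch the
 * local vCPU unless tlb_force_broadcast() requires otherwise.
 */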
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vmalle1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}

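/* For the TLBI-by-VA operations the written value carries VA[55:12] in
 * bits [43:0]; "value << 12" followed by a 56-bit sign extract therefore
 * recovers the page-aligned, sign-extended virtual address.
 */
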
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vae1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

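/* The TLBI-by-IPA operations above instead carry IPA[47:12] in bits
 * [35:0] of the written value, hence the 48-bit extract rather than the
 * 56-bit one used for virtual addresses.
 */
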
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}

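/* The value built above follows the DCZID_EL0 layout: bits [3:0] (BS) are
 * log2 of the DC ZVA block size in words (cpu->dcz_blocksize), and bit 4
 * (DZP) reads as 1 when use of DC ZVA is prohibited.
 */
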
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}

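/* SDCR is the AArch32 counterpart of (the low half of) MDCR_EL3; only the
 * architecturally defined bits in SDCR_VALID_MASK are kept on writes, the
 * rest are RES0.
 */
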
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
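    /* The AT (address translate) operations below run a translation table
     * walk for the supplied address and deposit the result in PAR_EL1,
     * which only makes sense for system emulation, hence the ifndef.
     */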
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits. */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}

static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}

/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state". This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2". With lots of enumeration
         * on a per-field basis. In current QEMU, this condition is
         * arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4. */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}

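/* Note: code that wants the guest-visible behaviour of HCR_EL2 should go
 * through arm_hcr_el2_eff() rather than reading cp15.hcr_el2 directly,
 * so that the TGE/E2H overrides above are applied.
 */
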
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}

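/* On AArch32, the OS Lock is set by writing the key value 0xC5ACCE55 to
 * OSLAR (any other value clears it); on AArch64, OSLAR_EL1 bit 0 is the
 * lock request directly.
 */
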
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }
    return zcr_len;
}
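
/* Illustrative example: with cpu->sve_max_vq == 16, ZCR_EL1.LEN == 3 and
 * ZCR_EL2.LEN == 7, a lookup for EL1 yields MIN(15, 3, 7) == 3, i.e. an
 * effective vector length of (3 + 1) * 128 == 512 bits.  A higher EL's
 * ZCR limit constrains all lower ELs, but not the other way around.
 */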
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
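
/* Note: the narrowing call zeroes the Z/P register state beyond the new
 * vector length, so raising ZCR_ELx.LEN again later cannot expose stale
 * data; growing the length needs no equivalent fixup.
 */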
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
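
/* Illustrative example: WCR.MASK == 12 watches a 4KB (1 << 12) aligned
 * region regardless of BAS, while MASK == 0 with BAS == 0b00001111
 * watches exactly the four bytes starting at WVR.
 */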
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
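
/* Illustrative example of the read-only BAS copies made above: a guest
 * write of BAS = 0b0001 is stored as 0b0011, so only the patterns
 * 0b0000/0b0011/0b1100/0b1111 are ever observable, which is the
 * invariant that hw_breakpoint_update() relies on.
 */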
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
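
/* Note: the breakpoint/watchpoint index is carried in .crm (crm = i in
 * the tables above), which is why the dbgbvr/dbgbcr/dbgwvr/dbgwcr write
 * functions recover it with "int i = ri->crm;".
 */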
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
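
/* This is why ID_PFR1 and ID_AA64PFR0_EL1 are registered as ARM_CP_NO_RAW
 * with a readfn rather than ARM_CP_CONST: the GIC field (ID_PFR1[31:28],
 * ID_AA64PFR0[27:24]) is only known once realize has decided whether a
 * GICv3 is attached.
 */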
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode.  */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.hi) },
    REGINFO_SENTINEL
};
#endif
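
/* Note: only the raw key storage (apda_key and friends in CPUARMState)
 * is handled by these reginfos; the actual PAC insertion and
 * authentication is done by the pointer-authentication helpers when the
 * relevant instructions execute.
 */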
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        unsigned int i, pmcrn = 4;
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
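
        /* Note: the PMEVCNTR<n>/PMEVTYPER<n> encodings above pack the
         * counter number into the opcode: crm carries 8 (counter) or 12
         * (typer) plus n[4:3] via (3 & (i >> 3)), and opc2 carries
         * n[2:0] via (i & 7).
         */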
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr1},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              /* At present, only SVEver == 0 is defined anyway.  */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64ZFR0_EL1"           },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1"          },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1"           },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64AFR*",
              .is_glob = true                     },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true                     },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
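
    /* Note: the 0xc00 constant used for the v8/64-bit-EL3 cases sets
     * NSACR.{CP10,CP11}, i.e. it reports the FP/SIMD coprocessors as
     * accessible from the Non-secure world.
     */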
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-A32HPD.  */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        /*
         * A trivial implementation of ARMv8.1-LOR leaves all of these
         * registers fixed at 0, which indicates that there are zero
         * supported Limited Ordering regions.
         */
        static const ARMCPRegInfo lor_reginfo[] = {
            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
              .access = PL1_R, .accessfn = access_lorid,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
#endif
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
}
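/*
 * Illustrative output only (the exact list depends on the target and the
 * build configuration):
 *
 *   Available CPUs:
 *     arm1026
 *     cortex-a15
 *     cortex-m33
 *     ...
 */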
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state. This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank. This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL | ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;

        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
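/*
 * A hypothetical mods table for the function above might look like this
 * (the names and masks are purely illustrative):
 *
 *   static const ARMCPRegUserSpaceInfo example_mods[] = {
 *       { .name = "ID_EXAMPLE_EL1", .exported_bits = 0x000000f0 },
 *       { .name = "ID_EXAMPLE*", .is_glob = true },
 *       REGUSERINFO_SENTINEL
 *   };
 *   modify_arm_cp_regs(some_reginfo_list, example_mods);
 *
 * The glob entry forces every matching register to a constant zero, while
 * the exact-match entry keeps only the exported bits (plus any fixed bits).
 */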
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
#ifdef CONFIG_USER_ONLY

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria. Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken". The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *       routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *       routed to EL2.
 * In these two cases, the below table contains a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *    SCR     HCR
 *         IRQ     IMO      Non-secure         Secure
 *    EL3 FIQ  RW  FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
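/*
 * Example lookup (illustrative): a physical IRQ taken from Secure EL0 with
 * a 64-bit EL3, SCR.IRQ = 1 (scr = 1), SCR.RW = 1 (rw = 1) and HCR.IMO = 0
 * (hcr = 0) indexes target_el_table[1][1][1][0][1][0], which is 3: the
 * exception is routed to EL3.
 */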
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
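/*
 * Quick summary of the four cases above: when the requested security state
 * matches the current one, the wanted stack is either the active one in
 * regs[13] or the inactive one in v7m.other_sp; when it differs, the
 * MSP/PSP of the other security state live in v7m.other_ss_msp and
 * v7m.other_ss_psp respectively.
 */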
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
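/*
 * For reference, the callee-saves frame written above is laid out as
 * follows (offsets from the new frame pointer): 0x00 integrity signature
 * 0xfefa125b, 0x08 r4, 0x0c r5, 0x10 r6, 0x14 r7, 0x18 r8, 0x1c r9,
 * 0x20 r10, 0x24 r11; the word at 0x04 is reserved and is not written here.
 */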
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            return true;
        }
    }

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed. */
    env->regs[13] = frameptr;

    return !stacked_ok;
}
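/*
 * For reference, the basic exception frame written above is laid out as
 * follows (offsets from the new SP): 0x00 r0, 0x04 r1, 0x08 r2, 0x0c r3,
 * 0x10 r12, 0x14 lr (r14), 0x18 return address (r15), 0x1c xPSR.
 */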
8168 static void do_v7m_exception_exit(ARMCPU
*cpu
)
8170 CPUARMState
*env
= &cpu
->env
;
8173 bool ufault
= false;
8174 bool sfault
= false;
8175 bool return_to_sp_process
;
8176 bool return_to_handler
;
8177 bool rettobase
= false;
8178 bool exc_secure
= false;
8179 bool return_to_secure
;
8181 /* If we're not in Handler mode then jumps to magic exception-exit
8182 * addresses don't have magic behaviour. However for the v8M
8183 * security extensions the magic secure-function-return has to
8184 * work in thread mode too, so to avoid doing an extra check in
8185 * the generated code we allow exception-exit magic to also cause the
8186 * internal exception and bring us here in thread mode. Correct code
8187 * will never try to do this (the following insn fetch will always
8188 * fault) so we the overhead of having taken an unnecessary exception
8191 if (!arm_v7m_is_handler_mode(env
)) {
8195 /* In the spec pseudocode ExceptionReturn() is called directly
8196 * from BXWritePC() and gets the full target PC value including
8197 * bit zero. In QEMU's implementation we treat it as a normal
8198 * jump-to-register (which is then caught later on), and so split
8199 * the target value up between env->regs[15] and env->thumb in
8200 * gen_bx(). Reconstitute it.
8202 excret
= env
->regs
[15];
8207 qemu_log_mask(CPU_LOG_INT
, "Exception return: magic PC %" PRIx32
8208 " previous exception %d\n",
8209 excret
, env
->v7m
.exception
);
8211 if ((excret
& R_V7M_EXCRET_RES1_MASK
) != R_V7M_EXCRET_RES1_MASK
) {
8212 qemu_log_mask(LOG_GUEST_ERROR
, "M profile: zero high bits in exception "
8213 "exit PC value 0x%" PRIx32
" are UNPREDICTABLE\n",
8217 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8218 /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
8219 * we pick which FAULTMASK to clear.
8221 if (!env
->v7m
.secure
&&
8222 ((excret
& R_V7M_EXCRET_ES_MASK
) ||
8223 !(excret
& R_V7M_EXCRET_DCRS_MASK
))) {
8225 /* For all other purposes, treat ES as 0 (R_HXSR) */
8226 excret
&= ~R_V7M_EXCRET_ES_MASK
;
8228 exc_secure
= excret
& R_V7M_EXCRET_ES_MASK
;
8231 if (env
->v7m
.exception
!= ARMV7M_EXCP_NMI
) {
8232 /* Auto-clear FAULTMASK on return from other than NMI.
8233 * If the security extension is implemented then this only
8234 * happens if the raw execution priority is >= 0; the
8235 * value of the ES bit in the exception return value indicates
8236 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
8238 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8239 if (armv7m_nvic_raw_execution_priority(env
->nvic
) >= 0) {
8240 env
->v7m
.faultmask
[exc_secure
] = 0;
8243 env
->v7m
.faultmask
[M_REG_NS
] = 0;
8247 switch (armv7m_nvic_complete_irq(env
->nvic
, env
->v7m
.exception
,
8250 /* attempt to exit an exception that isn't active */
8254 /* still an irq active now */
8257 /* we returned to base exception level, no nesting.
8258 * (In the pseudocode this is written using "NestedActivation != 1"
8259 * where we have 'rettobase == false'.)
8264 g_assert_not_reached();
8267 return_to_handler
= !(excret
& R_V7M_EXCRET_MODE_MASK
);
8268 return_to_sp_process
= excret
& R_V7M_EXCRET_SPSEL_MASK
;
8269 return_to_secure
= arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
8270 (excret
& R_V7M_EXCRET_S_MASK
);
8272 if (arm_feature(env
, ARM_FEATURE_V8
)) {
8273 if (!arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8274 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
8275 * we choose to take the UsageFault.
8277 if ((excret
& R_V7M_EXCRET_S_MASK
) ||
8278 (excret
& R_V7M_EXCRET_ES_MASK
) ||
8279 !(excret
& R_V7M_EXCRET_DCRS_MASK
)) {
8283 if (excret
& R_V7M_EXCRET_RES0_MASK
) {
8287 /* For v7M we only recognize certain combinations of the low bits */
8288 switch (excret
& 0xf) {
8289 case 1: /* Return to Handler */
8291 case 13: /* Return to Thread using Process stack */
8292 case 9: /* Return to Thread using Main stack */
8293 /* We only need to check NONBASETHRDENA for v7M, because in
8294 * v8M this bit does not exist (it is RES1).
8297 !(env
->v7m
.ccr
[env
->v7m
.secure
] &
8298 R_V7M_CCR_NONBASETHRDENA_MASK
)) {
8308 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
8309 * Handler mode (and will be until we write the new XPSR.Interrupt
8310 * field) this does not switch around the current stack pointer.
8311 * We must do this before we do any kind of tailchaining, including
8312 * for the derived exceptions on integrity check failures, or we will
8313 * give the guest an incorrect EXCRET.SPSEL value on exception entry.
8315 write_v7m_control_spsel_for_secstate(env
, return_to_sp_process
, exc_secure
);
8318 env
->v7m
.sfsr
|= R_V7M_SFSR_INVER_MASK
;
8319 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
8320 qemu_log_mask(CPU_LOG_INT
, "...taking SecureFault on existing "
8321 "stackframe: failed EXC_RETURN.ES validity check\n");
8322 v7m_exception_taken(cpu
, excret
, true, false);
8327 /* Bad exception return: instead of popping the exception
8328 * stack, directly take a usage fault on the current stack.
8330 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_INVPC_MASK
;
8331 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
, env
->v7m
.secure
);
8332 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on existing "
8333 "stackframe: failed exception return integrity check\n");
8334 v7m_exception_taken(cpu
, excret
, true, false);
8339 * Tailchaining: if there is currently a pending exception that
8340 * is high enough priority to preempt execution at the level we're
8341 * about to return to, then just directly take that exception now,
8342 * avoiding an unstack-and-then-stack. Note that now we have
8343 * deactivated the previous exception by calling armv7m_nvic_complete_irq()
8344 * our current execution priority is already the execution priority we are
8345 * returning to -- none of the state we would unstack or set based on
8346 * the EXCRET value affects it.
8348 if (armv7m_nvic_can_take_pending_exception(env
->nvic
)) {
8349 qemu_log_mask(CPU_LOG_INT
, "...tailchaining to pending exception\n");
8350 v7m_exception_taken(cpu
, excret
, true, false);
8354 switch_v7m_security_state(env
, return_to_secure
);
8357 /* The stack pointer we should be reading the exception frame from
8358 * depends on bits in the magic exception return type value (and
8359 * for v8M isn't necessarily the stack pointer we will eventually
8360 * end up resuming execution with). Get a pointer to the location
8361 * in the CPU state struct where the SP we need is currently being
8362 * stored; we will use and modify it in place.
8363 * We use this limited C variable scope so we don't accidentally
8364 * use 'frame_sp_p' after we do something that makes it invalid.
8366 uint32_t *frame_sp_p
= get_v7m_sp_ptr(env
,
8369 return_to_sp_process
);
8370 uint32_t frameptr
= *frame_sp_p
;
8373 bool return_to_priv
= return_to_handler
||
8374 !(env
->v7m
.control
[return_to_secure
] & R_V7M_CONTROL_NPRIV_MASK
);
8376 mmu_idx
= arm_v7m_mmu_idx_for_secstate_and_priv(env
, return_to_secure
,
8379 if (!QEMU_IS_ALIGNED(frameptr
, 8) &&
8380 arm_feature(env
, ARM_FEATURE_V8
)) {
8381 qemu_log_mask(LOG_GUEST_ERROR
,
8382 "M profile exception return with non-8-aligned SP "
8383 "for destination state is UNPREDICTABLE\n");
8386 /* Do we need to pop callee-saved registers? */
8387 if (return_to_secure
&&
8388 ((excret
& R_V7M_EXCRET_ES_MASK
) == 0 ||
8389 (excret
& R_V7M_EXCRET_DCRS_MASK
) == 0)) {
8390 uint32_t expected_sig
= 0xfefa125b;
8391 uint32_t actual_sig
;
8393 pop_ok
= v7m_stack_read(cpu
, &actual_sig
, frameptr
, mmu_idx
);
8395 if (pop_ok
&& expected_sig
!= actual_sig
) {
8396 /* Take a SecureFault on the current stack */
8397 env
->v7m
.sfsr
|= R_V7M_SFSR_INVIS_MASK
;
8398 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
8399 qemu_log_mask(CPU_LOG_INT
, "...taking SecureFault on existing "
8400 "stackframe: failed exception return integrity "
8401 "signature check\n");
8402 v7m_exception_taken(cpu
, excret
, true, false);
8407 v7m_stack_read(cpu
, &env
->regs
[4], frameptr
+ 0x8, mmu_idx
) &&
8408 v7m_stack_read(cpu
, &env
->regs
[5], frameptr
+ 0xc, mmu_idx
) &&
8409 v7m_stack_read(cpu
, &env
->regs
[6], frameptr
+ 0x10, mmu_idx
) &&
8410 v7m_stack_read(cpu
, &env
->regs
[7], frameptr
+ 0x14, mmu_idx
) &&
8411 v7m_stack_read(cpu
, &env
->regs
[8], frameptr
+ 0x18, mmu_idx
) &&
8412 v7m_stack_read(cpu
, &env
->regs
[9], frameptr
+ 0x1c, mmu_idx
) &&
8413 v7m_stack_read(cpu
, &env
->regs
[10], frameptr
+ 0x20, mmu_idx
) &&
8414 v7m_stack_read(cpu
, &env
->regs
[11], frameptr
+ 0x24, mmu_idx
);
8421 v7m_stack_read(cpu
, &env
->regs
[0], frameptr
, mmu_idx
) &&
8422 v7m_stack_read(cpu
, &env
->regs
[1], frameptr
+ 0x4, mmu_idx
) &&
8423 v7m_stack_read(cpu
, &env
->regs
[2], frameptr
+ 0x8, mmu_idx
) &&
8424 v7m_stack_read(cpu
, &env
->regs
[3], frameptr
+ 0xc, mmu_idx
) &&
8425 v7m_stack_read(cpu
, &env
->regs
[12], frameptr
+ 0x10, mmu_idx
) &&
8426 v7m_stack_read(cpu
, &env
->regs
[14], frameptr
+ 0x14, mmu_idx
) &&
8427 v7m_stack_read(cpu
, &env
->regs
[15], frameptr
+ 0x18, mmu_idx
) &&
8428 v7m_stack_read(cpu
, &xpsr
, frameptr
+ 0x1c, mmu_idx
);
8431 /* v7m_stack_read() pended a fault, so take it (as a tail
8432 * chained exception on the same stack frame)
8434 qemu_log_mask(CPU_LOG_INT
, "...derived exception on unstacking\n");
8435 v7m_exception_taken(cpu
, excret
, true, false);
8439 /* Returning from an exception with a PC with bit 0 set is defined
8440 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
8441 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
8442 * the lsbit, and there are several RTOSes out there which incorrectly
8443 * assume the r15 in the stack frame should be a Thumb-style "lsbit
8444 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
8445 * complain about the badly behaved guest.
8447 if (env
->regs
[15] & 1) {
8448 env
->regs
[15] &= ~1U;
8449 if (!arm_feature(env
, ARM_FEATURE_V8
)) {
8450 qemu_log_mask(LOG_GUEST_ERROR
,
8451 "M profile return from interrupt with misaligned "
8452 "PC is UNPREDICTABLE on v7M\n");
8456 if (arm_feature(env
, ARM_FEATURE_V8
)) {
8457 /* For v8M we have to check whether the xPSR exception field
8458 * matches the EXCRET value for return to handler/thread
8459 * before we commit to changing the SP and xPSR.
8461 bool will_be_handler
= (xpsr
& XPSR_EXCP
) != 0;
8462 if (return_to_handler
!= will_be_handler
) {
8463 /* Take an INVPC UsageFault on the current stack.
8464 * By this point we will have switched to the security state
8465 * for the background state, so this UsageFault will target
8468 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
,
8470 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_INVPC_MASK
;
8471 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on existing "
8472 "stackframe: failed exception return integrity "
8474 v7m_exception_taken(cpu
, excret
, true, false);
        /* Commit to consuming the stack frame */
        frameptr += 0x20;

        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;

        /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
        xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
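        /*
         * Worked example (informative): if the pre-exception SP was
         * 0x2000fffc, exception entry pushed the 0x20-byte frame plus a
         * padding word, aligning down to 0x2000ffd8 and setting SPREALIGN.
         * Unwinding gives 0x2000ffd8 + 0x20 = 0x2000fff8; ORing in 4
         * restores the original 8-unaligned value 0x2000fffc.
         */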
    }

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* Skip the SG insn */
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
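/*
 * Informative summary of the EXC_RETURN (lr) bits assembled above:
 *   RES1  - reserved, set to one
 *   S     - return will be to the Secure state
 *   DCRS  - default rules used for stacking the callee-saved registers
 *   FTYPE - stack frame carries no FP state
 *   ES    - the exception itself targets the Secure state (for v8M this
 *           is set later, in v7m_exception_taken())
 *   SPSEL - the return stack is the process stack
 *   MODE  - return to Thread (rather than Handler) mode
 */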
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
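/*
 * Informative map of the AArch64 <-> AArch32 register correspondence
 * used by the two sync functions above and below:
 *   x0-x7    <-> r0-r7
 *   x8-x12   <-> r8-r12 (usr bank when in FIQ mode)
 *   x13/x14  <-> SP/LR (usr),  x15 <-> SP (hyp)
 *   x16/x17  <-> LR/SP (irq),  x18/x19 <-> LR/SP (svc)
 *   x20/x21  <-> LR/SP (abt),  x22/x23 <-> LR/SP (und)
 *   x24-x30  <-> r8-r14 (fiq bank)
 *   pc       <-> r15
 */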
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * regs.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_USER_ONLY

/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
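/*
 * Informative recap of the M-profile case above: MPU_CTRL.ENABLE alone
 * leaves the MPU off for HardFault and NMI handlers (the NegPri MMU
 * indexes), ENABLE together with HFNMIENA enables it everywhere, and
 * every other combination behaves as MPU-disabled.
 */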
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
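/*
 * Informative table of the short-descriptor AP[2:0] encodings handled
 * above, as PL1/PL0 access rights:
 *   0: none/none (pre-v7, SCTLR.S/R can grant read)   1: RW/none
 *   2: RW/RO     3: RW/RW     4: reserved              5: RO/none
 *   6: RO/RO     7: RO/RO (v6K and later only)
 */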
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
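/*
 * Informative table of the simplified AP[2:1] encodings decoded above:
 *   0: PL1 RW, PL0 none    1: RW at both privilege levels
 *   2: PL1 RO, PL0 none    3: RO at both privilege levels
 */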
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
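/*
 * Worked example (informative, assuming TTBCR.N == 2): tcr->mask is then
 * 0xc0000000 and tcr->base_mask is 0xfffff000, so a lookup of address
 * 0x12345678 has its top two bits clear, uses TTBR0, and selects the L1
 * descriptor at (ttbr0 & 0xfffff000) | ((0x12345678 >> 18) & 0x3ffc),
 * i.e. table base plus 0x48c.
 */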
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
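/*
 * Informative recap of the v5 short-descriptor walk above: the L1
 * descriptor type in bits [1:0] selects 0 = fault, 1 = coarse page
 * table, 2 = 1MB section, 3 = fine page table; the L2 descriptor types
 * then give 64KB, 4KB, or 1KB pages, with the AP field selected
 * per-subpage for the larger page sizes.
 */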
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
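/*
 * Worked example (informative): s2attrs == 0xf (Normal memory,
 * Write-Back inner and outer) gives hiattr = loattr = 3 and, with
 * caching not disabled by HCR_EL2.CD, hihint = lohint = 3, so the
 * function returns (3 << 6) | (3 << 4) | (3 << 2) | 3 == 0xff, the
 * MAIR encoding for Normal Write-Back RW-allocate memory.
 */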
#endif /* !CONFIG_USER_ONLY */

ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (el > 1) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}

#ifndef CONFIG_USER_ONLY
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
*env
, target_ulong address
,
10661 MMUAccessType access_type
, ARMMMUIdx mmu_idx
,
10662 hwaddr
*phys_ptr
, MemTxAttrs
*txattrs
, int *prot
,
10663 target_ulong
*page_size_ptr
,
10664 ARMMMUFaultInfo
*fi
, ARMCacheAttrs
*cacheattrs
)
10666 ARMCPU
*cpu
= arm_env_get_cpu(env
);
10667 CPUState
*cs
= CPU(cpu
);
10668 /* Read an LPAE long-descriptor translation table. */
10669 ARMFaultType fault_type
= ARMFault_Translation
;
10671 ARMVAParameters param
;
10673 hwaddr descaddr
, indexmask
, indexmask_grainsize
;
10674 uint32_t tableattrs
;
10675 target_ulong page_size
;
10678 int addrsize
, inputsize
;
10679 TCR
*tcr
= regime_tcr(env
, mmu_idx
);
10680 int ap
, ns
, xn
, pxn
;
10681 uint32_t el
= regime_el(env
, mmu_idx
);
10683 uint64_t descaddrmask
;
10684 bool aarch64
= arm_el_is_aa64(env
, el
);
10685 bool guarded
= false;
10688 * This code does not handle the different format TCR for VTCR_EL2.
10689 * This code also does not support shareability levels.
10690 * Attribute and permission bit handling should also be checked when adding
10691 * support for those page table walks.
10694 param
= aa64_va_parameters(env
, address
, mmu_idx
,
10695 access_type
!= MMU_INST_FETCH
);
10697 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
10700 ttbr1_valid
= (el
< 2);
10701 addrsize
= 64 - 8 * param
.tbi
;
10702 inputsize
= 64 - param
.tsz
;
10704 param
= aa32_va_parameters(env
, address
, mmu_idx
);
10706 /* There is no TTBR1 for EL2 */
10707 ttbr1_valid
= (el
!= 2);
10708 addrsize
= (mmu_idx
== ARMMMUIdx_S2NS
? 40 : 32);
10709 inputsize
= addrsize
- param
.tsz
;
10713 * We determined the region when collecting the parameters, but we
10714 * have not yet validated that the address is valid for the region.
10715 * Extract the top bits and verify that they all match select.
10717 * For aa32, if inputsize == addrsize, then we have selected the
10718 * region by exclusion in aa32_va_parameters and there is no more
10719 * validation to do here.
10721 if (inputsize
< addrsize
) {
10722 target_ulong top_bits
= sextract64(address
, inputsize
,
10723 addrsize
- inputsize
);
10724 if (-top_bits
!= param
.select
|| (param
.select
&& !ttbr1_valid
)) {
10725 /* The gap between the two regions is a Translation fault */
10726 fault_type
= ARMFault_Translation
;
10731 if (param
.using64k
) {
10733 } else if (param
.using16k
) {
10739 /* Note that QEMU ignores shareability and cacheability attributes,
10740 * so we don't need to do anything with the SH, ORGN, IRGN fields
10741 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
10742 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
10743 * implement any ASID-like capability so we can ignore it (instead
10744 * we will always flush the TLB any time the ASID is changed).
10746 ttbr
= regime_ttbr(env
, mmu_idx
, param
.select
);
10748 /* Here we should have set up all the parameters for the translation:
10749 * inputsize, ttbr, epd, stride, tbi
10753 /* Translation table walk disabled => Translation fault on TLB miss
10754 * Note: This is always 0 on 64-bit EL2 and EL3.
10759 if (mmu_idx
!= ARMMMUIdx_S2NS
) {
10760 /* The starting level depends on the virtual address size (which can
10761 * be up to 48 bits) and the translation granule size. It indicates
10762 * the number of strides (stride bits at a time) needed to
10763 * consume the bits of the input address. In the pseudocode this is:
10764 * level = 4 - RoundUp((inputsize - grainsize) / stride)
10765 * where their 'inputsize' is our 'inputsize', 'grainsize' is
10766 * our 'stride + 3' and 'stride' is our 'stride'.
10767 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
10768 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
10769 * = 4 - (inputsize - 4) / stride;
10771 level
= 4 - (inputsize
- 4) / stride
;
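        /*
         * Worked example (illustrative): with 4K granules (stride == 9)
         * and inputsize == 48, level = 4 - (48 - 4) / 9 = 4 - 4 = 0, a
         * full four-level walk; with inputsize == 39 it is 4 - 3 = 1,
         * so level 0 is skipped entirely.
         */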
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
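    /*
     * Worked example (illustrative): for a 4K granule (stride == 9),
     * indexmask_grainsize = (1 << 12) - 1 = 0xfff. With inputsize == 39
     * and level == 1, indexmask = (1 << (39 - 27)) - 1, also 0xfff, so
     * the top-level table here happens to span exactly one 4K page.
     */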
    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
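        /*
         * A note on this packing (added for clarity): descriptor bits
         * [11:2] land in attrs[9:0] and descriptor bits [63:52] in
         * attrs[21:10], so the AF bit (descriptor bit 10) becomes attrs
         * bit 8, AP[2:1] (descriptor bits 7:6) becomes attrs[5:4], and
         * UXN/PXN (descriptor bits 54:53) become attrs[12:11], matching
         * the extract32() offsets used below.
         */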
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
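/*
 * Worked example for the DRSR decode above (illustrative): an Rsize
 * field of 7 gives rsize = 8 after the increment, i.e. a 256 byte
 * region, the smallest size that still has the eight subregion disable
 * bits; each subregion is then 2^(8 - 3) = 32 bytes.
 */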
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
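/*
 * For orientation (approximate, added note): these windows correspond
 * to the v8M PPB debug and system control ranges, roughly the
 * ITM/DWT/FPB block, the SCS and its non-secure alias, TPIU/ETM and
 * the ROM table, which are exempt so that debug access keeps working
 * however the SAU is programmed.
 */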
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
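/*
 * Worked example (illustrative): SAU regions have 32 byte granularity,
 * so SAU_RBAR = 0x20000000 with SAU_RLAR = 0x20000fe1 (enable bit set)
 * decodes to base = 0x20000000 and limit = 0x20000fff, a 4K Non-secure
 * window; setting bit 1 of RLAR would mark it NSC instead.
 */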
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
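/*
 * A note on the AP decode above (added for clarity): the two-bit RBAR
 * AP field has bit 0 selecting whether unprivileged code may access the
 * region and bit 1 making it read-only, which simple_ap_to_rw_prot()
 * expands into the PAGE_READ/PAGE_WRITE bits tested by callers.
 */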
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
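/*
 * Worked example (illustrative): a PMSAv5 region with its size field
 * set to 11 yields mask = (1 << 12) - 1, a 4KB region; doing the shift
 * in two steps as above avoids evaluating 1 << 32 for the maximum size
 * field of 31.
 */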
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
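/*
 * Worked example (illustrative): s1.attrs = 0xff (Normal, Inner/Outer
 * Write-Back, RW-allocate) combined with s2.attrs = 0x44 (Normal
 * Non-cacheable) gives 4 in each nibble, since non-cacheable wins, and
 * the resulting 0x44 is then forced to Outer Shareable as above.
 */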
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return false;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return false;
    }

    return ret;
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}
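/*
 * A note on the SYSm encoding (added for clarity): the 0x8x/0x9x cases
 * above are the v8M banked aliases, i.e. SYSm with bit 7 set, which
 * Secure code uses to reach the Non-secure copies of the banked special
 * registers.
 */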
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            if (val < limit) {
                CPUState *cs = CPU(arm_env_get_cpu(env));

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
    }
}
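/*
 * Worked example (illustrative): "MSR APSR_nzcvq, r0" reaches this
 * helper with mask bit 3 set, so apsrmask becomes XPSR_NZCV | XPSR_Q
 * and only those flag bits of val are written; an _g suffix sets mask
 * bit 2 as well, folding in XPSR_GE on DSP-capable cores.
 */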
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}

#endif
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}
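/*
 * Worked example (illustrative): dcz_blocksize = 4 gives blocklen =
 * 4 << 4 = 64 bytes, a common DC ZVA block size; with 1K target pages
 * maxidx = DIV_ROUND_UP(64, 1024) = 1, so the fast path above is a
 * single tlb_vaddr_to_host() lookup followed by one memset().
 */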
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
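/*
 * Worked example (illustrative): add16_sat(0x7000, 0x2000) computes
 * res = 0x9000; the result changed sign while the operands agreed, so
 * it saturates to 0x7fff. The tests are written with unsigned XORs so
 * that the overflow detection itself never performs signed overflow.
 */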
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}

/*
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
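/*
 * Worked example (illustrative): CRC32W with acc = 0 and val =
 * 0x04030201 stores the bytes 01 02 03 04 little-endian and feeds all
 * four to zlib; the XORs with 0xffffffff on either side cancel zlib's
 * pre- and post-inversion, matching the uninverted accumulator the ARM
 * CRC32 instructions specify.
 */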
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
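/*
 * Worked example (illustrative): with CPACR_EL1.FPEN = 1 only EL0
 * accesses trap, so this returns 1 when cur_el == 0 and 0 at EL1 and
 * above, unless a CPTR_EL2 or CPTR_EL3 trap applies on top.
 */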
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    int el;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    el = arm_current_el(env);
    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    }
    return ARMMMUIdx_S12NSE0 + el;
}

int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
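/*
 * Worked example (illustrative): a privileged handler in the Secure
 * state composes ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S,
 * plus ARM_MMU_IDX_M_NEGPRI when the NVIC reports a negative execution
 * priority, so each combination of M-profile state gets a distinct TLB.
 */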
12806 void cpu_get_tb_cpu_state(CPUARMState
*env
, target_ulong
*pc
,
12807 target_ulong
*cs_base
, uint32_t *pflags
)
12809 ARMMMUIdx mmu_idx
= arm_mmu_idx(env
);
12810 int current_el
= arm_current_el(env
);
12811 int fp_el
= fp_exception_el(env
, current_el
);
12812 uint32_t flags
= 0;
12815 ARMCPU
*cpu
= arm_env_get_cpu(env
);
12819 flags
= FIELD_DP32(flags
, TBFLAG_ANY
, AARCH64_STATE
, 1);
        /* Get control bits for tagged addresses.  */
        {
            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
            int tbii, tbid;

            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
            if (regime_el(env, stage1) < 2) {
                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
                tbid = (p1.tbi << 1) | p0.tbi;
                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
            } else {
                tbid = p0.tbi;
                tbii = tbid & !p0.tbid;
            }

            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
            flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
        }
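        /* Reading of the above (illustrative): querying the parameters
         * for addresses 0 and -1 probes the TTBR0 and TTBR1 regimes, so
         * e.g. TCR_EL1.TBI1 = 1 with TBI0 = 0 gives tbid == 0b10 -- only
         * addresses with bit 55 set get top-byte-ignore.  tbii then
         * clears the bits where TBIDx restricts tagging to data
         * accesses, leaving only the regimes where instruction fetch
         * ignores the top byte as well.
         */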
        if (cpu_isar_feature(aa64_sve, cpu)) {
            int sve_el = sve_exception_el(env, current_el);
            uint32_t zcr_len;

            /* If SVE is disabled, but FP is enabled,
             * then the effective len is 0.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                zcr_len = sve_zcr_len_for_el(env, current_el);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
        }
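        /* For instance (illustrative): at EL0 with CPACR_EL1.ZEN
         * trapping SVE while CPACR_EL1.FPEN leaves FP alone, sve_el is
         * nonzero and fp_el is 0, so ZCR_LEN is recorded as 0 even
         * though ZCR_EL1.LEN itself may be nonzero.
         */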
        if (current_el == 0) {
            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
            sctlr = env->cp15.sctlr_el[1];
        } else {
            sctlr = env->cp15.sctlr_el[current_el];
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            /*
             * In order to save space in flags, we record only whether
             * pauth is "inactive", meaning all insns are implemented as
             * a nop, or "active" when some action must be performed.
             * The decision of which action to take is left to a helper.
             */
            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
            }
        }
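        /* So, for example, setting only SCTLR_EL1.EnIA is enough to
         * mark pauth "active" here; per the comment above, working out
         * which keys are actually live is deferred to the helper, where
         * the disabled keys are expected to degrade to nops.
         */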
        if (cpu_isar_feature(aa64_bti, cpu)) {
            /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
            if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];
        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
        flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
        }
        flags = FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar);
    }
    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        }
    }
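    /* Illustration of the table above: a debugger arms stepping via
     * MDSCR_EL1.SS and returns to the stepped code with SPSR.SS = 1;
     * the first instruction then runs in Active-not-pending state, and
     * the step exception is taken afterwards, once the state has moved
     * to Active-pending.
     */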
    if (arm_cpu_data_is_big_endian(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }
    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
     * suppressing them because the requested execution priority is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_M) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    *pflags = flags;
    *cs_base = 0;
}
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }
    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
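    /* Worked example (illustrative): each 64-bit predicate word covers
     * four VQ units of 16 predicate bits, so for vq == 6 we have
     * 16 * (6 & 3) == 32 and pmask keeps the low 32 bits of the
     * partially-live word; when vq is a multiple of 4 the first dead
     * word is cleared whole.
     */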
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
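/* Usage note (an assumption from the callers visible in this file): this
 * is reached from aarch64_sve_change_el() below, and similarly from the
 * ZCR_ELx write path when a write shrinks the effective vector length,
 * in both cases passing the new length + 1 as vq.
 */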
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }
    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }
    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);
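    /* Concretely (an illustrative trace of the comment's example):
     * leaving EL2 in aa64 with an effective ZCR_EL2.LEN of 3 gives
     * old_len = 3; entering EL0 in aa32 forces new_a64 false and hence
     * new_len = 0, so the narrowing below runs with vq = 1.
     */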
    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }