#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-target.h"
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}
static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}
static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
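
/*
 * The raw accessors above assume that a register's backing state lives at
 * ri->fieldoffset inside CPUARMState. Registers whose accesses have side
 * effects must either provide raw_readfn/raw_writefn or be marked
 * ARM_CP_NO_RAW; raw_accessors_invalid() below checks this invariant.
 */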
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
   /* Return true if the regdef would cause an assertion if you called
    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
    * program bug for it not to have the NO_RAW flag).
    * NB that returning false here doesn't necessarily mean that calling
    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
    * read/write access functions which are safe for raw use" from "has
    * read/write access functions which have side effects but has forgotten
    * to provide raw access functions".
    * The tests here line up with the conditions in read/write_raw_cp_reg()
    * and assertions in raw_read()/raw_write().
    */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
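
/*
 * Migration flow for coprocessor state: the source CPU snapshots cpu->env
 * into the (index,value) list with write_cpustate_to_list(), and the
 * destination applies it with write_list_to_cpustate(). Because constant
 * registers are write-ignored, the readback comparison above is what
 * detects read-only or partially read-only registers whose incoming
 * migration value does not match.
 */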
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}
static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
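
/*
 * init_cpreg_list() works in two passes over the sorted key list: first
 * count_cpreg() sizes the arrays, then add_cpreg_to_list() fills in the
 * indexes. The final assert checks that both passes saw the same set of
 * registers (ARM_CP_NO_RAW and ARM_CP_ALIAS entries are skipped in both).
 */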
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}
/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}
static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}
static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
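
/*
 * The non-IS TLB write functions below consult tlb_force_broadcast() first
 * and, when it returns true, simply forward to their _is_ (inner shareable)
 * counterparts so the invalidation reaches all cores instead of only the
 * local one.
 */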
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}
static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint32_t mask = 0;

        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}
static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) } },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}
/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
  return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
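
/*
 * Worked example (illustrative values, not from the original source): with
 * PMCR.N == 4, pmu_num_counters() returns 4 and pmu_counter_mask() returns
 * (1 << 31) | 0xf == 0x8000000f, i.e. the PMCCNTR bit plus one bit per
 * implemented event counter.
 */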
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     * which is used to calculate the running count.
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
static bool event_always_supported(CPUARMState *env)
{
    return true;
}
static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}
/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}
#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}
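
/*
 * Note that with ARM_CPU_FREQ fixed at 1 GHz, ARM_CPU_FREQ /
 * NANOSECONDS_PER_SECOND == 1, so emulated cycles currently map 1:1 onto
 * nanoseconds of QEMU_CLOCK_VIRTUAL time in both conversion directions.
 */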
static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}
static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}
static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
};
/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
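
/*
 * For illustration: event number 0x011 (CPU_CYCLES) is within MAX_EVENT_ID
 * and, once pmu_init() has run on a system-mode CPU, supported_event_map[0x011]
 * holds its index into pm_events[], so event_supported(0x011) returns true.
 */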
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
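
/*
 * Summary of the checks above: a counter counts only if it is "enabled"
 * (PMCR.E or MDCR_EL2.HPME, plus its PMCNTEN bit), not "prohibited" by the
 * MDCR_EL2.HPMD / MDCR_EL3.SPME / PMCR.DP controls for the current EL and
 * security state, and not "filtered" out by the P/U/NSK/NSU/NSH/M bits of
 * its PMEVTYPER/PMCCFILTR filter for that EL.
 */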
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
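
/*
 * For illustration of the delta scheme: the guest-visible count is
 * (underlying count - c15_ccnt_delta). An overflow is detected when the
 * tracked overflow bit (bit 63 with PMCR.LC set, else bit 31) was set in
 * the old value and is clear in the new one, i.e. old & ~new & mask.
 */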
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
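
/*
 * pmccntr_op_start()/pmccntr_op_finish() must bracket any operation that
 * reads or reconfigures PMCCNTR. When an overflow is predicted, finish
 * re-arms cpu->pmu_timer, and it converts the guest-visible count back
 * into a delta against the free-running cycle count.
 */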
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}
void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}
void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}
void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}
static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
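
/*
 * The AArch32 view of PMCCNTR is 32 bits wide, so pmccntr_write32()
 * read-modify-writes only the low half of the 64-bit counter:
 * deposit64(cur_val, 0, 32, value) preserves bits [63:32] of cur_val.
 */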
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}
static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}
static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}
static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}
static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}
static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}
static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}
static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}
static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}
static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}
static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}
static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}
static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}
static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}
static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}
static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}
static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1. */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
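
/*
 * When HCR_EL2.IMO/FMO route interrupts to EL2, the guest's view of ISR
 * reports the virtual interrupt lines (VIRQ/VFIQ) instead of the physical
 * ones; the A (SError) bit stays clear because QEMU never generates
 * external aborts.
 */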
1925 static const ARMCPRegInfo v7_cp_reginfo
[] = {
1926 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1927 { .name
= "NOP", .cp
= 15, .crn
= 7, .crm
= 0, .opc1
= 0, .opc2
= 4,
1928 .access
= PL1_W
, .type
= ARM_CP_NOP
},
1929 /* Performance monitors are implementation defined in v7,
1930 * but with an ARM recommended set of registers, which we
1933 * Performance registers fall into three categories:
1934 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1935 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1936 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1937 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1938 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
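
/* Concretely: an EL0 read of CNTFRQ is allowed only while CNTKCTL[1:0]
 * (PL0PCTEN/PL0VCTEN) is nonzero, and any write below the highest
 * implemented EL is rejected -- e.g. with EL2 implemented, an EL1 write
 * takes the UNCATEGORIZED trap even though EL1 reads are fine.
 */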

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
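
/* Helper note: the guest-visible counter is derived by dividing the ns
 * virtual clock by GTIMER_SCALE (the assumed ns-per-tick constant defined
 * in the target/arm headers), so the counter advances once per scale
 * period rather than once per nanosecond.
 */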

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
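
/* A worked pass through the recalculation above: with cval = 1000,
 * offset = 0 and a current count of 900, ISTATUS is 0 (900 < 1000) and
 * the QEMUTimer is re-armed for nexttick = 1000; the gt_timer instances
 * are assumed to be created elsewhere with GTIMER_SCALE as their scale,
 * so nexttick is expressed in counter ticks. Once the count passes cval,
 * ISTATUS becomes 1 and the only remaining transition is the counter
 * wrap, hence the UINT64_MAX deadline clamped to INT64_MAX / GTIMER_SCALE.
 */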

static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
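
/* TVAL is architecturally a 32-bit downcounter view of the 64-bit state:
 * the value read is cval - (count - offset) truncated to 32 bits, so with
 * cval = 150 and an adjusted count of 100 a read returns 50, and once the
 * count passes cval the result goes negative (wrapping as uint32_t).
 */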

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
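
/* As manipulated above, the CNT*_CTL layout is: bit 0 ENABLE, bit 1 IMASK,
 * bit 2 ISTATUS (read-only, maintained by gt_recalc_timer()). Only the low
 * two bits are guest-writable, hence deposit64(oldval, 0, 2, value).
 */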

static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
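    /* Assuming the usual 16ns GTIMER_SCALE, this reset value works out to
     * 1e9 / 16 = 62500000, i.e. a 62.5MHz guest-visible counter frequency.
     */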
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    /* The ATS12NSO* operations must trap to EL3 if executed in
     * Secure EL1 (which can only happen if EL3 is AArch64).
     * They are simply UNDEF if executed from NS EL1.
     * They function normally from EL2 or EL3.
     */
    if (arm_current_el(env) == 1) {
        if (arm_is_secure_below_el3(env)) {
            return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
        }
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
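
/* Summary of the 64-bit PAR encoding built above: on success, bit 11 is
 * always set (LPAE), bits [47:12] hold the PA, bit 9 NS, bits [8:7] SH and
 * bits [63:56] the MAIR-format attributes; on failure, bit 0 (F) is set,
 * bits [6:1] carry the long-descriptor fault status, and bits 9 (S) and
 * 8 (PTW) flag stage-2-related faults.
 */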

static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
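
/* Example: extended_mpu_ap_bits(0x0f) spreads the two low 2-bit fields to
 * 0x33, and simple_mpu_ap_bits(0x33) compresses that back to 0x0f -- the
 * helpers are inverses, converting the eight PMSAv5 region permissions
 * between the packed 2-bits-per-region view and the 4-bits-per-region
 * extended view.
 */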

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}

static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
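
/* Worked example of the short-descriptor masks above: for TTBCR.N = 2
 * (maskshift = 2), mask = ~(0xffffffff >> 2) = 0xc0000000 and
 * base_mask = ~(0x3fff >> 2) = 0xfffff000 -- i.e. the top two VA bits
 * select between TTBR0 and TTBR1 (as used by the translation code
 * elsewhere in this file) and the TTBR0 table base is aligned to 4K.
 */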

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = arm_env_get_cpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
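
/* In the 64-bit TTBR view the ASID occupies bits [63:48], which is exactly
 * the field the extract64(..., 48, 16) comparison above inspects: a write
 * that changes only the table base address avoids the full TLB flush.
 */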

static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}

static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};

/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */
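
/* The writefns below work in terms of QEMU's internal MMU indexes:
 * ARMMMUIdxBit_S12NSE0/1 cover the Non-secure EL1&0 regime and
 * ARMMMUIdxBit_S1SE0/1 the Secure one, so each op need only flush the
 * pair matching the current security state (plus S2NS where stage 2
 * translations are in scope).
 */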
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vmalle1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}
static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}
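
/* In the Xt value of a TLBI-by-VA operation, bits [43:0] hold VA[55:12],
 * so "sextract64(value << 12, 0, 56)" above reconstructs the sign-extended,
 * page-aligned virtual address that the instruction names.
 */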
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vae1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
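
/* DCZID_EL0.BS is log2 of the DC ZVA block size in words, and QEMU's
 * dcz_blocksize uses the same encoding (e.g. 7 => 512-byte blocks),
 * so the blocksize can be OR'd directly with the DZP bit here.
 */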
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
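
/* PSTATE.SP selects between SP_EL0 and SP_ELx as the current stack pointer;
 * update_spsel() also re-banks env->xregs[31] against the sp_el[] array so
 * that subsequent code sees the right SP.
 */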
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
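
/* For the ARM_CP_STATE_AA64 entries above, the {opc0, opc1, crn, crm, opc2}
 * tuple mirrors the MSR/MRS system-register encoding; NZCV, for instance,
 * is S3_3_C4_C2_0 (op0=3 op1=3 CRn=4 CRm=2 op2=0).
 */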
/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}
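
/* Example: an AArch32 MCR of 0x1 to HCR2 sets only HCR_EL2 bit 32, since
 * deposit64(old, 32, 32, value) replaces the high half of the 64-bit
 * register and leaves the half written via HCR untouched.
 */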
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }
    return ret;
}
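
/* Callers should use arm_hcr_el2_eff() rather than reading cp15.hcr_el2
 * directly, so the TGE/E2H overrides above are applied consistently; the
 * interrupt routing code, for example, tests
 * (arm_hcr_el2_eff(env) & HCR_IMO) rather than the raw register value.
 */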
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
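
/* 0xC5ACCE55 is the architected OS Lock Access key: AArch32 software sets
 * the lock by writing the key value and clears it by writing anything else,
 * whereas AArch64 software writes the OSLK bit (bit 0) directly.
 */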
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
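/*
 * Illustrative sketch (not part of the original source): the pair of
 * extract32() tests on CPACR_EL1 above implement the decode of the 2-bit
 * ZEN field (bits [17:16]) shown below; FPEN (bits [21:20]) follows the
 * same pattern. The helper name is hypothetical.
 */
static inline bool cpacr_zen_traps_example(uint32_t cpacr_el1, int el)
{
    switch (extract32(cpacr_el1, 16, 2)) {
    case 0:
    case 2:
        return true;        /* trap EL0 and EL1 accesses */
    case 1:
        return el == 0;     /* trap only EL0 accesses */
    default:
        return false;       /* 3: trap no accesses */
    }
}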
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }
    return zcr_len;
}
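/*
 * Illustrative sketch (not part of the original source): the value
 * returned above is "vq - 1", where one vq is a 128-bit granule. For
 * example, with sve_max_vq == 16 and ZCR_EL1.LEN == 3, an EL1 access
 * yields MIN(15, 3) == 3, i.e. a 4 * 128 = 512-bit vector. The helper
 * name below is hypothetical.
 */
static inline uint32_t sve_zcr_len_to_bytes_example(uint32_t zcr_len)
{
    return (zcr_len + 1) * 16;  /* 128 bits == 16 bytes per granule */
}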
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
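/*
 * Illustrative sketch (not part of the original source): the ctz32/cto32
 * pair above turns a contiguous run of BAS bits into an (offset, length)
 * pair. For example BAS == 0b00111100 gives start == 2 and len == 4, i.e.
 * watch four bytes starting at WVR + 2. The helper name is hypothetical;
 * bas must be non-zero, as the early return above guarantees.
 */
static inline void bas_decode_example(int bas, int *start, int *len)
{
    *start = ctz32(bas);            /* index of the first set bit */
    *len = cto32(bas >> *start);    /* length of the run of ones */
}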
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
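/*
 * Illustrative sketch (not part of the original source): the expression
 * above keeps bits [48:2] and replicates bit 48 into bits [63:49], so a
 * write of 0x0001000000000004 reads back as 0xffff000000000004. The
 * helper name is hypothetical.
 */
static inline uint64_t dbgwvr_canonical_example(uint64_t value)
{
    /* Sign-extend from bit 48, then clear the RES0 bits [1:0]. */
    return sextract64(value, 0, 49) & ~3ULL;
}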
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;
        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
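/*
 * Illustrative sketch (not part of the original source): given the
 * sanitised BAS values that dbgbcr_write can store, the address-match
 * case above reduces to the small decision below. The helper name is
 * hypothetical.
 */
static inline bool bas_to_bp_addr_example(int bas, uint64_t addr,
                                          uint64_t *bp_addr)
{
    if (bas == 0) {
        return false;                       /* 0b0000: no breakpoint */
    }
    /* 0b1100 selects the second halfword; 0b0011 and 0b1111 select addr. */
    *bp_addr = (bas == 0xc) ? addr + 2 : addr;
    return true;
}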
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
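/*
 * Illustrative sketch (not part of the original source): the two
 * deposit64() calls above copy BAS[0] (bit 5) into BAS[1] (bit 6) and
 * BAS[2] (bit 7) into BAS[3] (bit 8), so the only BAS patterns that can
 * be stored are 0b0000, 0b0011, 0b1100 and 0b1111 - exactly the four
 * cases hw_breakpoint_update relies on. The helper name is hypothetical.
 */
static inline uint64_t dbgbcr_sanitise_bas_example(uint64_t value)
{
    value = deposit64(value, 6, 1, extract64(value, 5, 1)); /* BAS[1]=BAS[0] */
    value = deposit64(value, 8, 1, extract64(value, 7, 1)); /* BAS[3]=BAS[2] */
    return value;
}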
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
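/*
 * Illustrative sketch (not part of the original source): the DBGDIDR
 * fields decoded above hold "count minus 1", so a field value of 5 means
 * six registers. The helper name is hypothetical.
 */
static inline int dbgdidr_num_brps_example(uint32_t dbgdidr)
{
    return extract32(dbgdidr, 24, 4) + 1;   /* BRPs field holds count - 1 */
}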
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode.  */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}

static CPAccessResult access_lor_other(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access denied in secure mode.  */
        return CP_ACCESS_TRAP;
    }
    return access_lor_ns(env);
}
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.hi) },
    REGINFO_SENTINEL
};
#endif
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo predinv_reginfo[] = {
    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement four counters in
         * addition to the cycle count register.
         */
        unsigned int i, pmcrn = 4;
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
        for (i = 0; i < pmcrn; i++) {
            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
            ARMCPRegInfo pmev_regs[] = {
                { .name = pmevcntr_name, .cp = 15, .crn = 14,
                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
                  .raw_readfn = pmevcntr_rawread,
                  .raw_writefn = pmevcntr_rawwrite },
                { .name = pmevtyper_name, .cp = 15, .crn = 14,
                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .accessfn = pmreg_access },
                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
                  .type = ARM_CP_IO,
                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
                  .raw_writefn = pmevtyper_rawwrite },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, pmev_regs);
            g_free(pmevcntr_name);
            g_free(pmevcntr_el0_name);
            g_free(pmevtyper_name);
            g_free(pmevtyper_el0_name);
        }
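        /*
         * Illustrative note (not from the original source): the encodings
         * built in the loop above follow the architected PMEVCNTR<n> /
         * PMEVTYPER<n> layout: crm carries bits [4:3] of the counter index
         * (8 | ... for counters, 12 | ... for event type registers) while
         * opc2 carries bits [2:0]. Counter 0 is therefore crm = 8,
         * opc2 = 0, and a hypothetical counter 11 would be crm = 9,
         * opc2 = 3.
         */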
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
        FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
        ARMCPRegInfo v81_pmu_regs[] = {
            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v81_pmu_regs);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr1},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              /* At present, only SVEver == 0 is defined anyway.  */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
            { .name = "ID_AA64PFR0_EL1",
              .exported_bits = 0x000f000f00ff0000,
              .fixed_bits    = 0x0000000000000011 },
            { .name = "ID_AA64PFR1_EL1",
              .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64ZFR0_EL1"           },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits    = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1"          },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits    = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1"           },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true                     },
            { .name = "ID_AA64AFR*",
              .is_glob = true                     },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true                     },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-A32HPD.  */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1",
              .exported_bits = 0x00000000ffffffff },
            { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        ARMCPRegInfo mpidr_cp_reginfo[] = {
            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
#ifdef CONFIG_USER_ONLY
        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
            { .name = "MPIDR_EL1",
              .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
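    /*
     * Illustrative note (not from the original source): for a hypothetical
     * reset_cbar of 0x0000001080000000, the cbar32 expression above yields
     * extract64(..., 18, 14) == 0x2000 and extract64(..., 32, 12) == 0x010,
     * so cbar32 == 0x80000010: PERIPHBASE[43:32] lands in bits [11:0] of
     * the 32-bit view while PERIPHBASE[31:18] keeps its position in bits
     * [31:18].
     */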
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        /*
         * A trivial implementation of ARMv8.1-LOR leaves all of these
         * registers fixed at 0, which indicates that there are zero
         * supported Limited Ordering regions.
         */
        static const ARMCPRegInfo lor_reginfo[] = {
            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
              .access = PL1_R, .accessfn = access_lorid,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, lor_reginfo);
    }
6652 if (cpu_isar_feature(aa64_sve
, cpu
)) {
6653 define_one_arm_cp_reg(cpu
, &zcr_el1_reginfo
);
6654 if (arm_feature(env
, ARM_FEATURE_EL2
)) {
6655 define_one_arm_cp_reg(cpu
, &zcr_el2_reginfo
);
6657 define_one_arm_cp_reg(cpu
, &zcr_no_el2_reginfo
);
6659 if (arm_feature(env
, ARM_FEATURE_EL3
)) {
6660 define_one_arm_cp_reg(cpu
, &zcr_el3_reginfo
);
6664 #ifdef TARGET_AARCH64
6665 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
6666 define_arm_cp_regs(cpu
, pauth_reginfo
);
6671 * While all v8.0 cpus support aarch64, QEMU does have configurations
6672 * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
6673 * which will set ID_ISAR6.
6675 if (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)
6676 ? cpu_isar_feature(aa64_predinv
, cpu
)
6677 : cpu_isar_feature(aa32_predinv
, cpu
)) {
6678 define_arm_cp_regs(cpu
, predinv_reginfo
);
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n",
                      name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
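
/* The hash keys built above pack the full coprocessor coordinates into one
 * uint32_t. As a sketch (see ENCODE_CP_REG in cpu.h for the authoritative
 * layout), an AArch32 key is roughly
 *
 *   (ns << CP_REG_NS_SHIFT) | (cp << 16) | (is64 << 15) |
 *   (crn << 11) | (crm << 7) | (opc1 << 3) | opc2
 *
 * so two definitions collide only if every field, including the security
 * state, matches -- which is exactly the "Register redefined" case trapped
 * above.
 */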
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of the register */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
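
/* As an illustration of the wildcard expansion above, an AArch32-only
 * definition shaped like
 *
 *   { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8,
 *     .crm = CP_ANY, .opc2 = 0, ... }
 *
 * (a hypothetical shape, not a quote of the real TLBIALL reginfo) would be
 * entered into the hash table sixteen times, once per crm value 0..15,
 * with every entry other than crm == 0 marked ARM_CP_ALIAS | ARM_CP_NO_GDB
 * by add_cpreg_to_hashtable().
 */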
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}
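
/* A typical ARMCPRegUserSpaceInfo table entry (hypothetical values):
 *
 *   { .name = "ID_AA64PFR0_EL1",
 *     .exported_bits = 0x000f000f, .fixed_bits = 0x1 },
 *   { .name = "ID_AA64*", .is_glob = true },
 *
 * The first form rewrites one register's reset value bit by bit; the glob
 * form zeroes every matching register wholesale.
 */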
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
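
/* For reference, the bits reassembled by cpsr_read() sit at their
 * architectural CPSR positions:
 *
 *   31 30 29 28  27    26:25  24  19:16  15:10   9 8 7 6 5   4:0
 *    N  Z  C  V   Q  IT[1:0]   J    GE  IT[7:2]  E A I F T  M[4:0]
 *
 * NZCV, Q, GE, IT and T are kept in separate CPUARMState fields for TCG's
 * benefit and only folded back together when the guest reads the CPSR.
 */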
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                env->uncached_cpsr |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
#ifdef CONFIG_USER_ONLY

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contain a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
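
/* Worked example: a 64-bit EL3 (is64 = 1) with SCR.IRQ = 0, SCR.RW = 1 and
 * HCR.IMO = 1 routes an IRQ taken from non-secure EL0 via row "1 0 1 1"
 * above: target_el_table[1][0][1][1][0][0] == 2, i.e. the interrupt is
 * taken to EL2.
 */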
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    }

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
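
/* The four cases above reduce to a simple table, where "current" means the
 * security state selected by env->v7m.secure:
 *
 *   requested bank   requested SP   location
 *   current          active         env->regs[13]
 *   current          inactive       env->v7m.other_sp
 *   other            PSP            env->v7m.other_ss_psp
 *   other            MSP            env->v7m.other_ss_msp
 */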
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
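
/* The callee-saves frame written above is 0x28 bytes:
 *
 *   offset 0x00: integrity signature 0xfefa125b
 *   offset 0x04: reserved
 *   offset 0x08..0x24: r4-r11
 *
 * do_v7m_exception_exit() checks the signature word before unstacking.
 */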
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }

                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            return true;
        }
    }

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed. */
    env->regs[13] = frameptr;

    return !stacked_ok;
}
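
/* This is the standard eight-word v7M/v8M exception frame laid out above:
 *
 *   offset 0x00: r0    offset 0x10: r12
 *   offset 0x04: r1    offset 0x14: lr (r14)
 *   offset 0x08: r2    offset 0x18: return address (r15)
 *   offset 0x0c: r3    offset 0x1c: xPSR (SPREALIGN recorded in bit 9)
 */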
8227 static void do_v7m_exception_exit(ARMCPU
*cpu
)
8229 CPUARMState
*env
= &cpu
->env
;
8232 bool ufault
= false;
8233 bool sfault
= false;
8234 bool return_to_sp_process
;
8235 bool return_to_handler
;
8236 bool rettobase
= false;
8237 bool exc_secure
= false;
8238 bool return_to_secure
;
8240 /* If we're not in Handler mode then jumps to magic exception-exit
8241 * addresses don't have magic behaviour. However for the v8M
8242 * security extensions the magic secure-function-return has to
8243 * work in thread mode too, so to avoid doing an extra check in
8244 * the generated code we allow exception-exit magic to also cause the
8245 * internal exception and bring us here in thread mode. Correct code
8246 * will never try to do this (the following insn fetch will always
8247 * fault) so we the overhead of having taken an unnecessary exception
8250 if (!arm_v7m_is_handler_mode(env
)) {
8254 /* In the spec pseudocode ExceptionReturn() is called directly
8255 * from BXWritePC() and gets the full target PC value including
8256 * bit zero. In QEMU's implementation we treat it as a normal
8257 * jump-to-register (which is then caught later on), and so split
8258 * the target value up between env->regs[15] and env->thumb in
8259 * gen_bx(). Reconstitute it.
8261 excret
= env
->regs
[15];
8266 qemu_log_mask(CPU_LOG_INT
, "Exception return: magic PC %" PRIx32
8267 " previous exception %d\n",
8268 excret
, env
->v7m
.exception
);
8270 if ((excret
& R_V7M_EXCRET_RES1_MASK
) != R_V7M_EXCRET_RES1_MASK
) {
8271 qemu_log_mask(LOG_GUEST_ERROR
, "M profile: zero high bits in exception "
8272 "exit PC value 0x%" PRIx32
" are UNPREDICTABLE\n",
8276 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8277 /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
8278 * we pick which FAULTMASK to clear.
8280 if (!env
->v7m
.secure
&&
8281 ((excret
& R_V7M_EXCRET_ES_MASK
) ||
8282 !(excret
& R_V7M_EXCRET_DCRS_MASK
))) {
8284 /* For all other purposes, treat ES as 0 (R_HXSR) */
8285 excret
&= ~R_V7M_EXCRET_ES_MASK
;
8287 exc_secure
= excret
& R_V7M_EXCRET_ES_MASK
;
8290 if (env
->v7m
.exception
!= ARMV7M_EXCP_NMI
) {
8291 /* Auto-clear FAULTMASK on return from other than NMI.
8292 * If the security extension is implemented then this only
8293 * happens if the raw execution priority is >= 0; the
8294 * value of the ES bit in the exception return value indicates
8295 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
8297 if (arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8298 if (armv7m_nvic_raw_execution_priority(env
->nvic
) >= 0) {
8299 env
->v7m
.faultmask
[exc_secure
] = 0;
8302 env
->v7m
.faultmask
[M_REG_NS
] = 0;
8306 switch (armv7m_nvic_complete_irq(env
->nvic
, env
->v7m
.exception
,
8309 /* attempt to exit an exception that isn't active */
8313 /* still an irq active now */
8316 /* we returned to base exception level, no nesting.
8317 * (In the pseudocode this is written using "NestedActivation != 1"
8318 * where we have 'rettobase == false'.)
8323 g_assert_not_reached();
8326 return_to_handler
= !(excret
& R_V7M_EXCRET_MODE_MASK
);
8327 return_to_sp_process
= excret
& R_V7M_EXCRET_SPSEL_MASK
;
8328 return_to_secure
= arm_feature(env
, ARM_FEATURE_M_SECURITY
) &&
8329 (excret
& R_V7M_EXCRET_S_MASK
);
8331 if (arm_feature(env
, ARM_FEATURE_V8
)) {
8332 if (!arm_feature(env
, ARM_FEATURE_M_SECURITY
)) {
8333 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
8334 * we choose to take the UsageFault.
8336 if ((excret
& R_V7M_EXCRET_S_MASK
) ||
8337 (excret
& R_V7M_EXCRET_ES_MASK
) ||
8338 !(excret
& R_V7M_EXCRET_DCRS_MASK
)) {
8342 if (excret
& R_V7M_EXCRET_RES0_MASK
) {
8346 /* For v7M we only recognize certain combinations of the low bits */
8347 switch (excret
& 0xf) {
8348 case 1: /* Return to Handler */
8350 case 13: /* Return to Thread using Process stack */
8351 case 9: /* Return to Thread using Main stack */
8352 /* We only need to check NONBASETHRDENA for v7M, because in
8353 * v8M this bit does not exist (it is RES1).
8356 !(env
->v7m
.ccr
[env
->v7m
.secure
] &
8357 R_V7M_CCR_NONBASETHRDENA_MASK
)) {
8367 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
8368 * Handler mode (and will be until we write the new XPSR.Interrupt
8369 * field) this does not switch around the current stack pointer.
8370 * We must do this before we do any kind of tailchaining, including
8371 * for the derived exceptions on integrity check failures, or we will
8372 * give the guest an incorrect EXCRET.SPSEL value on exception entry.
8374 write_v7m_control_spsel_for_secstate(env
, return_to_sp_process
, exc_secure
);
8377 env
->v7m
.sfsr
|= R_V7M_SFSR_INVER_MASK
;
8378 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SECURE
, false);
8379 qemu_log_mask(CPU_LOG_INT
, "...taking SecureFault on existing "
8380 "stackframe: failed EXC_RETURN.ES validity check\n");
8381 v7m_exception_taken(cpu
, excret
, true, false);
8386 /* Bad exception return: instead of popping the exception
8387 * stack, directly take a usage fault on the current stack.
8389 env
->v7m
.cfsr
[env
->v7m
.secure
] |= R_V7M_CFSR_INVPC_MASK
;
8390 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
, env
->v7m
.secure
);
8391 qemu_log_mask(CPU_LOG_INT
, "...taking UsageFault on existing "
8392 "stackframe: failed exception return integrity check\n");
8393 v7m_exception_taken(cpu
, excret
, true, false);
8398 * Tailchaining: if there is currently a pending exception that
8399 * is high enough priority to preempt execution at the level we're
8400 * about to return to, then just directly take that exception now,
8401 * avoiding an unstack-and-then-stack. Note that now we have
     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
     * our current execution priority is already the execution priority we are
     * returning to -- none of the state we would unstack or set based on
     * the EXCRET value affects it.
     */
    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4;
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
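/*
 * Summary of the mapping implemented above (derived from the code, for
 * quick reference):
 *   x0-x7   <- r0-r7
 *   x8-x12  <- r8-r12 (in FIQ mode: the banked user copies)
 *   x13/x14 <- SP/LR of USR,  x15 <- SP of HYP,
 *   x16/x17 <- LR/SP of IRQ,  x18/x19 <- LR/SP of SVC,
 *   x20/x21 <- LR/SP of ABT,  x22/x23 <- LR/SP of UND,
 *   x24-x30 <- r8-r14 of FIQ
 * taking the live r13/r14 when we are currently in the relevant mode.
 */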
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
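/*
 * The 'addr' values used above follow the AArch32 vector table layout:
 * 0x04 Undefined, 0x08 SVC/SMC, 0x0c Prefetch Abort, 0x10 Data Abort,
 * 0x18 IRQ, 0x1c FIQ, applied relative to MVBAR (Monitor), 0xffff0000
 * (high vectors) or the banked VBAR.
 */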
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
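/*
 * Summary of the vector offset computed above: within VBAR_ELx, entry
 * 0x000 is "current EL with SP_EL0", +0x200 "current EL with SP_ELx",
 * +0x400 "lower EL using AArch64" and +0x600 "lower EL using AArch32",
 * with +0x80 for IRQ and +0x100 for FIQ inside each group.
 */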
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
#ifndef CONFIG_USER_ONLY

/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
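/*
 * The simple AP[2:1] encoding handled above, in table form:
 *   0 -> privileged RW, unprivileged no access
 *   1 -> privileged RW, unprivileged RW
 *   2 -> privileged RO, unprivileged no access
 *   3 -> privileged RO, unprivileged RO
 */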
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
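/*
 * In short, the v5 walk above is: the level 1 descriptor (indexed by
 * VA[31:20]) gives either a 1MB section or a coarse/fine level 2 table,
 * and the level 2 descriptor then yields a 64KB, 4KB or 1KB page.
 * Permissions come only from the AP bits and the domain; there is no XN
 * bit, hence the "*prot |= *prot ? PAGE_EXEC : 0" above.
 */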
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
#endif /* !CONFIG_USER_ONLY */
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (el > 1) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}
10657 static ARMVAParameters
aa32_va_parameters(CPUARMState
*env
, uint32_t va
,
10660 uint64_t tcr
= regime_tcr(env
, mmu_idx
)->raw_tcr
;
10661 uint32_t el
= regime_el(env
, mmu_idx
);
10665 if (mmu_idx
== ARMMMUIdx_S2NS
) {
10667 bool sext
= extract32(tcr
, 4, 1);
10668 bool sign
= extract32(tcr
, 3, 1);
10671 * If the sign-extend bit is not the same as t0sz[3], the result
10672 * is unpredictable. Flag this as a guest error.
10674 if (sign
!= sext
) {
10675 qemu_log_mask(LOG_GUEST_ERROR
,
10676 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
10678 tsz
= sextract32(tcr
, 0, 4) + 8;
10682 } else if (el
== 2) {
10684 tsz
= extract32(tcr
, 0, 3);
10686 hpd
= extract64(tcr
, 24, 1);
10689 int t0sz
= extract32(tcr
, 0, 3);
10690 int t1sz
= extract32(tcr
, 16, 3);
10693 select
= va
> (0xffffffffu
>> t0sz
);
10695 /* Note that we will detect errors later. */
10696 select
= va
>= ~(0xffffffffu
>> t1sz
);
10700 epd
= extract32(tcr
, 7, 1);
10701 hpd
= extract64(tcr
, 41, 1);
10704 epd
= extract32(tcr
, 23, 1);
10705 hpd
= extract64(tcr
, 42, 1);
10707 /* For aarch32, hpd0 is not enabled without t2e as well. */
10708 hpd
&= extract32(tcr
, 6, 1);
10711 return (ARMVAParameters
) {
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        ttbr1_valid = (el < 2);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }
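
    /*
     * Worked example (illustrative): a 48-bit stage 1 input size with a
     * 4KB granule (stride 9) starts at level 4 - (48 - 4) / 9 = 0, while
     * a 39-bit input size starts at level 4 - (39 - 4) / 9 = 1.
     */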
    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        txattrs->target_tlb_bit0 = true;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
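
/*
 * Illustrative note: a leaf entry found at walk level L maps
 * 1 << ((stride * (4 - L)) + 3) bytes, so with a 4KB granule (stride 9)
 * a level 3 page is 4KB and a level 2 block is 2MB.
 */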
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }
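
        /*
         * Subregion example (illustrative): a 4KB region (DRSR.Rsize = 11,
         * so rsize becomes 12 above) has eight 512-byte subregions. When
         * the SRD bits adjacent to the hit subregion are consistent, the
         * loop grows rsize back until it reaches TARGET_PAGE_BITS, so the
         * translation can be cached for a whole QEMU page rather than as
         * a 512-byte subpage.
         */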
        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                fi->level = 0;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
            hit = true;
        }

        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (matchregion != -1) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
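
/*
 * Illustrative example: RBAR = 0x20000040 and RLAR = 0x200000c1 (EN = 1)
 * decode to base = 0x20000040 and limit = 0x200000c1 | 0x1f = 0x200000df,
 * i.e. a 160-byte region; since that does not cover a whole TARGET_PAGE,
 * any hit is reported with *is_subpage = true.
 */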
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
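
/*
 * Worked example (illustrative): combining a stage 1 attribute of 0xff
 * (Normal Write-Back RW-Allocate) with a stage 2 attribute of 0x44
 * (Normal Non-cacheable) yields 0x44, because non-cacheable takes
 * precedence in each nibble, and the result is then forced to Outer
 * Shareable by the 0x44 special case above.
 */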
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade an non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }
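
    /*
     * FCSE example (illustrative): with the FCSEIDR ProcID field holding
     * 0x0a000000, a virtual address 0x00001000 in the low 32MB is remapped
     * to the modified VA 0x0a001000 before the table walk; addresses at or
     * above 0x02000000 pass through unchanged.
     */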
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
    }

    return ret;
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            if (val < limit) {
                CPUState *cs = CPU(arm_env_get_cpu(env));

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        (mregion << 0);

    return tt_resp;
}
)(CPUARMState
*env
, uint64_t vaddr_in
)
12394 /* Implement DC ZVA, which zeroes a fixed-length block of memory.
12395 * Note that we do not implement the (architecturally mandated)
12396 * alignment fault for attempts to use this on Device memory
12397 * (which matches the usual QEMU behaviour of not implementing either
12398 * alignment faults or any memory attribute handling).
12401 ARMCPU
*cpu
= arm_env_get_cpu(env
);
12402 uint64_t blocklen
= 4 << cpu
->dcz_blocksize
;
12403 uint64_t vaddr
= vaddr_in
& ~(blocklen
- 1);
12405 #ifndef CONFIG_USER_ONLY
12407 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
12408 * the block size so we might have to do more than one TLB lookup.
12409 * We know that in fact for any v8 CPU the page size is at least 4K
12410 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
12411 * 1K as an artefact of legacy v5 subpage support being present in the
12412 * same QEMU executable.
12414 int maxidx
= DIV_ROUND_UP(blocklen
, TARGET_PAGE_SIZE
);
12415 void *hostaddr
[maxidx
];
12417 unsigned mmu_idx
= cpu_mmu_index(env
, false);
12418 TCGMemOpIdx oi
= make_memop_idx(MO_UB
, mmu_idx
);
12420 for (try = 0; try < 2; try++) {
12422 for (i
= 0; i
< maxidx
; i
++) {
12423 hostaddr
[i
] = tlb_vaddr_to_host(env
,
12424 vaddr
+ TARGET_PAGE_SIZE
* i
,
12426 if (!hostaddr
[i
]) {
12431 /* If it's all in the TLB it's fair game for just writing to;
12432 * we know we don't need to update dirty status, etc.
12434 for (i
= 0; i
< maxidx
- 1; i
++) {
12435 memset(hostaddr
[i
], 0, TARGET_PAGE_SIZE
);
12437 memset(hostaddr
[i
], 0, blocklen
- (i
* TARGET_PAGE_SIZE
));
12440 /* OK, try a store and see if we can populate the tlb. This
12441 * might cause an exception if the memory isn't writable,
12442 * in which case we will longjmp out of here. We must for
12443 * this purpose use the actual register value passed to us
12444 * so that we get the fault address right.
12446 helper_ret_stb_mmu(env
, vaddr_in
, 0, oi
, GETPC());
12447 /* Now we can populate the other TLB entries, if any */
12448 for (i
= 0; i
< maxidx
; i
++) {
12449 uint64_t va
= vaddr
+ TARGET_PAGE_SIZE
* i
;
12450 if (va
!= (vaddr_in
& TARGET_PAGE_MASK
)) {
12451 helper_ret_stb_mmu(env
, va
, 0, oi
, GETPC());
12456 /* Slow path (probably attempt to do this to an I/O device or
12457 * similar, or clearing of a block of code we have translations
12458 * cached for). Just do a series of byte writes as the architecture
12459 * demands. It's not worth trying to use a cpu_physical_memory_map(),
12460 * memset(), unmap() sequence here because:
12461 * + we'd need to account for the blocksize being larger than a page
12462 * + the direct-RAM access case is almost always going to be dealt
12463 * with in the fastpath code above, so there's no speed benefit
12464 * + we would have to deal with the map returning NULL because the
12465 * bounce buffer was in use
12467 for (i
= 0; i
< blocklen
; i
++) {
12468 helper_ret_stb_mmu(env
, vaddr
+ i
, 0, oi
, GETPC());
12472 memset(g2h(vaddr
), 0, blocklen
);
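
/*
 * Illustrative note (assuming the 1KB TARGET_PAGE_SIZE described above):
 * a CPU with dcz_blocksize = 4 zeroes 4 << 4 = 64 bytes per DC ZVA, so
 * DIV_ROUND_UP(64, TARGET_PAGE_SIZE) = 1 and the fast path needs only a
 * single TLB lookup.
 */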
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
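
/*
 * Worked example (illustrative): add16_sat(0x7000, 0x2000) computes
 * res = 0x9000; the result's sign differs from the first operand's while
 * the operands share a sign, so the sum has overflowed and is clamped to
 * 0x7fff.
 */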
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;

    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
12704 uint32_t HELPER(sel_flags
)(uint32_t flags
, uint32_t a
, uint32_t b
)
12716 mask
|= 0xff000000;
12717 return (a
& mask
) | (b
& ~mask
);
/* The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
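
/*
 * Illustrative note: these helpers back the CRC32/CRC32C instructions;
 * for a byte-sized CRC32B the caller passes bytes = 1 with the upper
 * three bytes of val already zeroed, as required by the comment above.
 */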
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
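/*
 * Example: a privileged access in the Secure state while the NVIC
 * reports a negative execution priority yields
 * ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_NEGPRI | ARM_MMU_IDX_M_S.
 */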
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}
ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    int el;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    el = arm_current_el(env);
    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    }
    return ARMMMUIdx_S12NSE0 + el;
}
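/*
 * Example: a non-M-profile CPU at NS EL1 yields ARMMMUIdx_S12NSE1
 * (ARMMMUIdx_S12NSE0 + 1); the same EL when secure below EL3 yields
 * ARMMMUIdx_S1SE1.  This relies on the S12NSE* and S1SE* enum values
 * being consecutive per EL.
 */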
int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    int current_el = arm_current_el(env);
    int fp_el = fp_exception_el(env, current_el);
    uint32_t flags = 0;

    if (is_a64(env)) {
        ARMCPU *cpu = arm_env_get_cpu(env);
        uint64_t sctlr;

        *pc = env->pc;
        flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

        /* Get control bits for tagged addresses.  */
        {
            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
            int tbii, tbid;

            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
            if (regime_el(env, stage1) < 2) {
                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
                tbid = (p1.tbi << 1) | p0.tbi;
                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
            } else {
                tbid = p0.tbi;
                tbii = tbid & !p0.tbid;
            }

            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
            flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
        }

        if (cpu_isar_feature(aa64_sve, cpu)) {
            int sve_el = sve_exception_el(env, current_el);
            uint32_t zcr_len;

            /* If SVE is disabled, but FP is enabled,
             * then the effective len is 0.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                zcr_len = sve_zcr_len_for_el(env, current_el);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
        }

        sctlr = arm_sctlr(env, current_el);

        if (cpu_isar_feature(aa64_pauth, cpu)) {
            /*
             * In order to save space in flags, we record only whether
             * pauth is "inactive", meaning all insns are implemented as
             * a nop, or "active" when some action must be performed.
             * The decision of which action to take is left to a helper.
             */
            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
            }
        }

        if (cpu_isar_feature(aa64_bti, cpu)) {
            /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
            if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];
        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
        flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
        }
        flags = FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar);
    }

    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0          x         Inactive (the TB flag for SS is always 0)
     *     1          0         Active-pending
     *     1          1         Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }

    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
     * suppressing them because the requested execution priority is less
     * than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_M) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    *pflags = flags;
    *cs_base = 0;
}
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= arm_env_get_cpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
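/*
 * Worked example, assuming ARM_MAX_VQ is 16: narrowing to vq == 3
 * zeroes zregs[i].d[6..31] (bytes 48..255 of each vector register),
 * keeps the low 16 * 3 = 48 predicate bits of p[0] via
 * pmask = ~(-1ULL << 48), and clears p[1..3] entirely on the
 * subsequent loop iterations once pmask is reset to 0.
 */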
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif