#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
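
/* Worked example of the index mapping above (illustrative, not from the
 * original file): on a VFP3+NEON core nregs starts at 32, so reg 0..31
 * read the D registers, reg 32..47 are the Q-register aliases (reg 32
 * maps to aa32_vfp_qreg(env, 0)), and after nregs += 16 the final switch
 * sees reg - nregs == 0, 1, 2 for reg 48, 49, 50, i.e. FPSID, FPSCR and
 * FPEXC. On a plain VFP2 core (nregs == 16, no NEON) the three control
 * registers sit at reg 16..18 instead.
 */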
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
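
/* Minimal usage sketch for the raw accessor pair above; try_raw_write()
 * is a hypothetical helper, not part of the original file. Since
 * ARM_CP_CONST registers are write-ignored, the only way to detect a raw
 * write that did not take effect is to read the value back:
 */
#if 0
static bool try_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t v)
{
    write_raw_cp_reg(env, ri, v);
    return read_raw_cp_reg(env, ri) == v;
}
#endif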
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
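
/* Illustrative sketch of how the two list helpers pair up around
 * migration; cpreg_roundtrip_ok() is a hypothetical helper, not part of
 * the original file. The source side snapshots registers into the
 * (index,value) list and the destination replays it, relying on the
 * readback check in write_list_to_cpustate() to catch read-only
 * mismatches:
 */
#if 0
static bool cpreg_roundtrip_ok(ARMCPU *cpu)
{
    bool ok = write_cpustate_to_list(cpu);    /* env -> list */

    return write_list_to_cpustate(cpu) && ok; /* list -> env, verified */
}
#endif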
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
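
/* Design note on init_cpreg_list(): it uses a classic two-pass pattern,
 * first sizing the arrays with count_cpreg() and then filling them with
 * add_cpreg_to_list() while re-counting, so the final assert cross-checks
 * that both passes applied the same ARM_CP_NO_RAW/ARM_CP_ALIAS filter.
 */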
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR_EL2.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
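
/* Worked example for the access_tdosa/access_tdra/access_tda/access_tpm
 * checks above (illustrative): with MDCR_EL2.TPM set, a PMU access from
 * non-secure EL0 or EL1 satisfies el < 2 and traps to EL2; the same
 * access from EL2 skips that test, and only MDCR_EL3.TPM (el < 3) can
 * still route it to EL3; from EL3 neither condition fires and the access
 * proceeds.
 */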
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
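
/* Worked example for the address masking above (illustrative):
 * MAKE_64BIT_MASK(0, 12) is 0xfff, so value & ~MAKE_64BIT_MASK(0, 12)
 * clears the low 12 bits, e.g. an MVA of 0x40001abc becomes the page
 * address 0x40001000. The IPA variants instead place the IPA[39:12]
 * field of value at bits [39:12] via value << 12 and sign-extend the low
 * 40 bits with sextract64(value << 12, 0, 40).
 */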
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint32_t mask = 0;

        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
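
/* Worked example (illustrative): if PMCR.N is 4, pmu_num_counters()
 * returns 4 and pmu_counter_mask() yields
 *     (1 << 31) | ((1 << 4) - 1) == 0x8000000f,
 * i.e. the cycle-counter bit C plus one bit per implemented event
 * counter. PMCNTEN* and PMINTEN* writes are ANDed with this mask so a
 * guest cannot set enable bits for counters that do not exist.
 */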
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
        return false;
    }

    return true;
}

void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
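
/* pmccntr_sync() relies on an involution: while counting, c15_ccnt holds
 * (ticks - guest_count), and "c15_ccnt = ticks - c15_ccnt" flips that to
 * the absolute guest count (the second call bracketing a state change
 * flips it back). Illustrative numbers, not from the original file: at
 * 1000 ticks with a guest count of 100 the stored delta is 900; one sync
 * gives 1000 - 900 = 100 (the count, frozen), and a second sync at 1200
 * ticks re-arms the delta as 1200 - 100 = 1100, so a later read at 1300
 * ticks returns 1300 - 1100 = 200.
 */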
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_sync(env);
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
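
/* Worked example for isr_read() above (illustrative): with a hard IRQ
 * pending but no FIQ, the result has only CPSR_I (bit 7) set, matching
 * the architectural ISR/ISR_EL1 layout where I is bit 7 and F is bit 6;
 * the A bit (asynchronous external abort pending) is never set in QEMU.
 */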
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
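
/* The "unsigned 64 bit arithmetic" note in gt_recalc_timer() matters
 * when CNTVOFF_EL2 exceeds the current count. Illustrative numbers: with
 * count = 5 and offset = 10, count - offset wraps to 0xfffffffffffffffb,
 * which compares >= almost any cval, so ISTATUS is already 1; a signed
 * subtraction would give -5 and defer the timer almost forever.
 */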
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
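
/* TVAL is the signed 32-bit distance to the firing point: the read above
 * computes cval - (count - offset) truncated to 32 bits, and the write
 * reconstructs cval = (count - offset) + sextract64(value, 0, 32).
 * Illustrative numbers: with an offset-adjusted count of 1000, writing
 * TVAL = 100 sets cval to 1100; reading TVAL 30 ticks later returns 70,
 * and once the count passes 1100 the reported value goes negative.
 */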
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
1955 void arm_gt_ptimer_cb(void *opaque
)
1957 ARMCPU
*cpu
= opaque
;
1959 gt_recalc_timer(cpu
, GTIMER_PHYS
);
1962 void arm_gt_vtimer_cb(void *opaque
)
1964 ARMCPU
*cpu
= opaque
;
1966 gt_recalc_timer(cpu
, GTIMER_VIRT
);
1969 void arm_gt_htimer_cb(void *opaque
)
1971 ARMCPU
*cpu
= opaque
;
1973 gt_recalc_timer(cpu
, GTIMER_HYP
);
1976 void arm_gt_stimer_cb(void *opaque
)
1978 ARMCPU
*cpu
= opaque
;
1980 gt_recalc_timer(cpu
, GTIMER_SEC
);
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else
/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
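/* The masks above keep reserved bits of the 32-bit PAR as zero on writes:
 * 0xfffff6ff (v7) clears bits 8 and 11, while 0xfffff1ff (pre-v7) clears
 * bits 9-11; with LPAE every bit is backed by state and is stored raw.
 */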
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * ATS1Hx always uses the 64bit format (not supported yet).
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & HCR_VM;
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
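/* Worked example of the 64-bit PAR built above: a successful walk to PA
 * 0x40001000 in Non-secure memory with Normal write-back attributes
 * (MAIR attr 0xff) and inner-shareable (SH = 3) yields
 * par64 = 0x40001000 | (1 << 11) | (1 << 9) | (0xffULL << 56) | (3 << 7).
 */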
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
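/* The two helpers are inverses over the packed AP fields: the "simple"
 * form keeps 2 bits per region packed in bits [15:0], the extended form
 * spreads them into 4-bit slots. E.g. simple 0x0000000f (regions 0 and 1
 * both AP = 3) becomes extended 0x00000033, and converting back gives 0xf.
 */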
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}
static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}
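/* CPAR gates non-privileged access to coprocessors: the 0x3fff mask keeps
 * one enable bit per coprocessor cp0..cp13; bits 14 and 15 (which would
 * correspond to cp14/cp15) are reserved on XScale and write-ignored here.
 */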
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}
*env
)
2939 ARMCPU
*cpu
= ARM_CPU(arm_env_get_cpu(env
));
2940 uint64_t mpidr
= cpu
->mp_affinity
;
2942 if (arm_feature(env
, ARM_FEATURE_V7MP
)) {
2943 mpidr
|= (1U << 31);
2944 /* Cores which are uniprocessor (non-coherent)
2945 * but still implement the MP extensions set
2946 * bit 30. (For instance, Cortex-R5).
2948 if (cpu
->mp_is_up
) {
2949 mpidr
|= (1u << 30);
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
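/* MPIDR bit 31 (the multiprocessing-extensions flag, RES1 on v7MP) and
 * bit 30 (U, uniprocessor) are synthesized above, so e.g. a Cortex-R5
 * with affinity 0 reads MPIDR = 0xc0000000: MP extensions present, but
 * the core is a uniprocessor.
 */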
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}
static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}
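/* sextract64(value << 12, 0, 56) recovers the page-aligned VA from the
 * TLBI argument: bits [43:0] of the register hold VA[55:12], and the
 * result is sign-extended from bit 55, so a "high" kernel address such as
 * 0xffff000012345000 round-trips through the encoding correctly.
 */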
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
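/* DCZID_EL0 packs the DC ZVA block size in bits [3:0] as log2(words) and
 * the prohibit flag DZP in bit 4. With the usual dcz_blocksize of 4
 * (2^4 words = 64 bytes) and ZVA permitted this reads as 0x4; the same
 * CPU with SCTLR_EL1.DZE clear at EL0 reads 0x14.
 */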
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented.  */
    /* This may enable/disable the MMU, so do a TLB flush.  */
    tlb_flush(CPU(cpu));
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}
static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
3391 static const ARMCPRegInfo v8_cp_reginfo
[] = {
3392 /* Minimal set of EL0-visible registers. This will need to be expanded
3393 * significantly for system emulation of AArch64 CPUs.
3395 { .name
= "NZCV", .state
= ARM_CP_STATE_AA64
,
3396 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 2,
3397 .access
= PL0_RW
, .type
= ARM_CP_NZCV
},
3398 { .name
= "DAIF", .state
= ARM_CP_STATE_AA64
,
3399 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 2,
3400 .type
= ARM_CP_NO_RAW
,
3401 .access
= PL0_RW
, .accessfn
= aa64_daif_access
,
3402 .fieldoffset
= offsetof(CPUARMState
, daif
),
3403 .writefn
= aa64_daif_write
, .resetfn
= arm_cp_reset_ignore
},
3404 { .name
= "FPCR", .state
= ARM_CP_STATE_AA64
,
3405 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 4,
3406 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
3407 .readfn
= aa64_fpcr_read
, .writefn
= aa64_fpcr_write
},
3408 { .name
= "FPSR", .state
= ARM_CP_STATE_AA64
,
3409 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 4,
3410 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
3411 .readfn
= aa64_fpsr_read
, .writefn
= aa64_fpsr_write
},
3412 { .name
= "DCZID_EL0", .state
= ARM_CP_STATE_AA64
,
3413 .opc0
= 3, .opc1
= 3, .opc2
= 7, .crn
= 0, .crm
= 0,
3414 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
,
3415 .readfn
= aa64_dczid_read
},
3416 { .name
= "DC_ZVA", .state
= ARM_CP_STATE_AA64
,
3417 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 4, .opc2
= 1,
3418 .access
= PL0_W
, .type
= ARM_CP_DC_ZVA
,
3419 #ifndef CONFIG_USER_ONLY
3420 /* Avoid overhead of an access check that always passes in user-mode */
3421 .accessfn
= aa64_zva_access
,
3424 { .name
= "CURRENTEL", .state
= ARM_CP_STATE_AA64
,
3425 .opc0
= 3, .opc1
= 0, .opc2
= 2, .crn
= 4, .crm
= 2,
3426 .access
= PL1_R
, .type
= ARM_CP_CURRENTEL
},
3427 /* Cache ops: all NOPs since we don't emulate caches */
3428 { .name
= "IC_IALLUIS", .state
= ARM_CP_STATE_AA64
,
3429 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 1, .opc2
= 0,
3430 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3431 { .name
= "IC_IALLU", .state
= ARM_CP_STATE_AA64
,
3432 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 5, .opc2
= 0,
3433 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3434 { .name
= "IC_IVAU", .state
= ARM_CP_STATE_AA64
,
3435 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 5, .opc2
= 1,
3436 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3437 .accessfn
= aa64_cacheop_access
},
3438 { .name
= "DC_IVAC", .state
= ARM_CP_STATE_AA64
,
3439 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 1,
3440 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3441 { .name
= "DC_ISW", .state
= ARM_CP_STATE_AA64
,
3442 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 2,
3443 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3444 { .name
= "DC_CVAC", .state
= ARM_CP_STATE_AA64
,
3445 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 10, .opc2
= 1,
3446 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3447 .accessfn
= aa64_cacheop_access
},
3448 { .name
= "DC_CSW", .state
= ARM_CP_STATE_AA64
,
3449 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 10, .opc2
= 2,
3450 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3451 { .name
= "DC_CVAU", .state
= ARM_CP_STATE_AA64
,
3452 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 11, .opc2
= 1,
3453 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3454 .accessfn
= aa64_cacheop_access
},
3455 { .name
= "DC_CIVAC", .state
= ARM_CP_STATE_AA64
,
3456 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 14, .opc2
= 1,
3457 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3458 .accessfn
= aa64_cacheop_access
},
3459 { .name
= "DC_CISW", .state
= ARM_CP_STATE_AA64
,
3460 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 14, .opc2
= 2,
3461 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3462 /* TLBI operations */
3463 { .name
= "TLBI_VMALLE1IS", .state
= ARM_CP_STATE_AA64
,
3464 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 0,
3465 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3466 .writefn
= tlbi_aa64_vmalle1is_write
},
3467 { .name
= "TLBI_VAE1IS", .state
= ARM_CP_STATE_AA64
,
3468 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 1,
3469 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3470 .writefn
= tlbi_aa64_vae1is_write
},
3471 { .name
= "TLBI_ASIDE1IS", .state
= ARM_CP_STATE_AA64
,
3472 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 2,
3473 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3474 .writefn
= tlbi_aa64_vmalle1is_write
},
3475 { .name
= "TLBI_VAAE1IS", .state
= ARM_CP_STATE_AA64
,
3476 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 3,
3477 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3478 .writefn
= tlbi_aa64_vae1is_write
},
3479 { .name
= "TLBI_VALE1IS", .state
= ARM_CP_STATE_AA64
,
3480 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 5,
3481 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3482 .writefn
= tlbi_aa64_vae1is_write
},
3483 { .name
= "TLBI_VAALE1IS", .state
= ARM_CP_STATE_AA64
,
3484 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 7,
3485 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3486 .writefn
= tlbi_aa64_vae1is_write
},
3487 { .name
= "TLBI_VMALLE1", .state
= ARM_CP_STATE_AA64
,
3488 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
3489 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3490 .writefn
= tlbi_aa64_vmalle1_write
},
3491 { .name
= "TLBI_VAE1", .state
= ARM_CP_STATE_AA64
,
3492 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
3493 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3494 .writefn
= tlbi_aa64_vae1_write
},
3495 { .name
= "TLBI_ASIDE1", .state
= ARM_CP_STATE_AA64
,
3496 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
3497 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3498 .writefn
= tlbi_aa64_vmalle1_write
},
3499 { .name
= "TLBI_VAAE1", .state
= ARM_CP_STATE_AA64
,
3500 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
3501 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3502 .writefn
= tlbi_aa64_vae1_write
},
3503 { .name
= "TLBI_VALE1", .state
= ARM_CP_STATE_AA64
,
3504 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 5,
3505 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3506 .writefn
= tlbi_aa64_vae1_write
},
3507 { .name
= "TLBI_VAALE1", .state
= ARM_CP_STATE_AA64
,
3508 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 7,
3509 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3510 .writefn
= tlbi_aa64_vae1_write
},
3511 { .name
= "TLBI_IPAS2E1IS", .state
= ARM_CP_STATE_AA64
,
3512 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 1,
3513 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3514 .writefn
= tlbi_aa64_ipas2e1is_write
},
3515 { .name
= "TLBI_IPAS2LE1IS", .state
= ARM_CP_STATE_AA64
,
3516 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 5,
3517 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3518 .writefn
= tlbi_aa64_ipas2e1is_write
},
3519 { .name
= "TLBI_ALLE1IS", .state
= ARM_CP_STATE_AA64
,
3520 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 4,
3521 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3522 .writefn
= tlbi_aa64_alle1is_write
},
3523 { .name
= "TLBI_VMALLS12E1IS", .state
= ARM_CP_STATE_AA64
,
3524 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 6,
3525 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3526 .writefn
= tlbi_aa64_alle1is_write
},
3527 { .name
= "TLBI_IPAS2E1", .state
= ARM_CP_STATE_AA64
,
3528 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 1,
3529 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3530 .writefn
= tlbi_aa64_ipas2e1_write
},
3531 { .name
= "TLBI_IPAS2LE1", .state
= ARM_CP_STATE_AA64
,
3532 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 5,
3533 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3534 .writefn
= tlbi_aa64_ipas2e1_write
},
3535 { .name
= "TLBI_ALLE1", .state
= ARM_CP_STATE_AA64
,
3536 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 4,
3537 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3538 .writefn
= tlbi_aa64_alle1_write
},
3539 { .name
= "TLBI_VMALLS12E1", .state
= ARM_CP_STATE_AA64
,
3540 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 6,
3541 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3542 .writefn
= tlbi_aa64_alle1is_write
},
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     */
    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
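
/* Illustrative note (not part of the original source): a hypervisor that
 * sets HCR_EL2.VM to switch on stage 2 translation changes how every EL1&0
 * address translates, which is why the write above compares old and new
 * values of VM/PTW/DC and drops all cached translations with tlb_flush()
 * before the new value takes effect.
 */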

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2) },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
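
/* Summary of the access policy implemented above:
 *   EL3               -> read/write allowed
 *   Secure EL1        -> access traps to EL3
 *   NS EL1 / NS EL2   -> reads allowed, writes UNDEF
 */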

static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
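
/* Explanatory note (added): 0xC5ACCE55 is the architected OS Lock Access
 * key value that the AArch32 OSLAR accessor requires to set the lock,
 * whereas the AArch64 OSLAR_EL1 form takes the lock state directly from
 * the OSLK bit (bit 0) of the value written; the result is then mirrored
 * into OSLSR_EL1 bit 1 by the deposit32() above.
 */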

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1) },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Return the exception level to which SVE-disabled exceptions should
 * be taken, or 0 if SVE is enabled.
 */
static int sve_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    unsigned current_el = arm_current_el(env);

    /* The CPACR.ZEN controls traps to EL1:
     * 0, 2 : trap EL0 and EL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
    default:
        if (current_el <= 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        break;
    case 1:
        if (current_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* Similarly for CPACR.FPEN, after having checked ZEN.  */
    switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
    default:
        if (current_el <= 1) {
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        break;
    case 1:
        if (current_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* CPTR_EL2.  Check both TZ and TFP.  */
    if (current_el <= 2
        && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ))
        && !arm_is_secure_below_el3(env)) {
        return 2;
    }

    /* CPTR_EL3.  Check both EZ and TFP.  */
    if (!(env->cp15.cptr_el[3] & CPTR_EZ)
        || (env->cp15.cptr_el[3] & CPTR_TFP)) {
        return 3;
    }
#endif
    return 0;
}

static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);
}
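
/* For example, a guest write of 0x1f here stores 0xf: only the four-bit
 * ZCR_ELx.LEN field is kept and all higher bits read back as zero.
 */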

static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE | ARM_CP_FPU,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE | ARM_CP_FPU,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE | ARM_CP_FPU,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE | ARM_CP_FPU,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
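
/* Worked example of the BAS path above: with a doubleword-aligned WVR and
 * BAS = 0b00111100, basstart = ctz32(0x3c) = 2 and len = cto32(0xf) = 4,
 * so the watchpoint covers the four bytes at WVR+2 .. WVR+5.
 */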

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;

        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}

void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}

static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
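
/* Worked example of the BAS mirroring above: a guest write of BAS = 0b0101
 * becomes 0b1111 (first BAS[1] := BAS[0], then BAS[3] := BAS[2]), which is
 * why hw_breakpoint_update only has to consider the four BAS patterns
 * 0b0000, 0b0011, 0b1100 and 0b1111.
 */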

static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }
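
    /* Because these fields hold "number minus 1", a raw BRPs field of 5
     * means six breakpoints, and the definition loops below correspondingly
     * run from 0 to brps/wrps inclusive.
     */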

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}

/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            /* 7 is as yet unallocated and must RAZ */
            { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement only the cycle
         * count register.
         */
#ifndef CONFIG_USER_ONLY
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = cpu->midr & 0xff000000,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
#endif
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
, ARM_FEATURE_V8
)) {
4910 /* AArch64 ID registers, which all have impdef reset values.
4911 * Note that within the ID register ranges the unused slots
4912 * must all RAZ, not UNDEF; future architecture versions may
4913 * define new registers here.
4915 ARMCPRegInfo v8_idregs
[] = {
4916 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
4917 * know the right value for the GIC field until after we
4918 * define these regs.
4920 { .name
= "ID_AA64PFR0_EL1", .state
= ARM_CP_STATE_AA64
,
4921 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 0,
4922 .access
= PL1_R
, .type
= ARM_CP_NO_RAW
,
4923 .readfn
= id_aa64pfr0_read
,
4924 .writefn
= arm_cp_write_ignore
},
4925 { .name
= "ID_AA64PFR1_EL1", .state
= ARM_CP_STATE_AA64
,
4926 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 1,
4927 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4928 .resetvalue
= cpu
->id_aa64pfr1
},
4929 { .name
= "ID_AA64PFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4930 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 2,
4931 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4933 { .name
= "ID_AA64PFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4934 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 3,
4935 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4937 { .name
= "ID_AA64PFR4_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4938 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 4,
4939 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4941 { .name
= "ID_AA64PFR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4942 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 5,
4943 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4945 { .name
= "ID_AA64PFR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4946 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 6,
4947 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4949 { .name
= "ID_AA64PFR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4950 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 4, .opc2
= 7,
4951 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4953 { .name
= "ID_AA64DFR0_EL1", .state
= ARM_CP_STATE_AA64
,
4954 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 0,
4955 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4956 .resetvalue
= cpu
->id_aa64dfr0
},
4957 { .name
= "ID_AA64DFR1_EL1", .state
= ARM_CP_STATE_AA64
,
4958 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 1,
4959 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4960 .resetvalue
= cpu
->id_aa64dfr1
},
4961 { .name
= "ID_AA64DFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4962 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 2,
4963 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4965 { .name
= "ID_AA64DFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4966 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 3,
4967 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4969 { .name
= "ID_AA64AFR0_EL1", .state
= ARM_CP_STATE_AA64
,
4970 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 4,
4971 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4972 .resetvalue
= cpu
->id_aa64afr0
},
4973 { .name
= "ID_AA64AFR1_EL1", .state
= ARM_CP_STATE_AA64
,
4974 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 5,
4975 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4976 .resetvalue
= cpu
->id_aa64afr1
},
4977 { .name
= "ID_AA64AFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4978 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 6,
4979 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4981 { .name
= "ID_AA64AFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4982 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 5, .opc2
= 7,
4983 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4985 { .name
= "ID_AA64ISAR0_EL1", .state
= ARM_CP_STATE_AA64
,
4986 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 0,
4987 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4988 .resetvalue
= cpu
->id_aa64isar0
},
4989 { .name
= "ID_AA64ISAR1_EL1", .state
= ARM_CP_STATE_AA64
,
4990 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 1,
4991 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4992 .resetvalue
= cpu
->id_aa64isar1
},
4993 { .name
= "ID_AA64ISAR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4994 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 2,
4995 .access
= PL1_R
, .type
= ARM_CP_CONST
,
4997 { .name
= "ID_AA64ISAR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
4998 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 3,
4999 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5001 { .name
= "ID_AA64ISAR4_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5002 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 4,
5003 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5005 { .name
= "ID_AA64ISAR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5006 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 5,
5007 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5009 { .name
= "ID_AA64ISAR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5010 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 6,
5011 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5013 { .name
= "ID_AA64ISAR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5014 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 6, .opc2
= 7,
5015 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5017 { .name
= "ID_AA64MMFR0_EL1", .state
= ARM_CP_STATE_AA64
,
5018 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 0,
5019 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5020 .resetvalue
= cpu
->id_aa64mmfr0
},
5021 { .name
= "ID_AA64MMFR1_EL1", .state
= ARM_CP_STATE_AA64
,
5022 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 1,
5023 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5024 .resetvalue
= cpu
->id_aa64mmfr1
},
5025 { .name
= "ID_AA64MMFR2_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5026 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 2,
5027 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5029 { .name
= "ID_AA64MMFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5030 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 3,
5031 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5033 { .name
= "ID_AA64MMFR4_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5034 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 4,
5035 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5037 { .name
= "ID_AA64MMFR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5038 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 5,
5039 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5041 { .name
= "ID_AA64MMFR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5042 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 6,
5043 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5045 { .name
= "ID_AA64MMFR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5046 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 7, .opc2
= 7,
5047 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5049 { .name
= "MVFR0_EL1", .state
= ARM_CP_STATE_AA64
,
5050 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 0,
5051 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5052 .resetvalue
= cpu
->mvfr0
},
5053 { .name
= "MVFR1_EL1", .state
= ARM_CP_STATE_AA64
,
5054 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 1,
5055 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5056 .resetvalue
= cpu
->mvfr1
},
5057 { .name
= "MVFR2_EL1", .state
= ARM_CP_STATE_AA64
,
5058 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 2,
5059 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5060 .resetvalue
= cpu
->mvfr2
},
5061 { .name
= "MVFR3_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5062 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 3,
5063 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5065 { .name
= "MVFR4_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5066 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 4,
5067 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5069 { .name
= "MVFR5_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5070 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 5,
5071 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5073 { .name
= "MVFR6_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5074 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 6,
5075 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5077 { .name
= "MVFR7_EL1_RESERVED", .state
= ARM_CP_STATE_AA64
,
5078 .opc0
= 3, .opc1
= 0, .crn
= 0, .crm
= 3, .opc2
= 7,
5079 .access
= PL1_R
, .type
= ARM_CP_CONST
,
5081 { .name
= "PMCEID0", .state
= ARM_CP_STATE_AA32
,
5082 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 12, .opc2
= 6,
5083 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
5084 .resetvalue
= cpu
->pmceid0
},
5085 { .name
= "PMCEID0_EL0", .state
= ARM_CP_STATE_AA64
,
5086 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 6,
5087 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
5088 .resetvalue
= cpu
->pmceid0
},
5089 { .name
= "PMCEID1", .state
= ARM_CP_STATE_AA32
,
5090 .cp
= 15, .opc1
= 0, .crn
= 9, .crm
= 12, .opc2
= 7,
5091 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
5092 .resetvalue
= cpu
->pmceid1
},
5093 { .name
= "PMCEID1_EL0", .state
= ARM_CP_STATE_AA64
,
5094 .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 7,
5095 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
5096 .resetvalue
= cpu
->pmceid1
},
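        /* Added commentary (not in the original source): with the layout
         * above, a guest MRS of a reserved slot such as ID_AA64PFR2_EL1
         * (op0=3, op1=0, CRn=0, CRm=4, op2=2) returns the constant 0
         * rather than taking an UNDEF trap, which is what the
         * "unused ID register slots are RAZ" rule requires.
         */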
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
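        /* Added commentary (not in the original source): the MPUIR reset
         * value above places the MPU region count in bits [15:8] (the
         * DREGION field), so for example a core with 16 PMSAv7 regions
         * reads MPUIR as 0x1000.
         */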
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow
             * write access, so that they ignore writes rather than causing
             * them to UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (arm_feature(env, ARM_FEATURE_SVE)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}
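/* Added commentary (not in the original source) on the register counts
 * passed above, which must match what the get/set callbacks expose:
 *   34 = 32 AArch64 V registers + FPSR + FPCR
 *   51 = 32 VFP D registers + 16 Q-register aliases + FPSID/FPSCR/FPEXC
 *   35 = 32 D registers + the three VFP system registers
 *   19 = 16 D registers + the three VFP system registers
 */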
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n",
                      name);
    g_free(name);
}
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state.  This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank.  This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration
             * of the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
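/* Worked example (added commentary, not original code): for the AArch64
 * view of MIDR_EL1 (op0=3, op1=0, CRn=0, CRm=0, op2=0) the key computed
 * above is ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, 0, 0, 3, 0, 0).
 * Because the crm/opc1/opc2 arguments are the already-expanded concrete
 * values, a wildcarded definition ends up as one hash entry per encoding.
 */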
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
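/* Illustration (added commentary, not original code): a definition with
 * .crm = CP_ANY and concrete opc1/opc2 iterates crm over 0..15 in the
 * loops above, producing 16 calls to add_cpreg_to_hashtable(); only the
 * crm == 0 instance stays migratable, the rest being marked
 * ARM_CP_ALIAS | ARM_CP_NO_GDB by that function.
 */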
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->cp15.hcr_el2 & HCR_TGE) &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            !arm_is_secure_below_el3(env)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
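/* Example (added commentary, not original code): a guest MSR from NS EL1
 * that tries to enter Monitor mode hits the ARM_CPU_MODE_MON case above;
 * arm_current_el() is 1 < 3, so the function returns 1 and the mode
 * switch is rejected as UNPREDICTABLE.
 */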
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
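/* Example (added commentary, not original code): with N=1, Z=0, C=1, V=0
 * and all other state clear, the value assembled above is
 * (1 << 31) | (1 << 29) = 0xa0000000, matching the architectural NZCV
 * placement in CPSR[31:28].
 */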
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
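/* Edge cases handled above (added commentary, not original code): ARM
 * integer division never traps, so sdiv(INT_MIN, -1) returns INT_MIN and
 * any division by zero returns 0, as the architecture requires for
 * UDIV/SDIV.
 */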
#if defined(CONFIG_USER_ONLY)

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}
void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         IRQ        IMO      Non-secure         Secure
 *  BIT EL3   FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
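/* Worked lookup (added commentary, not original code): a non-secure IRQ
 * taken at EL0 with a 64-bit EL3, SCR.IRQ = 0, SCR.RW = 1 and HCR.IMO = 1
 * indexes the "1 0 1 1" row above: target_el_table[1][0][1][1][0][0] == 2,
 * so the exception is routed to EL2.
 */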
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    int rw;
    int scr;
    int hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
        break;
    }

    /* If HCR.TGE is set then HCR is treated as being 1 */
    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
/* Return true if we're using the process stack pointer (not the MSP) */
static bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}
/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}
void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
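/* Example (added commentary, not original code): with the CPU currently
 * Secure and using the MSP, a request for the Secure process stack
 * (secure == env->v7m.secure, want_psp != v7m_using_psp()) returns
 * &env->v7m.other_sp, while any request for the inactive security state
 * resolves to the other_ss_msp/other_ss_psp slots.
 */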
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
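/* Example (added commentary, not original code): for HardFault (exc == 3)
 * targeting Secure state, the fetch address computed above is
 * env->v7m.vecbase[M_REG_S] + 3 * 4, i.e. the fourth word of the Secure
 * vector table.
 */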
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
    } else {
        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
        frame_sp_p = &env->regs[13];
    }

    frameptr = *frame_sp_p - 0x28;

    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed.
     * When we implement v8M stack limit checking then this attempt to
     * update SP might also fail and result in a derived exception.
     */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
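/* Layout of the callee-saves frame written above, relative to the new
 * frame pointer (derived from the offsets used in this function; the word
 * at +0x04 is skipped and left unwritten here):
 *
 *   +0x00  integrity signature (0xfefa125b)
 *   +0x08  r4      +0x0c  r5      +0x10  r6      +0x14  r7
 *   +0x18  r8      +0x1c  r9      +0x20  r10     +0x24  r11
 *
 * for a total frame size of 0x28 bytes.
 */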
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed.
     * When we implement v8M stack limit checking then this attempt to
     * update SP might also fail and result in a derived exception.
     */
    env->regs[13] = frameptr;

    return !stacked_ok;
}
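/* Layout of the basic (caller-saves) frame written above, relative to the
 * new frame pointer (derived from the offsets used in this function):
 *
 *   +0x00  r0      +0x04  r1      +0x08  r2      +0x0c  r3
 *   +0x10  r12     +0x14  lr      +0x18  return address (pc)
 *   +0x1c  xPSR (with SPREALIGN recording any alignment padding)
 *
 * for a total frame size of 0x20 bytes.
 */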
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * should not be too severe.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            exc_secure = excret & R_V7M_EXCRET_ES_MASK;
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /* we returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /* We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        v7m_exception_taken(cpu, excret, true, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        return;
    }

    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        v7m_exception_taken(cpu, excret, true, false);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        return;
    }

    /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        !return_to_handler);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                v7m_exception_taken(cpu, excret, true, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                v7m_exception_taken(cpu, excret, true, false);
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}
static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}
static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* Skip the SG insn itself */
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
    qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}
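/* Summary of the fixed AArch64<->AArch32 register mapping implemented
 * above and mirrored in aarch64_sync_64_to_32() below (a restatement of
 * the code, not an additional architectural claim):
 *
 *   x0-x12  <-> r0-r12 (r8-r12 taken from the USR bank when in FIQ mode)
 *   x13/x14 <-> SP/LR_usr      x15     <-> SP_hyp
 *   x16/x17 <-> LR/SP_irq      x18/x19 <-> LR/SP_svc
 *   x20/x21 <-> LR/SP_abt      x22/x23 <-> LR/SP_und
 *   x24-x30 <-> r8_fiq-r12_fiq, SP_fiq, LR_fiq
 */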
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14. Otherwise, we copy the x register to the banked r13 and r14
     * registers.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    /* TODO: Vectored interrupt controller.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    switch_mode(env, new_mode);
    /* For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    env->daif |= mask;
    /* This is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4
     */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);

    if (arm_current_el(env) < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
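/* The immediates recognized above are the standard ARM semihosting
 * trap values: 0xab for Thumb SVC and BKPT, 0x123456 for the A32 SVC
 * encoding.
 */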
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
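/* In brief: EL2 regimes (including stage 2) report 2, the EL3 regime
 * reports 3, and all EL0/EL1 and M-profile regimes report 1; Secure EL0
 * reports 3 when EL3 is AArch32, since the translation controls then
 * live in the banked AArch32 EL3 registers.
 */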
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        return (env->cp15.hcr_el2 & HCR_VM) == 0;
    }
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}

/* Returns TBI0 value for current regime el */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return extract64(tcr->raw_tcr, 20, 1);
    } else {
        return extract64(tcr->raw_tcr, 37, 1);
    }
}

/* Returns TBI1 value for current regime el */
uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return 0;
    } else {
        return extract64(tcr->raw_tcr, 38, 1);
    }
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
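/* Note on the extract64() positions in arm_regime_tbi0/1() above: for the
 * EL1&0 regime the TBI0 and TBI1 fields sit at TCR_EL1 bits [37] and [38],
 * while the EL2/EL3 regimes have a single TBI field at TCR bit [20] and no
 * TBI1 at all (those regimes have only one VA range).
 */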
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
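/* AP[2:0] decoding implemented above, in brief (the SCTLR.S/R cases apply
 * only to pre-v7 cores; this table restates the code, nothing more):
 *
 *   AP  kernel  user
 *   0   --      --     (pre-v7: per SCTLR.S/R)
 *   1   RW      --
 *   2   RW      R
 *   3   RW      RW
 *   4   --      --     (reserved)
 *   5   R       --
 *   6   R       R
 *   7   R       R      (v6K and later only)
 */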
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
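/* S2AP is a simple read/write bitmap: bit 0 grants read, bit 1 grants
 * write. Execute is allowed only when XN is clear, and (when EL2 is
 * AArch32) only if the page is also readable.
 */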
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
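/* Worked example of the index computation above (illustrative address):
 * for address 0x12345678 the level 1 index is bits [31:20], i.e. 0x123,
 * and ((0x12345678 >> 18) & 0x3ffc) == 0x48c == 0x123 * 4, the byte
 * offset of the 4-byte descriptor within the 16KB table.
 */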
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, NULL);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->type != ARMFault_None) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->type != ARMFault_None) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
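/* Worked example (added for illustration): translating VA 0x00123456
 * through a coarse table.  The L1 index is VA[31:20] = 0x001; if that L1
 * descriptor points at a coarse table, the L2 index is VA[19:12] = 0x23,
 * and a 4k L2 descriptor of 0xabcde03e (low bits 0b10) would give
 * phys_addr = (0xabcde03e & 0xfffff000) | (0x00123456 & 0xfff)
 *           = 0xabcde456.
 */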
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
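/* Worked example (added for illustration): for 4KB granules stride = 9 and
 * grainsize = 12.  With inputsize = 40 and a suggested level of 1,
 * startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, which lies in [1, 13],
 * so the suggested starting level passes the size check.
 */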
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
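/* Worked example (added for illustration): s2attrs = 0xf (outer and inner
 * write-back) with caching enabled maps to 0xff: hiattr = loattr = 3 and
 * both hint fields become 3 (RW allocate), giving
 * (3 << 6) | (3 << 4) | (3 << 2) | 3 = 0xff.
 */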
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;
    int32_t addrsize;
    int inputsize;
    int32_t tbi = 0;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        level = 0;
        addrsize = 64;
        if (el > 1) {
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;

        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        level = 1;
        addrsize = 32;
        /* There is no TTBR1 for EL2 */
        if (el == 2) {
            ttbr1_valid = false;
        }
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    if (aarch64) {
        /* AArch64 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        /* AArch32 stage 1 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* AArch32 stage 2 translation.  */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        /* Address size is 40-bit for a stage 2 translation,
         * and t0sz can be negative (from -8 to 7),
         * so we need to adjust it to use the TTBR selecting logic below.
         */
        addrsize = 40;
        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;

        /* If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.  */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (aarch64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = ARMFault_Translation;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        epd = extract32(tcr->raw_tcr, 7, 1);
        inputsize = addrsize - t0sz;

        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 2) { /* 16KB pages */
            stride = 11;
        }
    } else {
        /* We should only be here if TTBR1 is valid */
        assert(ttbr1_valid);

        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = addrsize - t1sz;

        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 1) { /* 16KB pages */
            stride = 11;
        }
    }

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= nstable << 3; /* NS */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
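/* Worked example (added for illustration): a 48-bit AArch64 stage 1 walk
 * with 4KB granules has inputsize = 48 and stride = 9, so
 * level = 4 - (48 - 4) / 9 = 4 - 4 = 0 and the walk starts at level 0.
 * With inputsize = 39 the start level is 4 - (39 - 4) / 9 = 4 - 3 = 1,
 * i.e. one fewer lookup per translation.
 */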
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (rsize < TARGET_PAGE_BITS) {
                qemu_log_mask(LOG_UNIMP,
                              "DRSR[%d]: No support for MPU (sub)region size of"
                              " %" PRIu32 " bytes. Minimum is %d.\n",
                              n, (1 << rsize), TARGET_PAGE_SIZE);
                continue;
            }
            if (srdis) {
                continue;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
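/* Worked example (added for illustration): a 1KB region (rsize = 10 after
 * the increment) is split into eight 128-byte subregions.  For an access
 * at base + 0x300, snd = (0x300 >> 7) & 7 = 6, so bit 14 (snd + 8) of
 * DRSR[n] decides whether that particular subregion is disabled.
 */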
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                }
            }
        }
        break;
    }

    /* The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
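/* Worked example (added for illustration): an SAU region with
 * RBAR = 0x20000000 and RLAR = 0x2000ffe1 covers 0x20000000..0x2000ffff
 * (the low five RLAR bits read as ones for the limit), is enabled
 * (bit 0 set) and is not NSC (bit 1 clear), so a hit marks the address
 * as Non-secure.
 */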
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;

    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                continue;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;

            if (base & ~TARGET_PAGE_MASK) {
                qemu_log_mask(LOG_UNIMP,
                              "MPU_RBAR[%d]: No support for MPU region base"
                              "address of 0x%" PRIx32 ". Minimum alignment is "
                              "%d\n",
                              n, base, TARGET_PAGE_BITS);
                continue;
            }
            if ((limit + 1) & ~TARGET_PAGE_MASK) {
                qemu_log_mask(LOG_UNIMP,
                              "MPU_RBAR[%d]: No support for MPU region limit"
                              "address of 0x%" PRIx32 ". Minimum alignment is "
                              "%d\n",
                              n, limit, TARGET_PAGE_BITS);
                continue;
            }
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                             txattrs, prot, fi, NULL);
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
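/* Worked example (added for illustration): s1 = 0xf (write-back,
 * RW-allocate) combined with s2 = 0xa (write-through) yields
 * (2 << 2) | 3 = 0xb: the stage 2 write-through policy wins, but the
 * stage 1 allocation hints are kept.
 */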
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade an non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /* Map a single [sub]page.  */
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return 0;
    }

    return ret;
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
        env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
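/* Worked example (added for illustration): a privileged secure TT on an
 * address that hits MPU region 3 with read/write permission and SAU
 * region 1 marked Non-secure would set MREGION = 3 with MRVALID, the R
 * and RW bits, SREGION = 1 with SRVALID, plus NSR and NSRW, and would
 * leave the S bit (bit 22) clear.
 */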
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
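
/* Illustrative example (editorial, not from the original source): overflow
 * is detected when the result sign differs from a's sign while a and b
 * share a sign, e.g. add16_sat(0x7000, 0x2000) would wrap to 0x9000,
 * flipping the sign bit, so it saturates to 0x7fff. Including op_addsub.h
 * once per PFX value stamps out the q-prefixed (QADD16/QSUB8-style)
 * helper functions.
 */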
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
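
/* Editorial note: the halving forms (the sh/uh-prefixed SHADD8/UHADD16
 * style helpers above) can never overflow, because the sum or difference
 * is widened to 32 bits before the >> 1; e.g. the unsigned 8-bit case
 * computes (0xff + 0xff) >> 1 == 0xff.
 */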
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
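
/* Worked example (editorial): usad8(0x01020304, 0x04030201) sums the
 * byte-wise absolute differences |4-1| + |3-2| + |2-3| + |1-4| == 8.
 */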
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}
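
/* Editorial note: the target bits above are the FPSCR cumulative
 * exception flags: IOC (bit 0), DZC (bit 1), OFC (bit 2), UFC (bit 3),
 * IXC (bit 4) and IDC (bit 7), as defined by the ARM ARM.
 */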
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    uint32_t i, fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    i |= get_float_exception_flags(&env->vfp.fp_status_f16);
    fpscr |= vfp_exceptbits_from_host(i);

    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /* The exception flags are ORed together when we read fpscr so we
     * only need to preserve the current state in one of our
     * float_status values.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
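
/* Usage sketch (editorial): a guest write of FPSCR with RMode == 0b01
 * (FPROUNDING_POSINF) lands in the switch above and sets both fp_status
 * and fp_status_f16 to float_round_up; the vector length and stride
 * fields are cached separately so they can be exposed as TB flags.
 */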
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
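
/* Editorial note: the flags values above are the NZCV encodings the ARM
 * ARM specifies for FP comparison results: 0x6 (ZC) for equal, 0x8 (N)
 * for less than, 0x2 (C) for greater than, and 0x3 (CV) for unordered.
 */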
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, ftype, fsz, sign) \
ftype HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, ftype, fsz, sign, round) \
uint32_t HELPER(name)(ftype x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, ftype, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \
CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
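
/* Editorial note: each FLOAT_CONVS() instantiation above stamps out three
 * helpers; e.g. FLOAT_CONVS(si, s, float32, 32, ) produces vfp_sitos
 * (int to float), vfp_tosis (float to int, current rounding mode) and
 * vfp_tosizs (float to int, round towards zero).
 */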
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64
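
/* Worked example (editorial): the generated vfp_sltod(1, 4, fpst)
 * converts the fixed-point value 1/2^4 by computing int32_to_float64(1)
 * and then scaling by 2^-4, giving exactly 0.0625.
 */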
/* Conversion to/from f16 can overflow to infinity before/after scaling.
 * Therefore we convert to f64, scale, and then convert f64 to f16; or
 * vice versa for conversion to integer.
 *
 * For 16- and 32-bit integers, the conversion to f64 never rounds.
 * For 64-bit integers, any integer that would cause rounding will also
 * overflow to f16 infinity, so there is no double rounding problem.
 */

static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst)
{
    return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst);
}

uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst);
}

uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst);
}

uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst);
}

uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst);
}
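
/* Worked example (editorial): vfp_sltoh(1, 4, fpst) goes via float64, so
 * scalbn(1.0, -4) == 0.0625 is computed without double rounding and then
 * converted to the float16 encoding 0x2c00.
 */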
static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst)
{
    if (unlikely(float16_is_any_nan(f))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    } else {
        int old_exc_flags = get_float_exception_flags(fpst);
        float64 ret;

        ret = float16_to_float64(f, true, fpst);
        ret = float64_scalbn(ret, shift, fpst);
        old_exc_flags |= get_float_exception_flags(fpst)
            & float_flag_input_denormal;
        set_float_exception_flags(old_exc_flags, fpst);

        return ret;
    }
}

uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst);
}

uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst);
}

uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst);
}

uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst);
}

uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst);
}

uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
{
    return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst);
}
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
{
    float_status *fp_status = fpstp;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
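
/* Usage sketch (editorial): the translators typically wrap instructions
 * with an explicit rounding mode (FRINTP-style operations and the
 * directed-rounding converts) in a pair of set_rmode calls, installing
 * the new mode before the operation and restoring the returned previous
 * mode afterwards.
 */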
/* Half precision conversions.  */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float32 r = float16_to_float32(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float32_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float64 r = float16_to_float64(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float64_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}
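
/* Editorial note: ahp_mode carries FPSCR.AHP/FPCR.AHP; the helpers pass
 * !ahp_mode as softfloat's "ieee" flag, so AHP == 1 selects the Arm
 * alternative half-precision format, which has no infinities or NaNs.
 */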
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
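
/* Editorial note: these are the VRECPS/VRSQRTS refinement steps. recps
 * returns 2 - a*b, the factor for one Newton-Raphson iteration of the
 * reciprocal (x' = x * (2 - d*x)); rsqrts returns (3 - a*b) / 2, the
 * corresponding step for the reciprocal square root.
 */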
/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */
static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}
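
/* Worked example (editorial): recip_estimate(256), the 9-bit fixed-point
 * encoding of 0.5, yields 511 (an estimate just below 2.0), while
 * recip_estimate(511) yields 256, matching the bounds in the ARM ARM
 * RecipEstimate() pseudocode.
 */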
/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns a
 * float64 which can then be rounded to the appropriate size by the
 * callee.
 */
static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}
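
/* Editorial example: with round-to-plus-infinity, an overflowed negative
 * result must not round to infinity (round_to_inf() returns !sign_bit),
 * so the recpe helpers below return -maxnorm rather than -inf in that
 * case.
 */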
uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}
float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}
float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0> */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}
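
/* Editorial note: the exp_off values passed to call_recip_estimate()
 * above (29, 253 and 2045) are 2*bias - 1 for the half, single and
 * double formats (exponent bias 15, 127 and 1023 respectively).
 */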
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        a = a * 2 + 1;
    } else {
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}
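
/* Worked example (editorial): do_recip_sqrt_estimate(128), the smallest
 * legal input (representing 0.25), maps a to 257 and searches for the
 * largest b with 257 * (b+1)^2 < 2^28, returning 511, i.e. an estimate
 * just below 2.0 == 1/sqrt(0.25).
 */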
static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}
float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}
float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44)
     * (the sign is always clear here, since negative inputs returned
     * the default NaN above)
     */
    val = deposit64(0, 63, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}
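
/* Editorial note: the rsqrte exp_off values (44, 380 and 3068) are
 * 3*bias - 1 for each format; recip_sqrt_estimate() then halves
 * (exp_off - exp), taking the square root in the exponent domain.
 */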
uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* float_status *s = fpstp; */
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}
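
/* Worked example (editorial): URECPE on 0x80000000 (0.5 in the U0.32
 * fixed-point format) extracts input 256, recip_estimate() returns 511,
 * and the result is 511 << 23 == 0xff800000, just under 2.0.
 */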
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
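
/* Editorial note: zlib's crc32() inverts the accumulator on entry and the
 * result on exit, while the ARM CRC32 instructions specify raw polynomial
 * accumulation with neither inversion, hence the ^ 0xffffffff on both
 * sides above; the crc32c() primitive used here follows the Linux
 * convention of inverting only the output, so a single correction
 * suffices.
 */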
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
static inline int fp_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
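
/* Editorial example: with CPACR_EL1.FPEN == 1 (trap only EL0), an FP
 * instruction at EL0 makes fp_exception_el() return 1, while the same
 * instruction at EL1 returns 0 unless one of the CPTR traps fires.
 */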
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    int fp_el = fp_exception_el(env);
    uint32_t flags;

    if (is_a64(env)) {
        int sve_el = sve_exception_el(env);
        uint32_t zcr_len;

        *pc = env->pc;
        flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Get control bits for tagged addresses */
        flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
        flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;

        /* If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            int current_el = arm_current_el(env);

            zcr_len = env->vfp.zcr_el[current_el <= 1 ? 1 : current_el];
            zcr_len &= 0xf;
            if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
            }
            if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
            }
        }
        flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
    } else {
        *pc = env->regs[15];
        flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                  << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (arm_v7m_is_handler_mode(env)) {
        flags |= ARM_TBFLAG_HANDLER_MASK;