#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
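/*
 * Layout note (a summary of the accessors above, not an upstream comment):
 * for the gdb stub the VFP block is D0..D15 (or D0..D31 with VFP3),
 * optionally followed by the NEON Q-register aliases, then
 * FPSID/FPSCR/FPEXC. The return value is the number of bytes consumed
 * for the register, or 0 for "not one of ours".
 */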
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
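/*
 * In short: the raw accessors above are the migration/debug view of a
 * coprocessor register. They prefer the side-effect-free raw_readfn/
 * raw_writefn hooks, fall back to the normal readfn/writefn only when no
 * raw variant exists, and treat ARM_CP_CONST registers as read-as-reset
 * and write-ignored.
 */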
static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
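/*
 * Typical usage (e.g. on migration): write_cpustate_to_list() snapshots
 * cpu->env into the (index,value) list before it is sent, and
 * write_list_to_cpustate() replays an incoming list into cpu->env, using
 * the read-back check above to reject values the CPU cannot actually hold.
 */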
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA for EL2 and MDCR_EL3.TDOSA for EL3.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}
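/*
 * The non-IS TLB write functions below each check tlb_force_broadcast()
 * first and, when it is true, delegate to their Inner Shareable
 * counterpart above so the invalidate is broadcast to all CPUs.
 */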
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
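/*
 * Worked example: with PMCR.N = 4 event counters, pmu_counter_mask()
 * yields (1 << 31) | 0xf == 0x8000000f -- bit 31 covers the cycle
 * counter and bits [3:0] the event counters.
 */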
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
        return false;
    }

    return true;
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = 0;
    cycles = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                      ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (arm_ccnt_enabled(env)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        env->cp15.c15_ccnt = eff_cycles - env->cp15.c15_ccnt_delta;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
void pmccntr_op_finish(CPUARMState *env)
{
    if (arm_ccnt_enabled(env)) {
        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;

        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }

        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
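/*
 * Bookkeeping sketch: between op_start and op_finish, c15_ccnt holds the
 * guest-visible count. op_start() derives it by subtracting the saved
 * delta from the current cycle total; op_finish() re-derives the delta,
 * so e.g. a counter reading 100 at cycle total 1000 stores a delta of 900,
 * and a later op_start() at cycle total 1500 yields a reading of 600.
 */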
void pmu_op_start(CPUARMState *env)
{
    pmccntr_op_start(env);
}

void pmu_op_finish(CPUARMState *env)
{
    pmccntr_op_finish(env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
#else /* CONFIG_USER_ONLY */

void pmccntr_op_start(CPUARMState *env)
{
}

void pmccntr_op_finish(CPUARMState *env)
{
}

void pmu_op_start(CPUARMState *env)
{
}

void pmu_op_finish(CPUARMState *env)
{
}

#endif
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_op_finish(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
}
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
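/*
 * In the reginfo tables that follow, ARM_CP_ALIAS marks a view of state
 * that is migrated and reset via some other register description
 * (typically the 64-bit or non-secure instance), so the aliased view is
 * not saved or reset twice.
 */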
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
     /* MAIR0/1 are defined separately from their 64-bit counterpart which
      * allows them to assign the correct fieldoffset based on the endianness
      * handled in the field definitions.
      */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
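/*
 * GTIMER_SCALE (defined in cpu.h) is the number of nanoseconds per
 * generic-timer tick, so dividing the ns-resolution virtual clock by it
 * models the architected system counter frequency.
 */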
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick * GTIMER_SCALE);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
1903 static void gt_ctl_write(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
1907 ARMCPU
*cpu
= arm_env_get_cpu(env
);
1908 uint32_t oldval
= env
->cp15
.c14_timer
[timeridx
].ctl
;
1910 trace_arm_gt_ctl_write(timeridx
, value
);
1911 env
->cp15
.c14_timer
[timeridx
].ctl
= deposit64(oldval
, 0, 2, value
);
1912 if ((oldval
^ value
) & 1) {
1913 /* Enable toggled */
1914 gt_recalc_timer(cpu
, timeridx
);
1915 } else if ((oldval
^ value
) & 2) {
1916 /* IMASK toggled: don't need to recalculate,
1917 * just set the interrupt line based on ISTATUS
1919 int irqstate
= (oldval
& 4) && !(value
& 2);
1921 trace_arm_gt_imask_toggle(timeridx
, irqstate
);
1922 qemu_set_irq(cpu
->gt_timer_outputs
[timeridx
], irqstate
);
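
/* The CNT*_CTL layout used above is the architectural one: bit 0 is
 * ENABLE, bit 1 is IMASK, and bit 2 is the read-only ISTATUS, which is
 * why the write only deposits bits [1:0] and ISTATUS is recomputed by
 * gt_recalc_timer().
 */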
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}

static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
#else

/* In user-mode most of the generic timer registers are inaccessible;
 * however, modern kernels (4.12+) allow access to cntvct_el0.
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}

static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
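
/* The extended format allows four bits per protection region (only two
 * of which are defined), while the simple format packs two bits per
 * region: region n's AP field sits at bits [4n+1:4n] extended and
 * [2n+1:2n] simple, e.g. [5:4] versus [3:2] for region 1, which is
 * exactly the shuffle the shift-and-mask walks above perform.
 */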
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}

static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
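
/* Worked example for the short-descriptor masks: with N (maskshift) = 2,
 * mask = ~(0xffffffff >> 2) = 0xc0000000, so the top two VA bits select
 * TTBR1 when nonzero; base_mask = ~(0x3fff >> 2) = 0xfffff000 masks the
 * TTBR0 base, whose alignment requirement shrinks as N grows. N = 0
 * gives mask = 0 and base_mask = 0xffffc000, matching vmsa_ttbcr_reset()
 * below.
 */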
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2. */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = arm_env_get_cpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
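
/* The extract64(..., 48, 16) above compares the ASID field: for LPAE and
 * AArch64 the ASID lives in TTBRn[63:48], so only a change in those bits
 * can change the current ASID and therefore require a TLB flush here.
 */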
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}
static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vmalle1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}
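
/* tlb_force_broadcast() is expected to report whether HCR_EL2.FB applies
 * (NS EL1 with the Force Broadcast bit set), in which case the
 * non-inner-shareable invalidate above must behave like its *IS variant;
 * hence the redirect to tlbi_aa64_vmalle1is_write().
 */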
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (tlb_force_broadcast(env)) {
        tlbi_aa64_vae1is_write(env, NULL, value);
        return;
    }

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}
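
/* The operand of a TLBI-by-VA instruction carries VA[55:12] in its low
 * bits, so value << 12 rebuilds the page address and the
 * sextract64(..., 0, 56) sign-extends bit 55 into the upper VA bits,
 * so that lookups for the upper (TTBR1) half of the address space match
 * the sign-extended form used elsewhere.
 */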
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
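
/* DCZID_EL0 format: bits [3:0] are BS, the log2 of the DC ZVA block size
 * in words (cpu->dcz_blocksize is presumably stored in that form), and
 * bit 4 is DZP, which reads as 1 when DC ZVA is prohibited.
 */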
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(CPU(cpu));
}

static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
3530 static const ARMCPRegInfo v8_cp_reginfo
[] = {
3531 /* Minimal set of EL0-visible registers. This will need to be expanded
3532 * significantly for system emulation of AArch64 CPUs.
3534 { .name
= "NZCV", .state
= ARM_CP_STATE_AA64
,
3535 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 2,
3536 .access
= PL0_RW
, .type
= ARM_CP_NZCV
},
3537 { .name
= "DAIF", .state
= ARM_CP_STATE_AA64
,
3538 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 2,
3539 .type
= ARM_CP_NO_RAW
,
3540 .access
= PL0_RW
, .accessfn
= aa64_daif_access
,
3541 .fieldoffset
= offsetof(CPUARMState
, daif
),
3542 .writefn
= aa64_daif_write
, .resetfn
= arm_cp_reset_ignore
},
3543 { .name
= "FPCR", .state
= ARM_CP_STATE_AA64
,
3544 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 4,
3545 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
3546 .readfn
= aa64_fpcr_read
, .writefn
= aa64_fpcr_write
},
3547 { .name
= "FPSR", .state
= ARM_CP_STATE_AA64
,
3548 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 4,
3549 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
3550 .readfn
= aa64_fpsr_read
, .writefn
= aa64_fpsr_write
},
3551 { .name
= "DCZID_EL0", .state
= ARM_CP_STATE_AA64
,
3552 .opc0
= 3, .opc1
= 3, .opc2
= 7, .crn
= 0, .crm
= 0,
3553 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
,
3554 .readfn
= aa64_dczid_read
},
3555 { .name
= "DC_ZVA", .state
= ARM_CP_STATE_AA64
,
3556 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 4, .opc2
= 1,
3557 .access
= PL0_W
, .type
= ARM_CP_DC_ZVA
,
3558 #ifndef CONFIG_USER_ONLY
3559 /* Avoid overhead of an access check that always passes in user-mode */
3560 .accessfn
= aa64_zva_access
,
3563 { .name
= "CURRENTEL", .state
= ARM_CP_STATE_AA64
,
3564 .opc0
= 3, .opc1
= 0, .opc2
= 2, .crn
= 4, .crm
= 2,
3565 .access
= PL1_R
, .type
= ARM_CP_CURRENTEL
},
3566 /* Cache ops: all NOPs since we don't emulate caches */
3567 { .name
= "IC_IALLUIS", .state
= ARM_CP_STATE_AA64
,
3568 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 1, .opc2
= 0,
3569 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3570 { .name
= "IC_IALLU", .state
= ARM_CP_STATE_AA64
,
3571 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 5, .opc2
= 0,
3572 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3573 { .name
= "IC_IVAU", .state
= ARM_CP_STATE_AA64
,
3574 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 5, .opc2
= 1,
3575 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3576 .accessfn
= aa64_cacheop_access
},
3577 { .name
= "DC_IVAC", .state
= ARM_CP_STATE_AA64
,
3578 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 1,
3579 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3580 { .name
= "DC_ISW", .state
= ARM_CP_STATE_AA64
,
3581 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 2,
3582 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3583 { .name
= "DC_CVAC", .state
= ARM_CP_STATE_AA64
,
3584 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 10, .opc2
= 1,
3585 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3586 .accessfn
= aa64_cacheop_access
},
3587 { .name
= "DC_CSW", .state
= ARM_CP_STATE_AA64
,
3588 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 10, .opc2
= 2,
3589 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3590 { .name
= "DC_CVAU", .state
= ARM_CP_STATE_AA64
,
3591 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 11, .opc2
= 1,
3592 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3593 .accessfn
= aa64_cacheop_access
},
3594 { .name
= "DC_CIVAC", .state
= ARM_CP_STATE_AA64
,
3595 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 14, .opc2
= 1,
3596 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3597 .accessfn
= aa64_cacheop_access
},
3598 { .name
= "DC_CISW", .state
= ARM_CP_STATE_AA64
,
3599 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 14, .opc2
= 2,
3600 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3601 /* TLBI operations */
3602 { .name
= "TLBI_VMALLE1IS", .state
= ARM_CP_STATE_AA64
,
3603 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 0,
3604 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3605 .writefn
= tlbi_aa64_vmalle1is_write
},
3606 { .name
= "TLBI_VAE1IS", .state
= ARM_CP_STATE_AA64
,
3607 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 1,
3608 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3609 .writefn
= tlbi_aa64_vae1is_write
},
3610 { .name
= "TLBI_ASIDE1IS", .state
= ARM_CP_STATE_AA64
,
3611 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 2,
3612 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3613 .writefn
= tlbi_aa64_vmalle1is_write
},
3614 { .name
= "TLBI_VAAE1IS", .state
= ARM_CP_STATE_AA64
,
3615 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 3,
3616 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3617 .writefn
= tlbi_aa64_vae1is_write
},
3618 { .name
= "TLBI_VALE1IS", .state
= ARM_CP_STATE_AA64
,
3619 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 5,
3620 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3621 .writefn
= tlbi_aa64_vae1is_write
},
3622 { .name
= "TLBI_VAALE1IS", .state
= ARM_CP_STATE_AA64
,
3623 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 7,
3624 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3625 .writefn
= tlbi_aa64_vae1is_write
},
3626 { .name
= "TLBI_VMALLE1", .state
= ARM_CP_STATE_AA64
,
3627 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
3628 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3629 .writefn
= tlbi_aa64_vmalle1_write
},
3630 { .name
= "TLBI_VAE1", .state
= ARM_CP_STATE_AA64
,
3631 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
3632 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3633 .writefn
= tlbi_aa64_vae1_write
},
3634 { .name
= "TLBI_ASIDE1", .state
= ARM_CP_STATE_AA64
,
3635 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
3636 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3637 .writefn
= tlbi_aa64_vmalle1_write
},
3638 { .name
= "TLBI_VAAE1", .state
= ARM_CP_STATE_AA64
,
3639 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
3640 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3641 .writefn
= tlbi_aa64_vae1_write
},
3642 { .name
= "TLBI_VALE1", .state
= ARM_CP_STATE_AA64
,
3643 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 5,
3644 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3645 .writefn
= tlbi_aa64_vae1_write
},
3646 { .name
= "TLBI_VAALE1", .state
= ARM_CP_STATE_AA64
,
3647 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 7,
3648 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3649 .writefn
= tlbi_aa64_vae1_write
},
3650 { .name
= "TLBI_IPAS2E1IS", .state
= ARM_CP_STATE_AA64
,
3651 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 1,
3652 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3653 .writefn
= tlbi_aa64_ipas2e1is_write
},
3654 { .name
= "TLBI_IPAS2LE1IS", .state
= ARM_CP_STATE_AA64
,
3655 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 5,
3656 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3657 .writefn
= tlbi_aa64_ipas2e1is_write
},
3658 { .name
= "TLBI_ALLE1IS", .state
= ARM_CP_STATE_AA64
,
3659 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 4,
3660 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3661 .writefn
= tlbi_aa64_alle1is_write
},
3662 { .name
= "TLBI_VMALLS12E1IS", .state
= ARM_CP_STATE_AA64
,
3663 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 6,
3664 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3665 .writefn
= tlbi_aa64_alle1is_write
},
3666 { .name
= "TLBI_IPAS2E1", .state
= ARM_CP_STATE_AA64
,
3667 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 1,
3668 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3669 .writefn
= tlbi_aa64_ipas2e1_write
},
3670 { .name
= "TLBI_IPAS2LE1", .state
= ARM_CP_STATE_AA64
,
3671 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 5,
3672 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3673 .writefn
= tlbi_aa64_ipas2e1_write
},
3674 { .name
= "TLBI_ALLE1", .state
= ARM_CP_STATE_AA64
,
3675 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 4,
3676 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3677 .writefn
= tlbi_aa64_alle1_write
},
3678 { .name
= "TLBI_VMALLS12E1", .state
= ARM_CP_STATE_AA64
,
3679 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 6,
3680 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3681 .writefn
= tlbi_aa64_alle1is_write
},
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_CONST,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0 },
    REGINFO_SENTINEL
};

/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
    hcr_write(env, NULL, value);
}

static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
    hcr_write(env, NULL, value);
}
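/* Worked illustration of the 32-bit split above: an AArch32 write to
 * HCR2 only replaces HCR_EL2[63:32] and a write to HCR only replaces
 * HCR_EL2[31:0]; both are widened with deposit64() and funnelled
 * through hcr_write() so that RES0 masking and TLB maintenance are
 * handled in a single place.
 */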
/*
 * Return the effective value of HCR_EL2.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this condition is
         * arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }
    return ret;
}
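/* For instance, with HCR_EL2.TGE = 1 and HCR_EL2.E2H = 0 the code above
 * makes FMO/IMO/AMO read as 1 (physical interrupts target EL2) while
 * virtual-interrupt and EL1-trap bits such as VI, VF, TSC and TVM read
 * as 0, matching the ARMv8.4 description of TGE behaviour.
 */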
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
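/* Summarising nsacr_access: EL3 always succeeds, Secure EL1 traps to
 * EL3, and Non-secure EL1/EL2 may read but take an UNDEF on writes.
 */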
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
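/* Example: a userspace (EL0) read of CTR_EL0 while SCTLR_EL1.UCT is
 * clear takes the CP_ACCESS_TRAP path above; at EL1 and higher the
 * access always succeeds.
 */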
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
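/* Example: an AArch32 OSLAR write only sets the lock for the architected
 * key value 0xC5ACCE55, whereas an AArch64 write simply copies bit 0 of
 * the value; either way only OSLSR_EL1 bit 1 (OSLK) is updated.
 */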
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2 */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
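/* Example: with CPACR_EL1.ZEN = 0b01 an EL0 SVE access is disabled and,
 * in the absence of HCR_EL2.TGE, the function returns 1, routing the
 * SVEAccessTrap to EL1; the same access from EL1 passes the ZEN check
 * and falls through to the FPEN and CPTR_EL2/CPTR_EL3 checks.
 */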
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t zcr_len = cpu->sve_max_vq - 1;

    if (el <= 1) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }
    return zcr_len;
}
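/* Worked example: with cpu->sve_max_vq = 16 and ZCR_EL1 = 3, an EL1
 * access computes zcr_len = MIN(15, 3) = 3, i.e. a vector length of
 * (3 + 1) * 128 = 512 bits, assuming any ZCR_EL2/ZCR_EL3 clamps are
 * not smaller.
 */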
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
    int old_len = sve_zcr_len_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);

    /*
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
    new_len = sve_zcr_len_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
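/* BAS worked example: with WCR.BAS = 0b00111100, ctz32() gives
 * basstart = 2 and cto32() gives len = 4, so the watchpoint covers
 * bytes wvr+2 .. wvr+5 of the doubleword addressed by WVR.
 */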
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented\n");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;

        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented\n");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}

static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
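/* Example of the BAS mirroring above: a guest write of BAS = 0b0101
 * (bits 5 and 7 set) is stored as BAS = 0b1111, because bit 6 is forced
 * to copy bit 5 and bit 8 to copy bit 7 before the raw write.
 */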
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->isar.id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
/* Shared logic between LORID and the rest of the LOR* registers.
 * Secure state has already been dealt with.
 */
static CPAccessResult access_lor_ns(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_is_secure_below_el3(env)) {
        /* Access ok in secure mode.  */
        return CP_ACCESS_OK;
    }
    return access_lor_ns(env);
}
5080 static CPAccessResult
access_lor_other(CPUARMState
*env
,
5081 const ARMCPRegInfo
*ri
, bool isread
)
5083 if (arm_is_secure_below_el3(env
)) {
5084 /* Access denied in secure mode. */
5085 return CP_ACCESS_TRAP
;
5087 return access_lor_ns(env
);
#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 &&
        arm_feature(env, ARM_FEATURE_EL2) &&
        !(arm_hcr_el2_eff(env) & HCR_APK)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !(env->cp15.scr_el3 & SCR_APK)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo pauth_reginfo[] = {
    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.lo) },
    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apda_key.hi) },
    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.lo) },
    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apdb_key.hi) },
    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.lo) },
    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apga_key.hi) },
    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.lo) },
    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apia_key.hi) },
    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.lo) },
    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
      .access = PL1_RW, .accessfn = access_pauth,
      .fieldoffset = offsetof(CPUARMState, apib_key.hi) },
    REGINFO_SENTINEL
};
#endif
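
/*
 * Editorial note (sketch): none of the pauth key registers above supply a
 * .readfn or .writefn, so guest accesses go through the generic
 * fieldoffset path; e.g. an MSR to APIAKEYLO_EL1 (when access_pauth
 * permits it) stores straight into env->apia_key.lo, and migration can
 * raw-read/raw-write the same field.
 */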
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement only the cycle
         * count register.
         */
#ifndef CONFIG_USER_ONLY
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = cpu->midr & 0xff000000,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
#endif
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
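        /*
         * Editorial note (sketch): PMCR_EL0's reset value above keeps only
         * the implementor field of MIDR; e.g. for a Cortex-A57 with MIDR
         * 0x411fd070 (an assumed example value), PMCR_EL0 would reset to
         * 0x411fd070 & 0xff000000 == 0x41000000.
         */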
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr1},
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              /* At present, only SVEver == 0 is defined anyway. */
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
        }
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
            if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
            }
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
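    /*
     * Editorial note (sketch): the constant 0xc00 used for the read-only
     * NSACR cases above sets bits 10 and 11 (the CP10 and CP11 enables),
     * i.e. it reads as "Non-secure access to the FP/SIMD coprocessors is
     * permitted".
     */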
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-A32HPD. */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
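    /*
     * Editorial note (sketch): MPUIR's DREGION field occupies bits [15:8],
     * which is why id_mpuir_reginfo above uses "pmsav7_dregion << 8"; a
     * PMSA CPU with 16 MPU regions would read MPUIR as 0x00001000.
     */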
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
            ARMCPRegInfo hactlr2_reginfo = {
                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
                .access = PL2_RW, .type = ARM_CP_CONST,
                .resetvalue = 0
            };
            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
        }
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
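    /*
     * Editorial note (worked example, with an assumed reset_cbar value):
     * for reset_cbar == 0x430000000, extract64(..., 18, 14) yields 0xc00
     * and extract64(..., 32, 12) yields 0x4, so the 32-bit CBAR_EL1 view
     * computed above resets to (0xc00 << 18) | 0x4 == 0x30000004.
     */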
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }

    if (cpu_isar_feature(aa64_lor, cpu)) {
        /*
         * A trivial implementation of ARMv8.1-LOR leaves all of these
         * registers fixed at 0, which indicates that there are zero
         * supported Limited Ordering regions.
         */
        static const ARMCPRegInfo lor_reginfo[] = {
            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
              .access = PL1_RW, .accessfn = access_lor_other,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
              .access = PL1_R, .accessfn = access_lorid,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, lor_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
#endif
}
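
/*
 * Editorial note (sketch, assuming the usual call path): this function is
 * expected to run once per CPU at realize time, before any GICv3 sysregs
 * are registered -- which is why ID_PFR1 and ID_AA64PFR0 above need
 * runtime readfns instead of being plain ARM_CP_CONST.
 */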
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_xml(cs),
                             "system-registers.xml", 0);
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    r2->name = g_strdup(name);
    /* Reset the secure state to the specific incoming state. This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank. This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
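
/*
 * Editorial note (sketch): an ARM_CP_STATE_BOTH definition reaches this
 * function twice, once per state, and so lands in cpu->cp_regs under two
 * distinct keys: an ENCODE_CP_REG() key for the AArch32 view (with .cp
 * defaulted to 15 above) and an ENCODE_AA64_CP_REG() key with
 * CP_REG_ARM64_SYSREG_CP for the AArch64 view.
 */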
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are fewer than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of cp15 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}
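
/*
 * Editorial note (sketch): a wildcarded definition such as
 *
 *   { .name = "DUMMY",
 *     .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
 *     .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
 *
 * runs the opc2 loop above eight times (opc2 = 0..7); only the opc2 == 0
 * expansion stays migratable, the other seven being marked
 * ARM_CP_ALIAS | ARM_CP_NO_GDB by add_cpreg_to_hashtable().
 */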
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
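
/*
 * Editorial note (worked example): the packing above follows the
 * architected CPSR layout -- N in bit 31, Z in bit 30, C in bit 29, V in
 * bit 28, Q in bit 27, IT[1:0] in bits [26:25], GE in bits [19:16],
 * IT[7:2] in bits [15:10], T in bit 5 -- so with only the Z flag set and
 * Thumb execution active, cpsr_read() returns 0x40000020.
 */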
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
)(int32_t num
, int32_t den
)
6561 if (num
== INT_MIN
&& den
== -1)
6566 uint32_t HELPER(udiv
)(uint32_t num
, uint32_t den
)
6573 uint32_t HELPER(rbit
)(uint32_t x
)
#if defined(CONFIG_USER_ONLY)

/* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}
static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *       routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *       routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};

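/*
 * Editor's note (worked example, not part of the original file): a physical
 * IRQ taken from Non-secure EL0 on a system with an AArch64 EL3,
 * SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.IMO = 0 indexes the row
 * commented "1 0 1 0" above:
 */
#if 0
int el = target_el_table[1][0][1][0][0][0];   /* == 1, i.e. route to EL1 */
#endif
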
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    };

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

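/*
 * Editor's note (illustrative fragment with hypothetical values): callers
 * chain these helpers with &&, so the first failing access stops the
 * sequence, but by then v7m_stack_write()/v7m_stack_read() has already
 * updated the fault status registers and pended the derived exception.
 */
#if 0
bool stacked_ok =
    v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
    v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false);
/* stacked_ok == false: a derived exception is now pending */
#endif
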
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}

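/*
 * Editor's note (assumption about the constants, which are defined
 * elsewhere): the FNC_RETURN magic values sit immediately below the
 * EXC_RETURN range, so the single lower-bound compare above classifies
 * both kinds of magic value when the Security Extension is present.
 */
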
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}

static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}

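/*
 * Editor's note (illustrative fragment): in the tailchain path the caller
 * derives the threadmode/spsel arguments from the EXC_RETURN value, e.g.:
 */
#if 0
bool mode = lr & R_V7M_EXCRET_MODE_MASK;        /* 1: return to Thread */
uint32_t *sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                lr & R_V7M_EXCRET_SPSEL_MASK);
#endif
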
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

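/*
 * Editor's note (worked example with hypothetical values): the vector entry
 * address is simply vecbase + 4 * exception number.
 */
#if 0
/* vecbase = 0x08000000, exc = ARMV7M_EXCP_HARD (3) => addr = 0x0800000c */
uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
#endif
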
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}

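/*
 * Editor's sketch (illustration only): the 0x28-byte callee-saves frame
 * written above, expressed as a struct. The word at offset 0x4 is skipped
 * by the code, matching the v8M PushCalleeStack() pseudocode.
 */
#if 0
#include <stdint.h>

struct v8m_callee_frame {
    uint32_t integrity_sig;         /* 0x00: 0xfefa125b */
    uint32_t reserved;              /* 0x04: not written */
    uint32_t r4, r5, r6, r7;        /* 0x08 .. 0x14 */
    uint32_t r8, r9, r10, r11;      /* 0x18 .. 0x24 */
};                                  /* sizeof == 0x28 */
#endif
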
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            return true;
        }
    }

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed. */
    env->regs[13] = frameptr;

    return !stacked_ok;
}

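/*
 * Editor's sketch (illustration only): the eight-word hardware exception
 * frame pushed above, expressed as a struct.
 */
#if 0
#include <stdint.h>

struct v7m_basic_frame {
    uint32_t r0, r1, r2, r3;    /* 0x00 .. 0x0c */
    uint32_t r12;               /* 0x10 */
    uint32_t lr;                /* 0x14: r14 */
    uint32_t return_addr;       /* 0x18: r15 */
    uint32_t xpsr;              /* 0x1c: may carry XPSR_SPREALIGN */
};                              /* sizeof == 0x20 */
#endif
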
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = 1;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
        exc_secure = excret & R_V7M_EXCRET_ES_MASK;
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /* we returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /* We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    /*
     * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     * We must do this before we do any kind of tailchaining, including
     * for the derived exceptions on integrity check failures, or we will
     * give the guest an incorrect EXCRET.SPSEL value on exception entry.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    /*
     * Tailchaining: if there is currently a pending exception that
     * is high enough priority to preempt execution at the level we're
     * about to return to, then just directly take that exception now,
     * avoiding an unstack-and-then-stack. Note that now we have
     * deactivated the previous exception by calling armv7m_nvic_complete_irq()
     * our current execution priority is already the execution priority we are
     * returning to -- none of the state we would unstack or set based on
     * the EXCRET value affects it.
     */
    if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
        qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
        v7m_exception_taken(cpu, excret, true, false);
        return;
    }

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;
        bool return_to_priv = return_to_handler ||
            !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        return_to_priv);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig;

            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);

            if (pop_ok && expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }

            pop_ok = pop_ok &&
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * that state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                v7m_exception_taken(cpu, excret, true, false);
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}

static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}

static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}

static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}

static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4;
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}

void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_STKOF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
}

/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}

/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * regs.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}

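/*
 * Editor's note (summary of the two functions above, sketch only): the fixed
 * AArch64 <-> AArch32 register mapping they implement is:
 *
 *   x0-x12  <-> r0-r12 (usr bank in FIQ mode for r8-r12)
 *   x13/x14 <-> sp_usr/lr_usr     x15 <-> sp_hyp
 *   x16/x17 <-> lr_irq/sp_irq     x18/x19 <-> lr_svc/sp_svc
 *   x20/x21 <-> lr_abt/sp_abt     x22/x23 <-> lr_und/sp_und
 *   x24-x30 <-> r8_fiq-r14_fiq
 */
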
static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
}

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x14;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int cur_el = arm_current_el(env);

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}
/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (env->cp15.hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((env->cp15.hcr_el2 & HCR_DC) &&
        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}
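
/* Illustrative note: the arithmetic above relies on the S1NSE* indexes
 * being laid out at a fixed offset from the S12NSE* indexes, so e.g.
 * ARMMMUIdx_S12NSE0 maps to ARMMMUIdx_S1NSE0; any other index is already
 * a single-stage index and is returned unchanged.
 */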
/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}
/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
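
/* For reference, the simple AP[2:1] decoding above works out as:
 *   0: privileged read/write, unprivileged no access
 *   1: read/write at both privilege levels
 *   2: privileged read-only, unprivileged no access
 *   3: read-only at both privilege levels
 */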
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:  CPUARMState
 * @s2ap: The 2-bit stage2 access permissions (S2AP)
 * @xn:   XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}
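
/* Illustrative example: S2AP == 3 with XN clear yields
 * PAGE_READ | PAGE_WRITE | PAGE_EXEC.  A write-only mapping (S2AP == 2)
 * with XN clear only gains PAGE_EXEC when EL2 is AArch64, because an
 * AArch32 stage 2 requires read permission for instruction fetch.
 */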
/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = xn || !(prot_rw & PAGE_READ);
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}
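
/* Illustrative example: with TTBCR.N == 0 the level 1 table is 16KB and
 * VA bits [31:20] index one of 4096 4-byte descriptors, so
 * (address >> 18) & 0x3ffc is the byte offset of the entry; VA 0x00300000
 * gives offset 0xc, i.e. descriptor number 3.
 */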
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMCacheAttrs cacheattrs = {};
        ARMCacheAttrs *pcacheattrs = NULL;

        if (env->cp15.hcr_el2 & HCR_PTW) {
            /*
             * PTW means we must fault if this S1 walk touches S2 Device
             * memory; otherwise we don't care about the attributes and can
             * save the S2 translation the effort of computing them.
             */
            pcacheattrs = &cacheattrs;
        }

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
            /* Access was to Device memory: generate Permission fault */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}
/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
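
/* Illustrative example: with 4KB pages (stride == 9, grainsize == 12),
 * a 40-bit input range and a suggested starting level of 1, we get
 * startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, which lies inside
 * [1, stride + 4] = [1, 13], so the suggested level is accepted.
 */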
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
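
/* Illustrative example: s2attrs == 0xf (outer and inner write-back) with
 * caches enabled converts to 0xff, the MAIR encoding for Normal
 * write-back read/write-allocate memory; with HCR_EL2.CD set it instead
 * collapses to 0x44, Normal non-cacheable.
 */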
ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
                                        ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    bool tbi, tbid, epd, hpd, using16k, using64k;
    int select, tsz;

    /*
     * Bit 55 is always between the two regions, and is canonical for
     * determining if address tagging is enabled.
     */
    select = extract64(va, 55, 1);

    if (el > 1) {
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* VTCR_EL2 */
            tbi = tbid = hpd = false;
        } else {
            tbi = extract32(tcr, 20, 1);
            hpd = extract32(tcr, 24, 1);
            tbid = extract32(tcr, 29, 1);
        }
        epd = false;
    } else if (!select) {
        tsz = extract32(tcr, 0, 6);
        epd = extract32(tcr, 7, 1);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        tbi = extract64(tcr, 37, 1);
        hpd = extract64(tcr, 41, 1);
        tbid = extract64(tcr, 51, 1);
    } else {
        int tg = extract32(tcr, 30, 2);
        using16k = tg == 1;
        using64k = tg == 3;
        tsz = extract32(tcr, 16, 6);
        epd = extract32(tcr, 23, 1);
        tbi = extract64(tcr, 38, 1);
        hpd = extract64(tcr, 42, 1);
        tbid = extract64(tcr, 52, 1);
    }
    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .tbid = tbid,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}
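
/* Illustrative example of the clamping above: a guest TxSZ of 12 is
 * raised to 16, giving a 64 - 16 = 48 bit input address range (the
 * largest supported without ARMv8.2-LVA), while values above 39 are
 * lowered pending ARMv8.4-TTST support.
 */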
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);

    /* Present TBI as a composite with TBID.  */
    ret.tbi &= (data || !ret.tbid);
    return ret;
}
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    if (mmu_idx == ARMMMUIdx_S2NS) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size, top_bits;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;
        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        ttbr1_valid = (el < 2);
        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        /* There is no TTBR1 for EL2 */
        ttbr1_valid = (el != 2);
        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
        inputsize = addrsize - param.tsz;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     */
    top_bits = sextract64(address, inputsize, addrsize - inputsize);
    if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
        /* In the gap between the two regions, this is a Translation fault */
        fault_type = ARMFault_Translation;
        goto do_fault;
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
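
/* Illustrative example of the walk set-up above: with 4KB granules
 * (stride == 9) and a 32-bit input range, the starting level is
 * 4 - (32 - 4) / 9 = 1, and a block descriptor found at level 2 maps
 * 1ULL << (9 * (4 - 2) + 3) bytes, i.e. a 2MB region.
 */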
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
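
/* Illustrative note: extract32(address, 20, 12) == 0xe00 matches exactly
 * 0xe0000000..0xe00fffff (the PPB), and extract32(address, 29, 3) == 0x7
 * matches exactly 0xe0000000..0xffffffff (the System space).
 */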
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *page_size = TARGET_PAGE_SIZE;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    *page_size = 1;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                *page_size = 1 << rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
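
/* Illustrative example of the subregion handling above: a 256-byte
 * region (rsize == 8 after the increment) is split into eight 32-byte
 * subregions; if all eight SRD bits agree, the consistency loop widens
 * rsize back up so the whole region is treated as one translation unit.
 */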
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /* The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, bool *is_subpage,
                              ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * We set is_subpage to true if the region hit doesn't cover the
     * entire TARGET_PAGE the address is within.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    *is_subpage = false;
    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    *is_subpage = true;
                }
                continue;
            }

            if (base > addr_page_base || limit < addr_page_limit) {
                *is_subpage = true;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, target_ulong *page_size,
                                 ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};
    bool ret;
    bool mpu_is_subpage;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                            txattrs, prot, &mpu_is_subpage, fi, NULL);
    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
    return ret;
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}

/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}

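/* Worked example: a stage 1 nibble of 0xf (Write-Back, R/W-allocate)
 * combined with a stage 2 nibble of 0xa (Write-Through, Read-allocate)
 * takes the stage 2 write-through branch above and yields
 * (2 << 2) | 3 == 0xb: Write-Through, with the stage 1 allocation hints.
 */
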
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}

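/* Worked example: combining stage 1 Device-nGnRE (attrs 0x04) with stage 2
 * Normal Write-Back (attrs 0xff) takes the Device branch: the result is
 * Device-nGnRE with shareability forced to Outer Shareable.
 */
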
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                if (env->cp15.hcr_el2 & HCR_DC) {
                    /*
                     * HCR.DC forces the first stage attributes to
                     *  Normal Non-Shareable,
                     *  Inner Write-Back Read-Allocate Write-Allocate,
                     *  Outer Write-Back Read-Allocate Write-Allocate.
                     */
                    cacheattrs->attrs = 0xff;
                    cacheattrs->shareability = 0;
                }
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }

    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade an non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, page_size, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, page_size, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}

/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return 0;
    }

    return ret;
}

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}

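/* Illustrative encoding for the helper below: for "MSR APSR_nzcvq, Rn" the
 * instruction mask field (bits [11:10]) is 0b10, which arrives here as
 * maskreg bits [11:8] == 0b1000, so only the NZCV and Q bits are written.
 */
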
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
                env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
                env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            }
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
            bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
            uint32_t limit;

            if (!env->v7m.secure) {
                return;
            }

            limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];

            if (val < limit) {
                CPUState *cs = CPU(arm_env_get_cpu(env));

                cpu_restore_state(cs, GETPC(), true);
                raise_exception(env, EXCP_STKOF, 0, 1);
            }

            if (is_psp) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
    }
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;
    bool is_subpage;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &is_subpage,
                          &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}

void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}

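/* For instance, with cpu->dcz_blocksize == 4 the block length is
 * 4 << 4 == 64 bytes, so DC ZVA zeroes a 64-byte block aligned to a
 * 64-byte boundary.
 */
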
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

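/* op_addsub.h is included once per arithmetic flavour: it expands the
 * ADD16/SUB16/ADD8/SUB8 and PFX macros defined just above into the actual
 * parallel add/sub helpers (here the saturating QADD16/QSUB8 etc. family)
 * and then undefines them, ready for the next inclusion.
 */
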
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return b - a;
    }
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1) {
        mask |= 0xff;
    }
    if (flags & 2) {
        mask |= 0xff00;
    }
    if (flags & 4) {
        mask |= 0xff0000;
    }
    if (flags & 8) {
        mask |= 0xff000000;
    }
    return (a & mask) | (b & ~mask);
}

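/* Worked example: with the GE flags (bits [3:0] of 'flags') equal to 0b0011,
 * the mask is 0x0000ffff, so SEL takes bytes 0-1 from a and bytes 2-3 from b.
 */
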
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

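/* The target bit positions are the FPSCR cumulative exception flags:
 * IOC (1), DZC (2), OFC (4), UFC (8), IXC (0x10) and IDC (0x80).
 */
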
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    uint32_t i, fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);

    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    /* FZ16 does not generate an input denormal exception.  */
    i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
          & ~float_flag_input_denormal);

    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}

/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    /* When ARMv8.2-FP16 is not supported, FZ16 is RES0.  */
    if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
        val &= ~FPCR_FZ16;
    }

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /* The exception flags are ORed together when we read fpscr so we
     * only need to preserve the current state in one of our
     * float_status values.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}

#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp

/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, ftype, fsz, sign)                           \
    ftype HELPER(name)(uint32_t x, void *fpstp)                     \
    {                                                               \
        float_status *fpst = fpstp;                                 \
        return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
    }

#define CONV_FTOI(name, ftype, fsz, sign, round)                \
    sign##int32_t HELPER(name)(ftype x, void *fpstp)            \
    {                                                           \
        float_status *fpst = fpstp;                             \
        if (float##fsz##_is_any_nan(x)) {                       \
            float_raise(float_flag_invalid, fpst);              \
            return 0;                                           \
        }                                                       \
        return float##fsz##_to_##sign##int32##round(x, fpst);   \
    }

#define FLOAT_CONVS(name, p, ftype, fsz, sign)            \
    CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign)        \
    CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, )        \
    CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, uint32_t, 16, )
FLOAT_CONVS(si, s, float32, 32, )
FLOAT_CONVS(si, d, float64, 64, )
FLOAT_CONVS(ui, h, uint32_t, 16, u)
FLOAT_CONVS(ui, s, float32, 32, u)
FLOAT_CONVS(ui, d, float64, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                   \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
                                     void *fpstp)                      \
{ return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }

#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff)   \
uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
                                            void *fpst)                  \
{                                                                         \
    if (unlikely(float##fsz##_is_any_nan(x))) {                           \
        float_raise(float_flag_invalid, fpst);                            \
        return 0;                                                         \
    }                                                                     \
    return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst);       \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype)                          \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                            \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,                      \
                         float_round_to_zero, _round_to_zero)           \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,                      \
                         get_float_rounding_mode(fpst), )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)     \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)           \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,     \
                         get_float_rounding_mode(fpst), )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64

uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return int32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
{
    return uint32_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return int64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
{
    return uint64_to_float16_scalbn(x, -shift, fpst);
}

uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst),
                                   shift, fpst);
}

uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
{
    if (unlikely(float16_is_any_nan(x))) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst),
                                    shift, fpst);
}

/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
{
    float_status *fp_status = fpstp;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

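/* Callers typically bracket a single operation with these helpers: save the
 * value returned here, run the FP op with the temporary rounding mode, then
 * call the helper again with the saved value to restore the previous mode.
 */
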
/* Half precision conversions.  */
float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float32 r = float16_to_float32(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float32_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing input denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_inputs_to_zero(fpst);
    set_flush_inputs_to_zero(false, fpst);
    float64 r = float16_to_float64(a, !ahp_mode, fpst);
    set_flush_inputs_to_zero(save, fpst);
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
{
    /* Squash FZ16 to 0 for the duration of conversion.  In this case,
     * it would affect flushing output denormals.
     */
    float_status *fpst = fpstp;
    flag save = get_flush_to_zero(fpst);
    set_flush_to_zero(false, fpst);
    float16 r = float64_to_float16(a, !ahp_mode, fpst);
    set_flush_to_zero(save, fpst);
    return r;
}

#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}

/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */

static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}

/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns a
 * float64 which can then be rounded to the appropriate size by the
 * callee.
 */

static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}

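/* Worked example: recip_estimate(256) computes a = 513, b = (1 << 19) / 513
 * == 1022 and returns (1022 + 1) >> 1 == 511, i.e. roughly 511/256 ~= 2.0 as
 * the reciprocal of 256/512 == 0.5.
 */
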
static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}

uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}

float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}

float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0>; */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */

static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        a = a * 2 + 1;
    } else {
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}


static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}

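/* Since sqrt(x) halves the exponent, the result exponent above is
 * (exp_off - exp) / 2, and the parity of the input exponent selects
 * between the 0.25..0.5 and 0.5..1.0 scalings of the fraction.
 */
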
uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
    val = deposit64(0, 61, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}

uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* float_status *s = fpstp; */
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}

/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
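/*
 * The literal 0 passed as the flags argument requests a plain fused
 * multiply-add (a * b + c with a single rounding).  softfloat also
 * accepts float_muladd_negate_a/_c/_product flags; for the negated
 * VFP variants (VFMS, VFNMA, VFNMS) the translator instead negates
 * the operands before calling these helpers.
 */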
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}
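/*
 * The *_exact helpers above leave the Inexact flag exactly as
 * softfloat raised it, which is what FRINTX requires.  The helpers
 * below serve the remaining FRINT* encodings, which must not signal
 * Inexact, so any Inexact status the conversion produced is masked
 * out again afterwards.
 */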
float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
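/*
 * Typical use (a sketch): a caller converts an FPROUNDING_* constant
 * once, swaps the result into the float_status for the duration of a
 * rounding-sensitive operation, and restores the previous mode
 * afterwards (the save/restore pattern the set_rmode helper provides):
 *
 *     int new_rmode = arm_rmode_to_sf(FPROUNDING_ZERO);
 *     int old_rmode = get_float_rounding_mode(fpst);
 *     set_float_rounding_mode(new_rmode, fpst);
 *     ... perform the conversion ...
 *     set_float_rounding_mode(old_rmode, fpst);
 */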
/* The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
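/*
 * For illustration: the word-sized CRC32W/CRC32CW instructions
 * accumulate four bytes per call (bytes == 4), while the byte and
 * halfword forms pass bytes == 1 or 2 with the unused upper bytes of
 * val already zero-extended, as required by the comment above
 * helper_crc32.
 */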
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    int fpen;

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}
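/*
 * Example: with CPACR_EL1.FPEN == 1 ("trap only EL0 accesses"), an FP
 * instruction at EL0 yields 1 (trap to EL1), while the same
 * instruction at EL1 falls through to the CPTR checks and, if neither
 * CPTR_EL2.TFP nor CPTR_EL3.TFP is set, returns 0 (FP enabled).
 */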
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
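/*
 * The resulting index is a bitwise composition: for example,
 * secstate = true and priv = true with no negative-priority handler
 * active gives ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S,
 * which is ARMMMUIdx_MSPriv.
 */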
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_current_el(env) != 0;

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}
ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    int el;

    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    el = arm_current_el(env);
    if (el < 2 && arm_is_secure_below_el3(env)) {
        return ARMMMUIdx_S1SE0 + el;
    }
    return ARMMMUIdx_S12NSE0 + el;
}
int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}
#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    int current_el = arm_current_el(env);
    int fp_el = fp_exception_el(env, current_el);
    uint32_t flags = 0;

    if (is_a64(env)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        *pc = env->pc;
        flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

#ifndef CONFIG_USER_ONLY
        /*
         * Get control bits for tagged addresses.  Note that the
         * translator only uses this for instruction addresses.
         */
        {
            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
            int tbii, tbid;

            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
            if (regime_el(env, stage1) < 2) {
                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
                tbid = (p1.tbi << 1) | p0.tbi;
                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
            } else {
                tbid = p0.tbi;
                tbii = tbid & !p0.tbid;
            }

            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
            flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
        }
#endif

        if (cpu_isar_feature(aa64_sve, cpu)) {
            int sve_el = sve_exception_el(env, current_el);
            uint32_t zcr_len;

            /* If SVE is disabled, but FP is enabled,
             * then the effective len is 0.
             */
            if (sve_el != 0 && fp_el == 0) {
                zcr_len = 0;
            } else {
                zcr_len = sve_zcr_len_for_el(env, current_el);
            }
            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
        }

        if (cpu_isar_feature(aa64_pauth, cpu)) {
            /*
             * In order to save space in flags, we record only whether
             * pauth is "inactive", meaning all insns are implemented as
             * a nop, or "active" when some action must be performed.
             * The decision of which action to take is left to a helper.
             */
            uint64_t sctlr;
            if (current_el == 0) {
                /* FIXME: ARMv8.1-VHE S2 translation regime.  */
                sctlr = env->cp15.sctlr_el[1];
            } else {
                sctlr = env->cp15.sctlr_el[current_el];
            }
            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
            }
        }
    } else {
        *pc = env->regs[15];
        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
        flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
        flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
        }
        flags = FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar);
    }

    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0             x       Inactive (the TB flag for SS is always 0)
     *     1             0       Active-pending
     *     1             1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
    }

    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
     * suppressing them because the requested execution priority is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_M) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
    }

    *pflags = flags;
    *cs_base = 0;
}
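/*
 * The flags word assembled above is stored as tb->flags and takes
 * part in translation-block lookup, so any CPU state folded into it
 * (MMU index, FP/SVE trap state, Thumb bit, endianness, ...) forces a
 * different translation when it changes between executions.
 */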
#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= arm_env_get_cpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
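/*
 * Worked example, assuming ARM_MAX_VQ == 16: narrowing to vq == 1
 * zeroes bytes 16..255 of every zreg, keeps only the low 16 bits of
 * p[0] in each predicate register (pmask == 0xffff for j == 0), and
 * clears p[1]..p[3] entirely because pmask drops to 0 after the
 * first iteration of the outer loop.
 */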
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);