#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif
/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        stq_le_p(buf, q[0]);
        stq_le_p(buf + 8, q[1]);
        return 16;
    }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
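/* Worked example of the gdb register numbering handled above (derived from
 * the switch cases themselves, not from external gdb documentation): for
 * AArch64, regs 0..31 are the 128-bit V registers (two little-endian 64-bit
 * halves, 16 bytes each), reg 32 is FPSR and reg 33 is FPCR (4 bytes each).
 * Each accessor returns the number of bytes consumed, or 0 for an unknown
 * register number.
 */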
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}
static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}
static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
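/* Illustrative round-trip (a sketch of how the two helpers above cooperate,
 * not additional functionality): migration saves registers with
 * write_cpustate_to_list() on the source and applies the incoming
 * (index, value) list with write_list_to_cpustate() on the destination;
 * the readback check above makes a register that silently discards its
 * migrated value surface as a false return, e.g.:
 *
 *     if (!write_list_to_cpustate(cpu)) {
 *         // some register did not accept the incoming value
 *     }
 */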
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}
static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
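/* The two g_list_foreach() passes above are a classic count-then-fill
 * pattern: count_cpreg() sizes the arrays, add_cpreg_to_list() populates
 * them, and the final assert() checks that both passes filtered the same
 * sorted key list identically, so any mismatch indicates a filtering bug.
 */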
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}
/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
420 /* Check for traps to "powerdown debug" registers, which are controlled
423 static CPAccessResult
access_tdosa(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
426 int el
= arm_current_el(env
);
428 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TDOSA
)
429 && !arm_is_secure_below_el3(env
)) {
430 return CP_ACCESS_TRAP_EL2
;
432 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDOSA
)) {
433 return CP_ACCESS_TRAP_EL3
;
438 /* Check for traps to "debug ROM" registers, which are controlled
439 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
441 static CPAccessResult
access_tdra(CPUARMState
*env
, const ARMCPRegInfo
*ri
,
444 int el
= arm_current_el(env
);
446 if (el
< 2 && (env
->cp15
.mdcr_el2
& MDCR_TDRA
)
447 && !arm_is_secure_below_el3(env
)) {
448 return CP_ACCESS_TRAP_EL2
;
450 if (el
< 3 && (env
->cp15
.mdcr_el3
& MDCR_TDA
)) {
451 return CP_ACCESS_TRAP_EL3
;
/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}
static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}
static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}
static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}
static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}
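/* Address reconstruction sketch for the write above: the register value
 * carries IPA[39:12] in its low bits, so (value << 12) rebuilds the byte
 * address and sextract64(..., 0, 40) keeps the 40-bit stage 2 address range.
 * For example, a written value of 0x12345 selects the page at 0x12345000.
 */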
static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}
static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}
static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}
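/* MAKE_64BIT_MASK(0, 12) is 0xfff, so the "& ~MAKE_64BIT_MASK(0, 12)" in the
 * two hyp helpers above simply clears the page-offset bits: an MVA of
 * 0x40001234 invalidates the page at 0x40001000. The non-hyp TLBIMVA ops
 * achieve the same effect with TARGET_PAGE_MASK.
 */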
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        uint32_t mask = 0;

        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}
static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) } },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
#ifndef CONFIG_USER_ONLY
static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
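/* Sketch of the representation trick used by pmccntr_sync(): while the
 * counter is enabled, c15_ccnt holds (ticks_now - guest_counter) rather
 * than the counter itself, so reads cost one subtraction and no periodic
 * timer is needed. The transformation is its own inverse:
 *
 *     ticks - (ticks - counter) == counter
 *
 * so calling pmccntr_sync() flips between the two encodings; callers that
 * change the clock configuration bracket the change with a sync pair.
 */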
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}
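/* Bit arithmetic for the 0x39 mask above: 0x39 == 0b111001, i.e. PMCR.E
 * (bit 0, enable), PMCR.D (bit 3, divide-by-64), PMCR.X (bit 4, export)
 * and PMCR.DP (bit 5, disable in prohibited regions) -- exactly the
 * "DP, X, D and E" bits the comment names; all other written bits are
 * discarded.
 */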
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}
static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}
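/* deposit64(cur_val, 0, 32, value) returns cur_val with bits [31:0]
 * replaced by value, so the 32-bit AArch32 view above updates only the
 * low half of the cycle counter while preserving bits [63:32].
 */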
#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}
static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}
static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}
static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}
static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}
static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}
static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}
static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     *  For the cases controlled by PMUSERENR we must set .access to PL0_RW
     *  or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write },
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}
static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);

    switch (el) {
    case 0:
        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}
gt_vct_access(CPUARMState
*env
,
1620 const ARMCPRegInfo
*ri
,
1623 return gt_counter_access(env
, GTIMER_VIRT
, isread
);
static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}
static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
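/* GTIMER_SCALE is the fixed nanoseconds-per-tick divisor; assuming the
 * usual value of 16, this yields a 62.5 MHz counter, which matches the
 * CNTFRQ reset value of (1000 * 1000 * 1000) / GTIMER_SCALE = 62500000 Hz
 * used in generic_timer_cp_reginfo[] below.
 */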
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
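/* Worked example for the ISTATUS computation above (deliberately unsigned
 * 64-bit): with cval == 100, count == 50 and a virtual-timer offset of 200,
 * count - offset wraps to a huge unsigned value, so ISTATUS == 1 and the
 * next interesting transition is the distant count rollover (UINT64_MAX).
 * With count - offset == 40 instead, ISTATUS == 0 and the timer is set to
 * fire at cval + offset, clamped so the expiry still fits the signed range
 * a QEMUTimer can represent.
 */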
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
}
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
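/* TVAL worked example for the two accessors above: TVAL is defined as
 * CVAL - (count - offset), truncated to 32 bits. If the adjusted count is
 * 1000 and CVAL is 1100, TVAL reads as 100 (ticks remaining); writing
 * TVAL = 0xffffffff sign-extends to -1 via sextract64() and sets CVAL one
 * tick in the past, so the timer fires immediately.
 */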
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
1910 void arm_gt_ptimer_cb(void *opaque
)
1912 ARMCPU
*cpu
= opaque
;
1914 gt_recalc_timer(cpu
, GTIMER_PHYS
);
1917 void arm_gt_vtimer_cb(void *opaque
)
1919 ARMCPU
*cpu
= opaque
;
1921 gt_recalc_timer(cpu
, GTIMER_VIRT
);
1924 void arm_gt_htimer_cb(void *opaque
)
1926 ARMCPU
*cpu
= opaque
;
1928 gt_recalc_timer(cpu
, GTIMER_HYP
);
1931 void arm_gt_stimer_cb(void *opaque
)
1933 ARMCPU
*cpu
= opaque
;
1935 gt_recalc_timer(cpu
, GTIMER_SEC
);
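
/* The arm_gt_*timer_cb functions above are the expiry callbacks for the
 * QEMU_CLOCK_VIRTUAL timers backing each generic timer (the timers
 * themselves are created when the CPU is initialized); recalculating
 * re-evaluates ISTATUS and drives the interrupt line accordingly.
 */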
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL(S)",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL(S)",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
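
/* The CNTFRQ_EL0 reset value above is (1000 * 1000 * 1000) / GTIMER_SCALE;
 * with GTIMER_SCALE defined as 16 elsewhere in this target, that works out
 * to 62500000, i.e. the emulated system counter ticks at 62.5MHz.
 */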
#else

/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio
 * outputs, so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * ATS1Hx always uses the 64bit format (not supported yet).
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & HCR_VM;
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
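
/* Decoding used above: opc2 bit 0 distinguishes the write probe
 * (MMU_DATA_STORE) from the read probe, while opc2 bits [2:1] pick the
 * regime: 0 = stage 1 current state PL1, 2 = stage 1 current state PL0,
 * and 4/6 = the stage 1+2 Non-secure ATS12NSO* group for PL1 and PL0.
 */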
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
#endif
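
/* Unlike the AArch32 ATS* helpers above, which update the banked 32/64-bit
 * PAR, the AArch64 AT* instructions always deposit their result in PAR_EL1;
 * do_ats_write() forces the 64-bit PAR format for them via is_a64().
 */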
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
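
/* Worked example of the packing: each of the eight regions has a 4-bit
 * field in the extended format but only a 2-bit one in the simple format,
 * so e.g. extended 0x13 (region 0 AP=3, region 1 AP=1) reads back as
 * simple 0x7, and extended_mpu_ap_bits(0x7) returns 0x13 again.
 */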
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
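
/* Worked example: TTBCR.N == 2 gives mask = 0xc0000000 (the high VA bits
 * that steer a short-descriptor lookup to TTBR1) and base_mask =
 * 0xfffff000 (so the TTBR0 table must be 4K aligned); N == 0 gives
 * mask = 0 and base_mask = 0xffffc000, matching the reset values used in
 * vmsa_ttbcr_reset() below.
 */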
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}
static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        ARMCPU *cpu = arm_env_get_cpu(env);

        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
    if (raw_read(env, ri) != value) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0 |
                            ARMMMUIdxBit_S2NS);
        raw_write(env, ri, value);
    }
}
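
/* A VTTBR write can change the VMID, which retags every Non-secure EL1&0
 * translation, so the stage 1+2 and stage 2 TLB views are all dropped;
 * the raw_read() comparison avoids the flush when nothing changed.
 */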
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    env->cp15.c15_cpar = value & 0x3fff;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}
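
/* When EL2 is implemented, Non-secure EL1 reads of MIDR are virtualized:
 * the hypervisor-controlled VPIDR_EL2 value is returned instead of the
 * real register (mpidr_read() below does the same with VMPIDR_EL2).
 */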
static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
         */
        if (cpu->mp_is_up) {
            mpidr |= (1u << 30);
        }
    }
    return mpidr;
}

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vmpidr_el2;
    }
    return mpidr_read_val(env);
}
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri,
                                          bool isread)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
 * Page D4-1736 (DDI0487A.b)
 */

static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S12NSE1 |
                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}
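
/* The *IS (inner-shareable) variants must take effect on every CPU in the
 * Inner Shareable domain, hence the _all_cpus_synced flush calls above and
 * below; the plain variants only touch the local CPU's TLB.
 */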
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_by_mmuidx(cs,
                            ARMMMUIdxBit_S1SE1 |
                            ARMMMUIdxBit_S1SE0);
    } else {
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0 |
                                ARMMMUIdxBit_S2NS);
        } else {
            tlb_flush_by_mmuidx(cs,
                                ARMMMUIdxBit_S12NSE1 |
                                ARMMMUIdxBit_S12NSE0);
        }
    }
}

static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
}

static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    CPUState *cs = ENV_GET_CPU(env);
    bool sec = arm_is_secure_below_el3(env);
    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);

    if (sec) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S1SE1 |
                                            ARMMMUIdxBit_S1SE0);
    } else if (has_el2) {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0 |
                                            ARMMMUIdxBit_S2NS);
    } else {
        tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                            ARMMMUIdxBit_S12NSE1 |
                                            ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL1&0 (AArch64 version).
     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
     * since we don't support flush-for-specific-ASID-only or
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (arm_is_secure_below_el3(env)) {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S1SE1 |
                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx(cs, pageaddr,
                                 ARMMMUIdxBit_S12NSE1 |
                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL2
     * Currently handles both VAE2 and VALE2, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by VA, EL3
     * Currently handles both VAE3 and VALE3, since we don't support
     * flush-last-level-only.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
}
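
/* The TLBI-by-VA argument carries VA[55:12] in Xt[43:0]; shifting left by
 * 12 and sign-extending from bit 55 rebuilds a canonical address, e.g.
 * Xt = 0xfffffffffff (44 bits of ones) gives pageaddr 0xfffffffffffff000.
 */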
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    bool sec = arm_is_secure_below_el3(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    if (sec) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S1SE1 |
                                                 ARMMMUIdxBit_S1SE0);
    } else {
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                                 ARMMMUIdxBit_S12NSE1 |
                                                 ARMMMUIdxBit_S12NSE0);
    }
}

static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = sextract64(value << 12, 0, 56);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E3);
}
static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                      uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 48);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}
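
/* Stage 2 invalidation operates on IPAs rather than VAs: only IPA[47:12]
 * is significant, so the extract above is 48 bits wide instead of the 56
 * bits used by the VA-based operations.
 */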
static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    /* We don't implement EL2, so the only control on DC ZVA is the
     * bit in the SCTLR which can prohibit access for EL0.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int dzp_bit = 1 << 4;

    /* DZP indicates whether DC ZVA access is allowed */
    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
        dzp_bit = 0;
    }
    return cpu->dcz_blocksize | dzp_bit;
}
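
/* DCZID_EL0 bits [3:0] give log2 of the DC ZVA block size in words; the
 * AArch64 CPU models typically set dcz_blocksize to 4 here, i.e. 16 words
 * or 64 bytes. Bit 4 (DZP) reads as 1 while DC ZVA is prohibited.
 */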
static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (!(env->pstate & PSTATE_SP)) {
        /* Access to SP_EL0 is undefined if it's being used as
         * the stack pointer.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}

static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) == value) {
        /* Skip the TLB flush if nothing actually changed; Linux likes
         * to do a lot of pointless SCTLR writes.
         */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
        /* M bit is RAZ/WI for PMSA with no MPU implemented */
        value &= ~SCTLR_M;
    }

    raw_write(env, ri, value);
    /* ??? Lots of these bits are not implemented.  */
    /* This may enable/disable the MMU, so do a TLB flush.  */
    tlb_flush(CPU(cpu));
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
        return CP_ACCESS_TRAP_FP_EL2;
    }
    if (env->cp15.cptr_el[3] & CPTR_TFP) {
        return CP_ACCESS_TRAP_FP_EL3;
    }
    return CP_ACCESS_OK;
}

static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
3346 static const ARMCPRegInfo v8_cp_reginfo
[] = {
3347 /* Minimal set of EL0-visible registers. This will need to be expanded
3348 * significantly for system emulation of AArch64 CPUs.
3350 { .name
= "NZCV", .state
= ARM_CP_STATE_AA64
,
3351 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 2,
3352 .access
= PL0_RW
, .type
= ARM_CP_NZCV
},
3353 { .name
= "DAIF", .state
= ARM_CP_STATE_AA64
,
3354 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 2,
3355 .type
= ARM_CP_NO_RAW
,
3356 .access
= PL0_RW
, .accessfn
= aa64_daif_access
,
3357 .fieldoffset
= offsetof(CPUARMState
, daif
),
3358 .writefn
= aa64_daif_write
, .resetfn
= arm_cp_reset_ignore
},
3359 { .name
= "FPCR", .state
= ARM_CP_STATE_AA64
,
3360 .opc0
= 3, .opc1
= 3, .opc2
= 0, .crn
= 4, .crm
= 4,
3361 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
3362 .readfn
= aa64_fpcr_read
, .writefn
= aa64_fpcr_write
},
3363 { .name
= "FPSR", .state
= ARM_CP_STATE_AA64
,
3364 .opc0
= 3, .opc1
= 3, .opc2
= 1, .crn
= 4, .crm
= 4,
3365 .access
= PL0_RW
, .type
= ARM_CP_FPU
| ARM_CP_SUPPRESS_TB_END
,
3366 .readfn
= aa64_fpsr_read
, .writefn
= aa64_fpsr_write
},
3367 { .name
= "DCZID_EL0", .state
= ARM_CP_STATE_AA64
,
3368 .opc0
= 3, .opc1
= 3, .opc2
= 7, .crn
= 0, .crm
= 0,
3369 .access
= PL0_R
, .type
= ARM_CP_NO_RAW
,
3370 .readfn
= aa64_dczid_read
},
3371 { .name
= "DC_ZVA", .state
= ARM_CP_STATE_AA64
,
3372 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 4, .opc2
= 1,
3373 .access
= PL0_W
, .type
= ARM_CP_DC_ZVA
,
3374 #ifndef CONFIG_USER_ONLY
3375 /* Avoid overhead of an access check that always passes in user-mode */
3376 .accessfn
= aa64_zva_access
,
3379 { .name
= "CURRENTEL", .state
= ARM_CP_STATE_AA64
,
3380 .opc0
= 3, .opc1
= 0, .opc2
= 2, .crn
= 4, .crm
= 2,
3381 .access
= PL1_R
, .type
= ARM_CP_CURRENTEL
},
3382 /* Cache ops: all NOPs since we don't emulate caches */
3383 { .name
= "IC_IALLUIS", .state
= ARM_CP_STATE_AA64
,
3384 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 1, .opc2
= 0,
3385 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3386 { .name
= "IC_IALLU", .state
= ARM_CP_STATE_AA64
,
3387 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 5, .opc2
= 0,
3388 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3389 { .name
= "IC_IVAU", .state
= ARM_CP_STATE_AA64
,
3390 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 5, .opc2
= 1,
3391 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3392 .accessfn
= aa64_cacheop_access
},
3393 { .name
= "DC_IVAC", .state
= ARM_CP_STATE_AA64
,
3394 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 1,
3395 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3396 { .name
= "DC_ISW", .state
= ARM_CP_STATE_AA64
,
3397 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 6, .opc2
= 2,
3398 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3399 { .name
= "DC_CVAC", .state
= ARM_CP_STATE_AA64
,
3400 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 10, .opc2
= 1,
3401 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3402 .accessfn
= aa64_cacheop_access
},
3403 { .name
= "DC_CSW", .state
= ARM_CP_STATE_AA64
,
3404 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 10, .opc2
= 2,
3405 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3406 { .name
= "DC_CVAU", .state
= ARM_CP_STATE_AA64
,
3407 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 11, .opc2
= 1,
3408 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3409 .accessfn
= aa64_cacheop_access
},
3410 { .name
= "DC_CIVAC", .state
= ARM_CP_STATE_AA64
,
3411 .opc0
= 1, .opc1
= 3, .crn
= 7, .crm
= 14, .opc2
= 1,
3412 .access
= PL0_W
, .type
= ARM_CP_NOP
,
3413 .accessfn
= aa64_cacheop_access
},
3414 { .name
= "DC_CISW", .state
= ARM_CP_STATE_AA64
,
3415 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 14, .opc2
= 2,
3416 .access
= PL1_W
, .type
= ARM_CP_NOP
},
3417 /* TLBI operations */
3418 { .name
= "TLBI_VMALLE1IS", .state
= ARM_CP_STATE_AA64
,
3419 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 0,
3420 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3421 .writefn
= tlbi_aa64_vmalle1is_write
},
3422 { .name
= "TLBI_VAE1IS", .state
= ARM_CP_STATE_AA64
,
3423 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 1,
3424 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3425 .writefn
= tlbi_aa64_vae1is_write
},
3426 { .name
= "TLBI_ASIDE1IS", .state
= ARM_CP_STATE_AA64
,
3427 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 2,
3428 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3429 .writefn
= tlbi_aa64_vmalle1is_write
},
3430 { .name
= "TLBI_VAAE1IS", .state
= ARM_CP_STATE_AA64
,
3431 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 3,
3432 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3433 .writefn
= tlbi_aa64_vae1is_write
},
3434 { .name
= "TLBI_VALE1IS", .state
= ARM_CP_STATE_AA64
,
3435 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 5,
3436 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3437 .writefn
= tlbi_aa64_vae1is_write
},
3438 { .name
= "TLBI_VAALE1IS", .state
= ARM_CP_STATE_AA64
,
3439 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 3, .opc2
= 7,
3440 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3441 .writefn
= tlbi_aa64_vae1is_write
},
3442 { .name
= "TLBI_VMALLE1", .state
= ARM_CP_STATE_AA64
,
3443 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 0,
3444 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3445 .writefn
= tlbi_aa64_vmalle1_write
},
3446 { .name
= "TLBI_VAE1", .state
= ARM_CP_STATE_AA64
,
3447 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 1,
3448 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3449 .writefn
= tlbi_aa64_vae1_write
},
3450 { .name
= "TLBI_ASIDE1", .state
= ARM_CP_STATE_AA64
,
3451 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 2,
3452 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3453 .writefn
= tlbi_aa64_vmalle1_write
},
3454 { .name
= "TLBI_VAAE1", .state
= ARM_CP_STATE_AA64
,
3455 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 3,
3456 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3457 .writefn
= tlbi_aa64_vae1_write
},
3458 { .name
= "TLBI_VALE1", .state
= ARM_CP_STATE_AA64
,
3459 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 5,
3460 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3461 .writefn
= tlbi_aa64_vae1_write
},
3462 { .name
= "TLBI_VAALE1", .state
= ARM_CP_STATE_AA64
,
3463 .opc0
= 1, .opc1
= 0, .crn
= 8, .crm
= 7, .opc2
= 7,
3464 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
,
3465 .writefn
= tlbi_aa64_vae1_write
},
3466 { .name
= "TLBI_IPAS2E1IS", .state
= ARM_CP_STATE_AA64
,
3467 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 1,
3468 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3469 .writefn
= tlbi_aa64_ipas2e1is_write
},
3470 { .name
= "TLBI_IPAS2LE1IS", .state
= ARM_CP_STATE_AA64
,
3471 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 0, .opc2
= 5,
3472 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3473 .writefn
= tlbi_aa64_ipas2e1is_write
},
3474 { .name
= "TLBI_ALLE1IS", .state
= ARM_CP_STATE_AA64
,
3475 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 4,
3476 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3477 .writefn
= tlbi_aa64_alle1is_write
},
3478 { .name
= "TLBI_VMALLS12E1IS", .state
= ARM_CP_STATE_AA64
,
3479 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 3, .opc2
= 6,
3480 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3481 .writefn
= tlbi_aa64_alle1is_write
},
3482 { .name
= "TLBI_IPAS2E1", .state
= ARM_CP_STATE_AA64
,
3483 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 1,
3484 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3485 .writefn
= tlbi_aa64_ipas2e1_write
},
3486 { .name
= "TLBI_IPAS2LE1", .state
= ARM_CP_STATE_AA64
,
3487 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 4, .opc2
= 5,
3488 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3489 .writefn
= tlbi_aa64_ipas2e1_write
},
3490 { .name
= "TLBI_ALLE1", .state
= ARM_CP_STATE_AA64
,
3491 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 4,
3492 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3493 .writefn
= tlbi_aa64_alle1_write
},
3494 { .name
= "TLBI_VMALLS12E1", .state
= ARM_CP_STATE_AA64
,
3495 .opc0
= 1, .opc1
= 4, .crn
= 8, .crm
= 7, .opc2
= 6,
3496 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
,
3497 .writefn
= tlbi_aa64_alle1is_write
},
3498 #ifndef CONFIG_USER_ONLY
3499 /* 64 bit address translation operations */
3500 { .name
= "AT_S1E1R", .state
= ARM_CP_STATE_AA64
,
3501 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 8, .opc2
= 0,
3502 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3503 { .name
= "AT_S1E1W", .state
= ARM_CP_STATE_AA64
,
3504 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 8, .opc2
= 1,
3505 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3506 { .name
= "AT_S1E0R", .state
= ARM_CP_STATE_AA64
,
3507 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 8, .opc2
= 2,
3508 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3509 { .name
= "AT_S1E0W", .state
= ARM_CP_STATE_AA64
,
3510 .opc0
= 1, .opc1
= 0, .crn
= 7, .crm
= 8, .opc2
= 3,
3511 .access
= PL1_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3512 { .name
= "AT_S12E1R", .state
= ARM_CP_STATE_AA64
,
3513 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 4,
3514 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3515 { .name
= "AT_S12E1W", .state
= ARM_CP_STATE_AA64
,
3516 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 5,
3517 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3518 { .name
= "AT_S12E0R", .state
= ARM_CP_STATE_AA64
,
3519 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 6,
3520 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3521 { .name
= "AT_S12E0W", .state
= ARM_CP_STATE_AA64
,
3522 .opc0
= 1, .opc1
= 4, .crn
= 7, .crm
= 8, .opc2
= 7,
3523 .access
= PL2_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3524 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3525 { .name
= "AT_S1E3R", .state
= ARM_CP_STATE_AA64
,
3526 .opc0
= 1, .opc1
= 6, .crn
= 7, .crm
= 8, .opc2
= 0,
3527 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3528 { .name
= "AT_S1E3W", .state
= ARM_CP_STATE_AA64
,
3529 .opc0
= 1, .opc1
= 6, .crn
= 7, .crm
= 8, .opc2
= 1,
3530 .access
= PL3_W
, .type
= ARM_CP_NO_RAW
, .writefn
= ats_write64
},
3531 { .name
= "PAR_EL1", .state
= ARM_CP_STATE_AA64
,
3532 .type
= ARM_CP_ALIAS
,
3533 .opc0
= 3, .opc1
= 0, .crn
= 7, .crm
= 4, .opc2
= 0,
3534 .access
= PL1_RW
, .resetvalue
= 0,
3535 .fieldoffset
= offsetof(CPUARMState
, cp15
.par_el
[1]),
3536 .writefn
= par_write
},
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage 1 and enables stage 2 translation
     */
    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
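
/* Illustrative sketch (not part of the upstream file): the effect of
 * hcr_write() when a guest toggles HCR_EL2.VM. The register and mask names
 * are real; the surrounding steps are a hypothetical harness.
 *
 *   old = raw_read(env, ri);           // e.g. 0 (stage 2 disabled)
 *   guest writes HCR_EL2 = old | HCR_VM;
 *   // (old ^ new) & (HCR_VM | HCR_PTW | HCR_DC) is non-zero, so the
 *   // write path above calls tlb_flush(): cached translations built
 *   // without stage 2 cannot be reused once stage 2 is enabled.
 */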
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
    if (isread) {
        return CP_ACCESS_OK;
    }
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}
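
/* Illustrative sketch (not part of the upstream file): how a 32-bit guest
 * takes and releases the OS lock through this writefn. 0xC5ACCE55 is the
 * architected AArch32 lock key; any other value unlocks.
 *
 *   write OSLAR with 0xC5ACCE55  ->  oslock = 1, OSLSR_EL1.OSLK reads as 1
 *   write OSLAR with 0           ->  oslock = 0, OSLSR_EL1.OSLK reads as 0
 */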
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
/* Return the exception level to which SVE-disabled exceptions should
 * be taken, or 0 if SVE is enabled.
 */
static int sve_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    unsigned current_el = arm_current_el(env);

    /* The CPACR.ZEN controls traps to EL1:
     * 0, 2 : trap EL0 and EL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
    default:
        if (current_el <= 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        break;
    case 1:
        if (current_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* Similarly for CPACR.FPEN, after having checked ZEN.  */
    switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
    default:
        if (current_el <= 1) {
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        break;
    case 1:
        if (current_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* CPTR_EL2.  Check both TZ and TFP.  */
    if (current_el <= 2
        && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ))
        && !arm_is_secure_below_el3(env)) {
        return 2;
    }

    /* CPTR_EL3.  Check both EZ and TFP.  */
    if (!(env->cp15.cptr_el[3] & CPTR_EZ)
        || (env->cp15.cptr_el[3] & CPTR_TFP)) {
        return 3;
    }
#endif
    return 0;
}
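
/* Illustrative sketch (not part of the upstream file): decoding CPACR_EL1.ZEN
 * (bits [17:16]) the way the first switch above does. With cpacr_el1 =
 * 0x00010000, extract32(cpacr_el1, 16, 2) == 1, so an SVE access from EL0
 * returns 1 (trap to EL1), while the same access from EL1 falls through to
 * the FPEN / CPTR_EL2 / CPTR_EL3 checks.
 */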
static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    /* Bits other than [3:0] are RAZ/WI.  */
    raw_write(env, ri, value & 0xf);
}
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE | ARM_CP_FPU,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE | ARM_CP_FPU,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};

static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE | ARM_CP_FPU,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};

static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE | ARM_CP_FPU,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
void hw_watchpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    vaddr len = 0;
    vaddr wvr = env->cp15.dbgwvr[n];
    uint64_t wcr = env->cp15.dbgwcr[n];
    int mask;
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;

    if (env->cpu_watchpoint[n]) {
        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
        env->cpu_watchpoint[n] = NULL;
    }

    if (!extract64(wcr, 0, 1)) {
        /* E bit clear : watchpoint disabled */
        return;
    }

    switch (extract64(wcr, 3, 2)) {
    case 0:
        /* LSC 00 is reserved and must behave as if the wp is disabled */
        return;
    case 1:
        flags |= BP_MEM_READ;
        break;
    case 2:
        flags |= BP_MEM_WRITE;
        break;
    case 3:
        flags |= BP_MEM_ACCESS;
        break;
    }

    /* Attempts to use both MASK and BAS fields simultaneously are
     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
     * thus generating a watchpoint for every byte in the masked region.
     */
    mask = extract64(wcr, 24, 4);
    if (mask == 1 || mask == 2) {
        /* Reserved values of MASK; we must act as if the mask value was
         * some non-reserved value, or as if the watchpoint were disabled.
         * We choose the latter.
         */
        return;
    } else if (mask) {
        /* Watchpoint covers an aligned area up to 2GB in size */
        len = 1ULL << mask;
        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
         * whether the watchpoint fires when the unmasked bits match; we opt
         * to generate the exceptions.
         */
        wvr &= ~(len - 1);
    } else {
        /* Watchpoint covers bytes defined by the byte address select bits */
        int bas = extract64(wcr, 5, 8);
        int basstart;

        if (bas == 0) {
            /* This must act as if the watchpoint is disabled */
            return;
        }

        if (extract64(wvr, 2, 1)) {
            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
             * ignored, and BAS[3:0] define which bytes to watch.
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}
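
/* Illustrative sketch (not part of the upstream file): the BAS decoding used
 * above. For a WCR with BAS = 0b00111100 (0x3c), ctz32(0x3c) == 2 and
 * cto32(0x3c >> 2) == 4, so the watchpoint starts at wvr + 2 and covers
 * 4 bytes.
 */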
void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}
static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
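
/* Illustrative sketch (not part of the upstream file): the sign extension
 * performed above. A guest write of 0x0001800000000003 has bit [48] set,
 * so sextract64(value, 0, 49) yields 0xFFFF800000000003, and masking with
 * ~3ULL clears the RES0 bits, storing 0xFFFF800000000000.
 */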
static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}
void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
         */
        int bas = extract64(bcr, 5, 4);
        addr = sextract64(bvr, 0, 49) & ~3ULL;

        if (bas == 0) {
            return;
        }
        if (bas == 0xc) {
            addr += 2;
        }
        break;
    }
    case 2: /* unlinked context ID match */
    case 8: /* unlinked VMID match (reserved if no EL2) */
    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: unlinked context breakpoint types not implemented");
        return;
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 3: /* linked context ID match */
    default:
        /* We must generate no events for Linked context matches (unless
         * they are linked to by some other bp/wp, which is handled in
         * updates for the linking bp/wp). We choose to also generate no events
         * for reserved values.
         */
        return;
    }

    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
}
void hw_breakpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU breakpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
        hw_breakpoint_update(cpu, i);
    }
}
static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
     * copy of BAS[0].
     */
    value = deposit64(value, 6, 1, extract64(value, 5, 1));
    value = deposit64(value, 8, 1, extract64(value, 7, 1));

    raw_write(env, ri, value);
    hw_breakpoint_update(cpu, i);
}
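
/* Illustrative sketch (not part of the upstream file): the BAS mirroring
 * above. Writing BAS = 0b0101 (bits 5 and 7 of the register set) is
 * rewritten to 0b1111: bit 5 is copied into bit 6 and bit 7 into bit 8,
 * which is what restricts BAS to the four values listed in
 * hw_breakpoint_update().
 */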
static void define_debug_regs(ARMCPU *cpu)
{
    /* Define v7 and v8 architectural debug registers.
     * These are just dummy implementations for now.
     */
    int i;
    int wrps, brps, ctx_cmps;
    ARMCPRegInfo dbgdidr = {
        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
        .access = PL0_R, .accessfn = access_tda,
        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
    };

    /* Note that all these register fields hold "number of Xs minus 1". */
    brps = extract32(cpu->dbgdidr, 24, 4);
    wrps = extract32(cpu->dbgdidr, 28, 4);
    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);

    assert(ctx_cmps <= brps);

    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
     * of the debug registers such as number of breakpoints;
     * check that if they both exist then they agree.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
    }

    define_one_arm_cp_reg(cpu, &dbgdidr);
    define_arm_cp_regs(cpu, debug_cp_reginfo);

    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
    }

    for (i = 0; i < brps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
              .writefn = dbgbvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
              .writefn = dbgbcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }

    for (i = 0; i < wrps + 1; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
              .writefn = dbgwvr_write, .raw_writefn = raw_write
            },
            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW, .accessfn = access_tda,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
              .writefn = dbgwcr_write, .raw_writefn = raw_write
            },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
/* We don't know until after realize whether there's a GICv3
 * attached, and that is what registers the gicv3 sysregs.
 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
 * at runtime.
 */
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr1 = cpu->id_pfr1;

    if (env->gicv3state) {
        pfr1 |= 1 << 28;
    }
    return pfr1;
}

static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pfr0 = cpu->id_aa64pfr0;

    if (env->gicv3state) {
        pfr0 |= 1 << 24;
    }
    return pfr0;
}
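
/* Illustrative note (not part of the upstream file): ID_PFR1.GIC is bits
 * [31:28] and ID_AA64PFR0_EL1.GIC is bits [27:24], so the two readfns above
 * report a system-register GIC by OR-ing in 1 << 28 and 1 << 24 respectively
 * once a GICv3 has attached itself to env->gicv3state at realize time.
 */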
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_pfr1_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr4 },
            /* 7 is as yet unallocated and must RAZ */
            { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement only the cycle
         * count register.
         */
#ifndef CONFIG_USER_ONLY
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW,
            .type = ARM_CP_IO | ARM_CP_ALIAS,
            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo pmcr64 = {
            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
            .access = PL0_RW, .accessfn = pmreg_access,
            .type = ARM_CP_IO,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .resetvalue = cpu->midr & 0xff000000,
            .writefn = pmcr_write, .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &pmcr64);
#endif
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values.
         * Note that within the ID register ranges the unused slots
         * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
             * know the right value for the GIC field until after we
             * define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr1 },
            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar1 },
            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr0 },
            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr1 },
            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->mvfr2 },
            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid0 },
            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
              .opc0
= 3, .opc1
= 3, .crn
= 9, .crm
= 12, .opc2
= 7,
5050 .access
= PL0_R
, .accessfn
= pmreg_access
, .type
= ARM_CP_CONST
,
5051 .resetvalue
= cpu
->pmceid1
},
        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3) &&
            !arm_feature(env, ARM_FEATURE_EL2)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t vmpidr_def = mpidr_read_val(env);
        ARMCPRegInfo vpidr_regs[] = {
            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL2_RW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW, .accessfn = access_el3_aa32ns,
              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
              .access = PL2_RW,
              .resetvalue = vmpidr_def,
              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vpidr_regs);
        define_arm_cp_regs(cpu, el2_cp_reginfo);
        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
        if (!arm_feature(env, ARM_FEATURE_EL3)) {
            ARMCPRegInfo rvbar = {
                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
    } else {
        /* If EL2 is missing but higher ELs are enabled, we need to
         * register the no_el2 reginfos.
         */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
             * of MIDR_EL1 and MPIDR_EL1.
             */
            ARMCPRegInfo vpidr_regs[] = {
                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
                  .type = ARM_CP_NO_RAW,
                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        define_arm_cp_regs(cpu, el3_cp_reginfo);
        ARMCPRegInfo el3_regs[] = {
            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };

        define_arm_cp_regs(cpu, el3_regs);
    }
    /* The behaviour of NSACR is sufficiently various that we don't
     * try to describe it in a single reginfo:
     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
     *     reads as constant 0xc00 from NS EL1 and NS EL2
     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
     *  if v7 without EL3, register doesn't exist
     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .readfn = midr_read },
            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
              .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
              .name = "MPUIR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_tlbtr_reginfo.access = PL1_RW;
            id_mpuir_reginfo.access = PL1_RW;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr_reginfo[] = {
            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL1_RW, .type = ARM_CP_CONST,
              .resetvalue = cpu->reset_auxcr },
            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 32 bit view is [31:18] 0...0 [43:32]. */
            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
                | extract64(cpu->reset_cbar, 32, 12);
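            /* For example, with an illustrative reset_cbar of
             * 0x040380000000: bits [43:32] are 0x403 and bits [31:18]
             * contribute 0x80000000, giving cbar32 = 0x80000403.
             */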
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
                  .type = ARM_CP_CONST,
                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
    if (arm_feature(env, ARM_FEATURE_VBAR)) {
        ARMCPRegInfo vbar_cp_reginfo[] = {
            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
              .access = PL1_RW, .writefn = vbar_write,
              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
            .access = PL1_RW,
            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
                                   offsetof(CPUARMState, cp15.sctlr_ns) },
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }

    if (arm_feature(env, ARM_FEATURE_SVE)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
5479 /* Sort alphabetically by type name, except for "any". */
5480 static gint
arm_cpu_list_compare(gconstpointer a
, gconstpointer b
)
5482 ObjectClass
*class_a
= (ObjectClass
*)a
;
5483 ObjectClass
*class_b
= (ObjectClass
*)b
;
5484 const char *name_a
, *name_b
;
5486 name_a
= object_class_get_name(class_a
);
5487 name_b
= object_class_get_name(class_b
);
5488 if (strcmp(name_a
, "any-" TYPE_ARM_CPU
) == 0) {
5490 } else if (strcmp(name_b
, "any-" TYPE_ARM_CPU
) == 0) {
5493 return strcmp(name_a
, name_b
);
5497 static void arm_cpu_list_entry(gpointer data
, gpointer user_data
)
5499 ObjectClass
*oc
= data
;
5500 CPUListState
*s
= user_data
;
5501 const char *typename
;
5504 typename
= object_class_get_name(oc
);
5505 name
= g_strndup(typename
, strlen(typename
) - strlen("-" TYPE_ARM_CPU
));
5506 (*s
->cpu_fprintf
)(s
->file
, " %s\n",
5511 void arm_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
5515 .cpu_fprintf
= cpu_fprintf
,
5519 list
= object_class_get_list(TYPE_ARM_CPU
, false);
5520 list
= g_slist_sort(list
, arm_cpu_list_compare
);
5521 (*cpu_fprintf
)(f
, "Available CPUs:\n");
5522 g_slist_foreach(list
, arm_cpu_list_entry
, &s
);
5525 /* The 'host' CPU type is dynamically registered only if KVM is
5526 * enabled, so we have to special-case it here:
5528 (*cpu_fprintf
)(f
, " host (only available in KVM mode)\n");
5532 static void arm_cpu_add_definition(gpointer data
, gpointer user_data
)
5534 ObjectClass
*oc
= data
;
5535 CpuDefinitionInfoList
**cpu_list
= user_data
;
5536 CpuDefinitionInfoList
*entry
;
5537 CpuDefinitionInfo
*info
;
5538 const char *typename
;
5540 typename
= object_class_get_name(oc
);
5541 info
= g_malloc0(sizeof(*info
));
5542 info
->name
= g_strndup(typename
,
5543 strlen(typename
) - strlen("-" TYPE_ARM_CPU
));
5544 info
->q_typename
= g_strdup(typename
);
5546 entry
= g_malloc0(sizeof(*entry
));
5547 entry
->value
= info
;
5548 entry
->next
= *cpu_list
;
5552 CpuDefinitionInfoList
*arch_query_cpu_definitions(Error
**errp
)
5554 CpuDefinitionInfoList
*cpu_list
= NULL
;
5557 list
= object_class_get_list(TYPE_ARM_CPU
, false);
5558 g_slist_foreach(list
, arm_cpu_add_definition
, &cpu_list
);
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;

    /* Reset the secure state to the specific incoming state. This is
     * necessary as the register may have been defined with both states.
     */
    r2->secure = secstate;

    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
        /* Register is banked (using both entries in array).
         * Overwriting fieldoffset as the array is only used to define
         * banked registers but later only fieldoffset is used.
         */
        r2->fieldoffset = r->bank_fieldoffsets[ns];
    }

    if (state == ARM_CP_STATE_AA32) {
        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
            /* If the register is banked then we don't need to migrate or
             * reset the 32-bit instance in certain cases:
             *
             * 1) If the register has both 32-bit and 64-bit instances then we
             *    can count on the 64-bit instance taking care of the
             *    non-secure bank.
             * 2) If ARMv8 is enabled then we can count on a 64-bit version
             *    taking care of the secure bank. This requires that separate
             *    32 and 64-bit definitions are provided.
             */
            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
                r2->type |= ARM_CP_ALIAS;
            }
        } else if ((secstate != r->secure) && !ns) {
            /* The register is not banked so we only want to allow migration of
             * the non-secure instance.
             */
            r2->type |= ARM_CP_ALIAS;
        }

        if (r->state == ARM_CP_STATE_BOTH) {
            /* We assume it is a cp15 register if the .cp field is left unset.
             */
            if (r2->cp == 0) {
                r2->cp = 15;
            }

#ifdef HOST_WORDS_BIGENDIAN
            if (r2->fieldoffset) {
                r2->fieldoffset += sizeof(uint32_t);
            }
#endif
        }
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         * STATE_BOTH definitions are also always "standard
         * sysreg" in their AArch64 view (the .cp value may
         * be non-zero for the benefit of the AArch32 view).
         */
        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
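    /* Either way the key packs the full encoding -- coprocessor number,
     * 32/64-bit flag, security state and crn/crm/opc fields -- into one
     * 32-bit value, so e.g. the S and NS views of a banked AArch32
     * register occupy distinct hash table slots.
     */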
    r2->opaque = opaque;
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * ALIAS so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
    if ((r->type & ARM_CP_SPECIAL)) {
        r2->type |= ARM_CP_NO_RAW;
    }
    if (((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS;
    }

    /* Check that raw accesses are either forbidden or handled. Note that
     * we can't assert this earlier because the setup of fieldoffset for
     * banked registers has to be done first.
     */
    if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
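    /* A single reginfo using CP_ANY therefore expands to one hashtable
     * entry per encoding it matches: e.g. a definition with .crm = CP_ANY
     * and fixed opc1/opc2 is registered 16 times (crm = 0..15) by the
     * loops below.
     */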
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
        if (r->access & PL3_W) {
            assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2);
                            break;
                        default:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of cp15 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2);
                    }
                }
            }
        }
    }
}
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->cp15.hcr_el2 & HCR_TGE) &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            !arm_is_secure_below_el3(env)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
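/* The NZCV flags live in separate cached fields (NF, ZF, CF, VF) that the
 * translated code updates directly; cpsr_read() folds them back into CPSR
 * format, e.g. Z reads as set exactly when the cached ZF value is zero.
 */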
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                env->uncached_cpsr |= CPSR_IL;
            }
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
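/* These operate on the two packed bytes at positions 0 and 16: for example
 * uxtb16(0x12345678) == 0x00340078, and sxtb16(0x00800080) == 0xff80ff80
 * since both extracted bytes have their sign bit set.
 */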
int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}
#if defined(CONFIG_USER_ONLY)

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t QEMU_NORETURN HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}
void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else
void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
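/* FIQ mode banks r8-r12 as well as r13/r14, which is why the FIQ paths
 * above copy five registers (regs[8..12]) to or from the fiq_regs array
 * in addition to the usual r13/r14/SPSR banking.
 */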
/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contain a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
};
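/* Example lookup: a non-secure IRQ taken from EL0 on a CPU with 64-bit
 * EL3, SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.IMO = 0 reads
 * target_el_table[1][0][1][0][0][0], i.e. the "1 0 1 0" row, giving EL1.
 */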
/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    int rw;
    int scr;
    int hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
        break;
    }

    /* If HCR.TGE is set then HCR is treated as being 1 */
    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}
static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, bool ignfault)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi;
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during stacking\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
                         attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception. Even in this
     * case we will still update the fault status registers.
     */
    if (!ignfault) {
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
    }
    return false;
}
static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;
    ARMMMUFaultInfo fi;
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
                      &attrs, &prot, &page_size, &fi, NULL)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
                              attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /* By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}
/* Return true if we're using the process stack pointer (not the MSP) */
static bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
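/* Note that env->regs[13] always holds the currently active stack pointer
 * and env->v7m.other_sp the inactive one for the current security state;
 * the functions below swap the two whenever a change to SPSEL or to
 * Handler/Thread state changes which stack is active.
 */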
/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/* Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /* Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}
/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /* All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /* This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    switch_v7m_security_state(env, dest & 1);
    env->thumb = 1;
    env->regs[15] = dest & ~1;
}
void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /* target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = 1;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data(env, sp, nextinst);
    cpu_stl_data(env, sp + 4, saved_psr);

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /* Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    switch_v7m_security_state(env, 0);
    env->thumb = 1;
    env->regs[15] = dest;
}
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                                bool spsel)
{
    /* Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (eg changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling of pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * opencodes the stack-selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /* We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /* NS access to S memory */
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        goto load_fail;
    }
    *pvec = vector_entry;
    return true;

load_fail:
    /* All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a MemManage or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     */
    exc_secure = targets_secure ||
        !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /* For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
                                    lr & R_V7M_EXCRET_SPSEL_MASK);
    } else {
        mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
        frame_sp_p = &env->regs[13];
    }

    frameptr = *frame_sp_p - 0x28;
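    /* The callee-saves frame is 0x28 bytes: the integrity signature, a
     * reserved word, and r4-r11 (ten words; note the writes below skip
     * the reserved slot at frameptr + 0x4).
     */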
    /* Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
                        ignore_faults) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
                        ignore_faults);

    /* Update SP regardless of whether any of the stack accesses failed.
     * When we implement v8M stack limit checking then this attempt to
     * update SP might also fail and result in a derived exception.
     */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /* Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /* The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /* We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /* We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /* Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure) {
                /* Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;

                for (i = 0; i < 13; i++) {
                    /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
                    if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
                        env->regs[i] = 0;
                    }
                }
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /* Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /* Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

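/* A quick reference for the EXC_RETURN (lr) bits manipulated above, as
 * defined for v8M (on v7M only the low four bits are significant):
 *   ES    - bit 0: target security state of the exception
 *   SPSEL - bit 2: return stack (0 = Main, 1 = Process)
 *   MODE  - bit 3: return mode (0 = Handler, 1 = Thread)
 *   FTYPE - bit 4: 1 if no FP state is on the frame
 *   DCRS  - bit 5: default callee-saves register stacking rules
 *   S     - bit 6: security state the exception was taken from
 * The R_V7M_EXCRET_*_MASK constants encode these bit positions.
 */
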
static bool v7m_push_stack(ARMCPU *cpu)
{
    /* Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    frameptr -= 0x20;

    /* Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok =
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);

    /* Update SP regardless of whether any of the stack accesses failed.
     * When we implement v8M stack limit checking then this attempt to
     * update SP might also fail and result in a derived exception.
     */
    env->regs[13] = frameptr;

    return !stacked_ok;
}

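/* For reference, the basic (caller-saves) exception frame written above:
 *   frameptr + 0x00 : r0        frameptr + 0x10 : r12
 *   frameptr + 0x04 : r1        frameptr + 0x14 : lr (r14)
 *   frameptr + 0x08 : r2        frameptr + 0x18 : return address (r15)
 *   frameptr + 0x0c : r3        frameptr + 0x1c : xPSR
 * xPSR bit 9 (XPSR_SPREALIGN) records whether a padding word was inserted
 * to 8-align the stack, so the exception return path can undo it.
 */
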
static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t excret;
    uint32_t xpsr;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;

    /* If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /* In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
         * we pick which FAULTMASK to clear.
         */
        if (!env->v7m.secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) ||
             !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = true;
            /* For all other purposes, treat ES as 0 (R_HXSR) */
            excret &= ~R_V7M_EXCRET_ES_MASK;
        }
    }

    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
        /* Auto-clear FAULTMASK on return from other than NMI.
         * If the security extension is implemented then this only
         * happens if the raw execution priority is >= 0; the
         * value of the ES bit in the exception return value indicates
         * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            exc_secure = excret & R_V7M_EXCRET_ES_MASK;
            if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
                env->v7m.faultmask[exc_secure] = 0;
            }
        } else {
            env->v7m.faultmask[M_REG_NS] = 0;
        }
    }

    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
                                     exc_secure)) {
    case -1:
        /* attempt to exit an exception that isn't active */
        ufault = true;
        break;
    case 0:
        /* still an irq active now */
        break;
    case 1:
        /* we returned to base exception level, no nesting.
         * (In the pseudocode this is written using "NestedActivation != 1"
         * where we have 'rettobase == false'.)
         */
        rettobase = true;
        break;
    default:
        g_assert_not_reached();
    }

    return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
    return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
    return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        (excret & R_V7M_EXCRET_S_MASK);

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
             * we choose to take the UsageFault.
             */
            if ((excret & R_V7M_EXCRET_S_MASK) ||
                (excret & R_V7M_EXCRET_ES_MASK) ||
                !(excret & R_V7M_EXCRET_DCRS_MASK)) {
                ufault = true;
            }
        }
        if (excret & R_V7M_EXCRET_RES0_MASK) {
            ufault = true;
        }
    } else {
        /* For v7M we only recognize certain combinations of the low bits */
        switch (excret & 0xf) {
        case 1: /* Return to Handler */
            break;
        case 13: /* Return to Thread using Process stack */
        case 9: /* Return to Thread using Main stack */
            /* We only need to check NONBASETHRDENA for v7M, because in
             * v8M this bit does not exist (it is RES1).
             */
            if (!rettobase &&
                !(env->v7m.ccr[env->v7m.secure] &
                  R_V7M_CCR_NONBASETHRDENA_MASK)) {
                ufault = true;
            }
            break;
        default:
            ufault = true;
        }
    }

    if (sfault) {
        env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        v7m_exception_taken(cpu, excret, true, false);
        qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                      "stackframe: failed EXC_RETURN.ES validity check\n");
        return;
    }

    if (ufault) {
        /* Bad exception return: instead of popping the exception
         * stack, directly take a usage fault on the current stack.
         */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        v7m_exception_taken(cpu, excret, true, false);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                      "stackframe: failed exception return integrity check\n");
        return;
    }

    /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
     * Handler mode (and will be until we write the new XPSR.Interrupt
     * field) this does not switch around the current stack pointer.
     */
    write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);

    switch_v7m_security_state(env, return_to_secure);

    {
        /* The stack pointer we should be reading the exception frame from
         * depends on bits in the magic exception return type value (and
         * for v8M isn't necessarily the stack pointer we will eventually
         * end up resuming execution with). Get a pointer to the location
         * in the CPU state struct where the SP we need is currently being
         * stored; we will use and modify it in place.
         * We use this limited C variable scope so we don't accidentally
         * use 'frame_sp_p' after we do something that makes it invalid.
         */
        uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
                                              return_to_secure,
                                              !return_to_handler,
                                              return_to_sp_process);
        uint32_t frameptr = *frame_sp_p;
        bool pop_ok = true;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
                                                        !return_to_handler);

        if (!QEMU_IS_ALIGNED(frameptr, 8) &&
            arm_feature(env, ARM_FEATURE_V8)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "M profile exception return with non-8-aligned SP "
                          "for destination state is UNPREDICTABLE\n");
        }

        /* Do we need to pop callee-saved registers? */
        if (return_to_secure &&
            ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
             (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
            uint32_t expected_sig = 0xfefa125b;
            uint32_t actual_sig = ldl_phys(cs->as, frameptr);

            if (expected_sig != actual_sig) {
                /* Take a SecureFault on the current stack */
                env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
                v7m_exception_taken(cpu, excret, true, false);
                qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
                              "stackframe: failed exception return integrity "
                              "signature check\n");
                return;
            }

            pop_ok =
                v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
                v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);

            frameptr += 0x28;
        }

        /* Pop registers */
        pop_ok = pop_ok &&
            v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
            v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
            v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);

        if (!pop_ok) {
            /* v7m_stack_read() pended a fault, so take it (as a tail
             * chained exception on the same stack frame)
             */
            v7m_exception_taken(cpu, excret, true, false);
            return;
        }

        /* Returning from an exception with a PC with bit 0 set is defined
         * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
         * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
         * the lsbit, and there are several RTOSes out there which incorrectly
         * assume the r15 in the stack frame should be a Thumb-style "lsbit
         * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
         * complain about the badly behaved guest.
         */
        if (env->regs[15] & 1) {
            env->regs[15] &= ~1U;
            if (!arm_feature(env, ARM_FEATURE_V8)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "M profile return from interrupt with misaligned "
                              "PC is UNPREDICTABLE on v7M\n");
            }
        }

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* For v8M we have to check whether the xPSR exception field
             * matches the EXCRET value for return to handler/thread
             * before we commit to changing the SP and xPSR.
             */
            bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
            if (return_to_handler != will_be_handler) {
                /* Take an INVPC UsageFault on the current stack.
                 * By this point we will have switched to the security state
                 * for the background state, so this UsageFault will target
                 * the right state.
                 */
                armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                        env->v7m.secure);
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
                v7m_exception_taken(cpu, excret, true, false);
                qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
                              "stackframe: failed exception return integrity "
                              "check\n");
                return;
            }
        }

        /* Commit to consuming the stack frame */
        frameptr += 0x20;
        /* Undo stack alignment (the SPREALIGN bit indicates that the original
         * pre-exception SP was not 8-aligned and we added a padding word to
         * align it, so we undo this by ORing in the bit that increases it
         * from the current 8-aligned value to the 8-unaligned value. (Adding 4
         * would work too but a logical OR is how the pseudocode specifies it.)
         */
        if (xpsr & XPSR_SPREALIGN) {
            frameptr |= 4;
        }
        *frame_sp_p = frameptr;
    }
    /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
    xpsr_write(env, xpsr, ~XPSR_SPREALIGN);

    /* The restored xPSR exception field will be zero if we're
     * resuming in Thread mode. If that doesn't match what the
     * exception return excret specified then this is a UsageFault.
     * v7M requires we make this check here; v8M did it earlier.
     */
    if (return_to_handler != arm_v7m_is_handler_mode(env)) {
        /* Take an INVPC UsageFault by pushing the stack again;
         * we know we're v7M so this is never a Secure UsageFault.
         */
        bool ignore_stackfaults;

        assert(!arm_feature(env, ARM_FEATURE_V8));
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
        ignore_stackfaults = v7m_push_stack(cpu);
        v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
                      "failed exception return integrity check\n");
        return;
    }

    /* Otherwise, we have a successful exception exit. */
    arm_clear_exclusive(env);
    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}

static bool do_v7m_function_return(ARMCPU *cpu)
{
    /* v8M security extensions magic function return.
     * We may either:
     *  (1) throw an exception (longjump)
     *  (2) return true if we successfully handled the function return
     *  (3) return false if we failed a consistency check and have
     *      pended a UsageFault that needs to be taken now
     *
     * At this point the magic return value is split between env->regs[15]
     * and env->thumb. We don't bother to reconstitute it because we don't
     * need it (all values are handled the same way).
     */
    CPUARMState *env = &cpu->env;
    uint32_t newpc, newpsr, newpsr_exc;

    qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");

    {
        bool threadmode, spsel;
        TCGMemOpIdx oi;
        ARMMMUIdx mmu_idx;
        uint32_t *frame_sp_p;
        uint32_t frameptr;

        /* Pull the return address and IPSR from the Secure stack */
        threadmode = !arm_v7m_is_handler_mode(env);
        spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;

        frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
        frameptr = *frame_sp_p;

        /* These loads may throw an exception (for MPU faults). We want to
         * do them as secure, so work out what MMU index that is.
         */
        mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
        oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
        newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
        newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);

        /* Consistency checks on new IPSR */
        newpsr_exc = newpsr & XPSR_EXCP;
        if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
              (env->v7m.exception == 1 && newpsr_exc != 0))) {
            /* Pend the fault and tell our caller to take it */
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            qemu_log_mask(CPU_LOG_INT,
                          "...taking INVPC UsageFault: "
                          "IPSR consistency check failed\n");
            return false;
        }

        *frame_sp_p = frameptr + 8;
    }

    /* This invalidates frame_sp_p */
    switch_v7m_security_state(env, true);
    env->v7m.exception = newpsr_exc;
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    if (newpsr & XPSR_SFPA) {
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
    }
    xpsr_write(env, 0, XPSR_IT);
    env->thumb = newpc & 1;
    env->regs[15] = newpc & ~1;

    qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
    return true;
}

static void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}

static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                               uint32_t addr, uint16_t *insn)
{
    /* Load a 16-bit portion of a v7M instruction, returning true on success,
     * or false on failure (in which case we will have pended the appropriate
     * exception).
     * We need to do the instruction fetch's MPU and SAU checks
     * like this because there is no MMU index that would allow
     * doing the load with a single function call. Instead we must
     * first check that the security attributes permit the load
     * and that they don't mismatch on the two halves of the instruction,
     * and then we do the load as a secure load (ie using the security
     * attributes of the address, not the CPU, as architecturally required).
     */
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    V8M_SAttributes sattrs = {};
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    MemTxResult txres;
    target_ulong page_size;
    hwaddr physaddr;
    int prot;

    v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
    if (!sattrs.nsc || sattrs.ns) {
        /* This must be the second half of the insn, and it straddles a
         * region boundary with the second half not being S&NSC.
         */
        env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        qemu_log_mask(CPU_LOG_INT,
                      "...really SecureFault with SFSR.INVEP\n");
        return false;
    }
    if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
                      &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
        /* the MPU lookup failed */
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
        qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
        return false;
    }
    *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
                                  attrs, &txres);
    if (txres != MEMTX_OK) {
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
        qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
        return false;
    }
    return true;
}

static bool v7m_handle_execute_nsc(ARMCPU *cpu)
{
    /* Check whether this attempt to execute code in a Secure & NS-Callable
     * memory region is for an SG instruction; if so, then emulate the
     * effect of the SG instruction and return true. Otherwise pend
     * the correct kind of exception and return false.
     */
    CPUARMState *env = &cpu->env;
    ARMMMUIdx mmu_idx;
    uint16_t insn;

    /* We should never get here unless get_phys_addr_pmsav8() caused
     * an exception for NS executing in S&NSC memory.
     */
    assert(!env->v7m.secure);
    assert(arm_feature(env, ARM_FEATURE_M_SECURITY));

    /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
        return false;
    }

    if (!env->thumb) {
        goto gen_invep;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction first half (we choose the IMPDEF
         * early-SG-check option).
         */
        goto gen_invep;
    }

    if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
        return false;
    }

    if (insn != 0xe97f) {
        /* Not an SG instruction second half (yes, both halves of the SG
         * insn have the same hex value)
         */
        goto gen_invep;
    }

    /* OK, we have confirmed that we really have an SG instruction.
     * We know we're NS in S memory so don't need to repeat those checks.
     */
    qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
                  ", executing it\n", env->regs[15]);
    env->regs[14] &= ~1;
    switch_v7m_security_state(env, true);
    xpsr_write(env, 0, XPSR_IT);
    env->regs[15] += 4; /* and advance past the SG insn */
    return true;

gen_invep:
    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
    qemu_log_mask(CPU_LOG_INT,
                  "...really SecureFault with SFSR.INVEP\n");
    return false;
}

void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t lr;
    bool ignore_stackfaults;

    arm_log_exception(cs->exception_index);

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
        break;
    case EXCP_NOCP:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
        break;
    case EXCP_INVSTATE:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
        break;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /* Note that for M profile we don't have a guest facing FSR, but
         * the env->exception.fsr will be populated by the code that
         * raises the fault, in the A profile short-descriptor format.
         */
        switch (env->exception.fsr & 0xf) {
        case M_FAKE_FSR_NSC_EXEC:
            /* Exception generated when we try to execute code at an address
             * which is marked as Secure & Non-Secure Callable and the CPU
             * is in the Non-Secure state. The only instruction which can
             * be executed like this is SG (and that only if both halves of
             * the SG instruction have the same security attributes.)
             * Everything else must generate an INVEP SecureFault, so we
             * emulate the SG instruction here.
             */
            if (v7m_handle_execute_nsc(cpu)) {
                return;
            }
            break;
        case M_FAKE_FSR_SFAULT:
            /* Various flavours of SecureFault for attempts to execute or
             * access data in the wrong security state.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                if (env->v7m.secure) {
                    env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVTRAN\n");
                } else {
                    env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
                    qemu_log_mask(CPU_LOG_INT,
                                  "...really SecureFault with SFSR.INVEP\n");
                }
                break;
            case EXCP_DATA_ABORT:
                /* This must be an NS access to S memory */
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT,
                              "...really SecureFault with SFSR.AUVIOL\n");
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
            break;
        case 0x8: /* External Abort */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[M_REG_NS] |=
                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
                env->v7m.bfar = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
                              env->v7m.bfar);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
            break;
        default:
            /* All other FSR values are either MPU faults or "can't happen
             * for M profile" cases.
             */
            switch (cs->exception_index) {
            case EXCP_PREFETCH_ABORT:
                env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
                break;
            case EXCP_DATA_ABORT:
                env->v7m.cfsr[env->v7m.secure] |=
                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
                env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
                qemu_log_mask(CPU_LOG_INT,
                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
                              env->v7m.mmfar[env->v7m.secure]);
                break;
            }
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
                                    env->v7m.secure);
            break;
        }
        break;
    case EXCP_BKPT:
        if (semihosting_enabled()) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                qemu_log_mask(CPU_LOG_INT,
                              "...handling as semihosting call 0x%x\n",
                              env->regs[0]);
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
        break;
    case EXCP_IRQ:
        break;
    case EXCP_EXCEPTION_EXIT:
        if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
            /* Must be v8M security extension function return */
            assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
            assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
            if (do_v7m_function_return(cpu)) {
                return;
            }
        } else {
            do_v7m_exception_exit(cpu);
            return;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK;
        /* The S bit indicates whether we should return to Secure
         * or NonSecure (ie our current state).
         * The ES bit indicates whether we're taking this exception
         * to Secure or NonSecure (ie our target state). We set it
         * later, in v7m_exception_taken().
         * The SPSEL bit is also set in v7m_exception_taken() for v8M.
         * This corresponds to the ARM ARM pseudocode for v8M setting
         * some LR bits in PushStack() and some in ExceptionTaken();
         * the distinction matters for the tailchain cases where we
         * can take an exception without pushing the stack.
         */
        if (env->v7m.secure) {
            lr |= R_V7M_EXCRET_S_MASK;
        }
    } else {
        lr = R_V7M_EXCRET_RES1_MASK |
            R_V7M_EXCRET_S_MASK |
            R_V7M_EXCRET_DCRS_MASK |
            R_V7M_EXCRET_FTYPE_MASK |
            R_V7M_EXCRET_ES_MASK;
        if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }
    }
    if (!arm_v7m_is_handler_mode(env)) {
        lr |= R_V7M_EXCRET_MODE_MASK;
    }

    ignore_stackfaults = v7m_push_stack(cpu);
    v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
    qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}

/* Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /* Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}

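/* Reference mapping used by the two sync functions (this one and
 * aarch64_sync_64_to_32() below):
 *   x0  - x7  : r0 - r7 (shared by all modes)
 *   x8  - x12 : r8 - r12 (FIQ mode has its own banked copies)
 *   x13 - x23 : the SP/LR pairs for USR/SYS, HYP, IRQ, SVC, ABT, UND
 *   x24 - x30 : the FIQ-mode banked r8 - r14
 * Only the registers belonging to the current mode live in env->regs[];
 * the rest are fetched from the banked_r13/banked_r14/usr_regs/fiq_regs
 * arrays, as the if/else ladders above show.
 */
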
/* Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /* Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * regs.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /* HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    /* TODO: Vectored interrupt controller.  */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        env->exception.fsr = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    switch_mode (env, new_mode);
    /* For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->uncached_cpsr &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    env->daif |= mask;
    /* This is a lie, as there was no c1_sys on V4T/V5, but who cares;
     * we should just guard the thumb mode on V4.
     */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
}

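/* Reference: the AArch32 exception vector offsets used for 'addr' above:
 *   0x04 Undefined Instruction   0x0c Prefetch Abort   0x18 IRQ
 *   0x08 Supervisor Call (SVC)   0x10 Data Abort       0x1c FIQ
 * The base is MVBAR for Monitor mode, 0xffff0000 when high vectors are
 * enabled (SCTLR.V), and the banked VBAR otherwise.
 */
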
/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);

    if (arm_current_el(env) < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
            break;
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_SEMIHOST:
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_arm_semihosting(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}

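/* Reference: the AArch64 vector table layout behind the offset arithmetic
 * above. Each target EL's table at VBAR_ELx has four 0x200-byte groups:
 *   +0x000 current EL with SP_EL0   +0x400 lower EL using AArch64
 *   +0x200 current EL with SP_ELx   +0x600 lower EL using AArch32
 * and within each group: +0x00 synchronous, +0x80 IRQ, +0x100 FIQ,
 * +0x180 SError (the last is not separately distinguished here).
 */
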
static inline bool check_for_semihosting(CPUState *cs)
{
    /* Check whether this exception is a semihosting call; if so
     * then handle it and return true; otherwise return false.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            /* This is always the 64-bit semihosting exception.
             * The "is this usermode" and "is semihosting enabled"
             * checks have been done at translate time.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...handling as semihosting call 0x%" PRIx64 "\n",
                          env->xregs[0]);
            env->xregs[0] = do_arm_semihosting(env);
            return true;
        }
        return false;
    } else {
        uint32_t imm;

        /* Only intercept calls from privileged modes, to provide some
         * semblance of security.
         */
        if (cs->exception_index != EXCP_SEMIHOST &&
            (!semihosting_enabled() ||
             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
            return false;
        }

        switch (cs->exception_index) {
        case EXCP_SEMIHOST:
            /* This is always a semihosting call; the "is this usermode"
             * and "is semihosting enabled" checks have been done at
             * translate time.
             */
            break;
        case EXCP_SWI:
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    break;
                }
            } else {
                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
                    & 0xffffff;
                if (imm == 0x123456) {
                    break;
                }
            }
            return false;
        case EXCP_BKPT:
            /* See if this is a semihosting syscall.  */
            if (env->thumb) {
                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
                    & 0xff;
                if (imm == 0xab) {
                    env->regs[15] += 2;
                    break;
                }
            }
            return false;
        default:
            return false;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_arm_semihosting(env);
        return true;
    }
}

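/* Reference: the semihosting trap immediates checked above are the standard
 * ARM ones: SVC 0x123456 in ARM state, SVC 0xab in Thumb state, and
 * BKPT 0xab (Thumb). On success the call number is taken from r0/x0 and the
 * result is written back to the same register.
 */
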
/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /* Semihosting semantics depend on the register width of the
     * code that caused the exception, not the target exception level,
     * so must be handled here.
     */
    if (check_for_semihosting(cs)) {
        return;
    }

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_S1E2:
        return 2;
    case ARMMMUIdx_S1E3:
        return 3;
    case ARMMMUIdx_S1SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    if (mmu_idx == ARMMMUIdx_S2NS) {
        return (env->cp15.hcr_el2 & HCR_VM) == 0;
    }
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return &env->cp15.vtcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
    }
    return mmu_idx;
}

/* Returns TBI0 value for current regime el */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return extract64(tcr->raw_tcr, 20, 1);
    } else {
        return extract64(tcr->raw_tcr, 37, 1);
    }
}

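/* Reference: TBI ("top byte ignore") makes bits [63:56] of a virtual address
 * ignorable for translation. For the EL1&0 regime TCR_EL1 has separate
 * TBI0/TBI1 bits (37 and 38) covering the TTBR0 and TTBR1 halves of the
 * address space; the EL2/EL3 regimes have a single TBI bit at position 20,
 * which is why arm_regime_tbi1() below returns 0 for them.
 */
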
/* Returns TBI1 value for current regime el */
uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    TCR *tcr;
    uint32_t el;

    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
     */
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    tcr = regime_tcr(env, mmu_idx);
    el = regime_el(env, mmu_idx);

    if (el > 1) {
        return 0;
    } else {
        return extract64(tcr->raw_tcr, 38, 1);
    }
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_S2NS) {
        return env->cp15.vttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

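/* Reference: the "simple" AP[2:1] encodings decoded above:
 *   0 : privileged read/write, no unprivileged access
 *   1 : read/write at all levels
 *   2 : privileged read-only, no unprivileged access
 *   3 : read-only at all levels
 * The legacy 3-bit AP[2:0] scheme handled by ap_to_rw_prot() additionally
 * depends on SCTLR.S/SCTLR.R for the AP == 0 cases on pre-v7 CPUs.
 */
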
/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bit
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }
    if (!xn) {
        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
            prot |= PAGE_EXEC;
        }
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_S2NS);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
            if (!is_user) {
                xn = pxn || (user_rw & PAGE_WRITE);
            }
            break;
        case 2:
        case 3:
            break;
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;

                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}

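/* Reference: the TTBR0/TTBR1 split implemented above. TTBCR.N sets how many
 * top bits of the VA select the table: addresses whose selected top bits are
 * non-zero (address & tcr->mask) walk from TTBR1, the rest from TTBR0, and
 * TTBCR.PD0/PD1 can disable the walk for either half. The final
 * "(address >> 18) & 0x3ffc" indexes the 4-byte L1 descriptor for the 1MB
 * section containing the address.
 */
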
/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, MemTxAttrs txattrs,
                               ARMMMUFaultInfo *fi)
{
    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;

        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
                                 &txattrs, &s2prot, &s2size, fi, NULL);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            return ~0;
        }
        addr = s2pa;
    }
    return addr;
}

/* All loads done in the course of a page table walk go through here.
 * TODO: rather than ignoring errors from physical memory reads (which
 * are external aborts in ARM terminology) we should propagate this
 * error out so that we can turn it into a Data Abort if this walk
 * was being done for a CPU load/store or an address translation instruction
 * (but not if it was for a debug access).
 */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
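/* Worked example (illustrative comment, not from the original source): a v5
 * 4KB small page descriptor carries four AP subfields, one per 1KB quarter
 * of the page. "(address >> 9) & 6" maps VA[11:10] to a shift of 0, 2, 4 or
 * 6, so "desc >> (4 + shift)" selects AP0..AP3 from descriptor bits [5:4],
 * [7:6], [9:8] or [11:10].
 */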
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
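/* Worked example (illustrative comment, not from the original source): for
 * a 4KB granule (stride 9, grainsize 12), inputsize 40 and suggested start
 * level 1, startsizecheck = 40 - (2 * 9 + 12) = 10, inside the permitted
 * 1..13 (stride + 4) range, so the check passes.
 */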
/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
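/* Worked example (illustrative comment, not from the original source):
 * s2attrs 0xf (outer/inner write-back) with caching enabled gives
 * hiattr = loattr = 3 and hihint = lohint = 3, i.e.
 * (3 << 6) | (3 << 4) | (3 << 2) | 3 = 0xff, the usual Normal write-back
 * read/write-allocate MAIR byte.
 */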
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    uint32_t epd = 0;
    int32_t t0sz, t1sz;
    uint32_t tg;
    uint64_t ttbr;
    int ttbr_select;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride = 9;
    int32_t addrsize;
    int inputsize;
    int32_t tbi = 0;
    TCR *tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    bool ttbr1_valid = true;
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);

    /* TODO:
     * This code does not handle the different format TCR for VTCR_EL2.
     * This code also does not support shareability levels.
     * Attribute and permission bit handling should also be checked when adding
     * support for those page table walks.
     */
    if (aarch64) {
        level = 0;
        addrsize = 64;
        if (el > 1) {
            if (mmu_idx != ARMMMUIdx_S2NS) {
                tbi = extract64(tcr->raw_tcr, 20, 1);
            }
        } else {
            if (extract64(address, 55, 1)) {
                tbi = extract64(tcr->raw_tcr, 38, 1);
            } else {
                tbi = extract64(tcr->raw_tcr, 37, 1);
            }
        }
        tbi *= 8;

        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
         * invalid.
         */
        if (el > 1) {
            ttbr1_valid = false;
        }
    } else {
        level = 1;
        addrsize = 32;
        /* There is no TTBR1 for EL2 */
        if (el == 2) {
            ttbr1_valid = false;
        }
    }

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    if (aarch64) {
        /* AArch64 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 6);
        t0sz = MIN(t0sz, 39);
        t0sz = MAX(t0sz, 16);
    } else if (mmu_idx != ARMMMUIdx_S2NS) {
        /* AArch32 stage 1 translation.  */
        t0sz = extract32(tcr->raw_tcr, 0, 3);
    } else {
        /* AArch32 stage 2 translation.  */
        bool sext = extract32(tcr->raw_tcr, 4, 1);
        bool sign = extract32(tcr->raw_tcr, 3, 1);
        /* Address size is 40-bit for a stage 2 translation,
         * and t0sz can be negative (from -8 to 7),
         * so we need to adjust it to use the TTBR selecting logic below.
         */
        addrsize = 40;
        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;

        /* If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.  */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
    }
    t1sz = extract32(tcr->raw_tcr, 16, 6);
    if (aarch64) {
        t1sz = MIN(t1sz, 39);
        t1sz = MAX(t1sz, 16);
    }
    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (ttbr1_valid && t1sz &&
               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz && ttbr1_valid) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = ARMFault_Translation;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = regime_ttbr(env, mmu_idx, 0);
        if (el < 2) {
            epd = extract32(tcr->raw_tcr, 7, 1);
        }
        inputsize = addrsize - t0sz;

        tg = extract32(tcr->raw_tcr, 14, 2);
        if (tg == 1) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 2) { /* 16KB pages */
            stride = 11;
        }
    } else {
        /* We should only be here if TTBR1 is valid */
        assert(ttbr1_valid);

        ttbr = regime_ttbr(env, mmu_idx, 1);
        epd = extract32(tcr->raw_tcr, 23, 1);
        inputsize = addrsize - t1sz;

        tg = extract32(tcr->raw_tcr, 30, 2);
        if (tg == 3) { /* 64KB pages */
            stride = 13;
        }
        if (tg == 1) { /* 16KB pages */
            stride = 11;
        }
    }

    /* Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_S2NS) {
        /* The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
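        /* Worked example (illustrative comment, not from the original
         * source): a 48-bit inputsize with a 4KB granule (stride 9) gives
         * level = 4 - 44 / 9 = 0: four 9-bit table strides plus the 12-bit
         * page offset consume all 48 bits. A 39-bit inputsize gives
         * level = 4 - 35 / 9 = 1.
         */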
    } else {
        /* For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
        uint32_t startlevel;
        bool ok;

        if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);
    descaddr &= ~indexmask;

    /* The address field in the descriptor goes up to bit 39 for ARMv7
     * but up to bit 47 for ARMv8, but we use the descaddrmask
     * up to bit 39 for AArch32, because we don't need other bits in that case
     * to construct next descriptor address (anyway they should be all zeroes).
     */
    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
                   ~indexmask_grainsize;

    /* Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & descaddrmask;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
        descaddr |= (address & (page_size - 1));
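        /* Worked example (illustrative comment, not from the original
         * source): with a 4KB granule (stride 9), a level 3 page entry
         * yields page_size = 1 << 12 = 4KB, a level 2 block 1 << 21 = 2MB
         * and a level 1 block 1 << 30 = 1GB.
         */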
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_S2NS) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        attrs |= nstable << 3; /* NS */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);
    xn = extract32(attrs, 12, 1);

    if (mmu_idx == ARMMMUIdx_S2NS) {
        ns = true;
        *prot = get_S2prot(env, ap, xn);
    } else {
        ns = extract32(attrs, 3, 1);
        pxn = extract32(attrs, 11, 1);
        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(*prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        txattrs->secure = false;
    }

    if (cacheattrs != NULL) {
        if (mmu_idx == ARMMMUIdx_S2NS) {
            cacheattrs->attrs = convert_stage2_attrs(env,
                                                     extract32(attrs, 0, 4));
        } else {
            /* Index into MAIR registers for cache attributes */
            uint8_t attrindx = extract32(attrs, 0, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            assert(attrindx <= 7);
            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
        }
        cacheattrs->shareability = extract32(attrs, 6, 2);
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
    return true;
}
static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
                                                ARMMMUIdx mmu_idx,
                                                int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
static bool pmsav7_use_background_region(ARMCPU *cpu,
                                         ARMMMUIdx mmu_idx, bool is_user)
{
    /* Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}
static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    *phys_ptr = address;
    *prot = 0;

    if (regime_translation_disabled(env, mmu_idx) ||
        m_is_ppb_region(env, address)) {
        /* MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;
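            /* Worked example (illustrative comment, not from the original
             * source): DRSR.Rsize = 11 encodes a 2^(11+1) = 4KB region, so
             * after the increment above rsize is 12 and rmask is 0xfff.
             */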
            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                snd = ((address - base) >> rsize) & 0x7;
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /* This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (rsize < TARGET_PAGE_BITS) {
                qemu_log_mask(LOG_UNIMP,
                              "DRSR[%d]: No support for MPU (sub)region "
                              "alignment of %" PRIu32 " bits. Minimum is %d\n",
                              n, rsize, TARGET_PAGE_BITS);
                continue;
            }
            if (srdis) {
                continue;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
        } else { /* a MPU hit! */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    *prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    *prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        *prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                *prot &= ~PAGE_EXEC;
            }
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool v8m_is_sau_exempt(CPUARMState *env,
                              uint32_t address, MMUAccessType access_type)
{
    /* The architecture specifies that certain address ranges are
     * exempt from v8M SAU/IDAU checks.
     */
    return
        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
        (address >= 0xe0000000 && address <= 0xe0002fff) ||
        (address >= 0xe000e000 && address <= 0xe000efff) ||
        (address >= 0xe002e000 && address <= 0xe002efff) ||
        (address >= 0xe0040000 && address <= 0xe0041fff) ||
        (address >= 0xe00ff000 && address <= 0xe00fffff);
}
static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs)
{
    /* Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;

    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !regime_is_secure(env, mmu_idx);
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (sattrs->srvalid) {
                        /* If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                }
            }
        }
        break;
    }

    /* The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
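/* Note (illustrative comment, not from the original source): SAU regions
 * have 32-byte granularity, since RBAR bits [4:0] read as zero and RLAR
 * bits [4:0] read as one. A hypothetical RBAR of 0x20000000 with RLAR of
 * 0x20000fe0 therefore describes the range 0x20000000..0x20000fff.
 */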
static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
                              int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     */
    ARMCPU *cpu = arm_env_get_cpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t secure = regime_is_secure(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;

    *phys_ptr = address;
    *prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        hit = true;
    } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
        hit = true;
    } else {
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            /* Note that the base address is bits [31:5] from the register
             * with bits [4:0] all zeroes, but the limit address is bits
             * [31:5] from the register with bits [4:0] all ones.
             */
            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;

            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                continue;
            }

            if (hit) {
                /* Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                fi->level = 1;
                return true;
            }

            matchregion = n;
            hit = true;

            if (base & ~TARGET_PAGE_MASK) {
                qemu_log_mask(LOG_UNIMP,
                              "MPU_RBAR[%d]: No support for MPU region base"
                              "address of 0x%" PRIx32 ". Minimum alignment is "
                              "%d\n",
                              n, base, TARGET_PAGE_BITS);
                continue;
            }
            if ((limit + 1) & ~TARGET_PAGE_MASK) {
                qemu_log_mask(LOG_UNIMP,
                              "MPU_RBAR[%d]: No support for MPU region limit"
                              "address of 0x%" PRIx32 ". Minimum alignment is "
                              "%d\n",
                              n, limit, TARGET_PAGE_BITS);
                continue;
            }
        }
    }

    if (!hit) {
        /* background fault */
        fi->type = ARMFault_Background;
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
    } else {
        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        /* We don't need to look the attribute up in the MAIR0/MAIR1
         * registers because that only tells us about cacheability.
         */
        if (mregion) {
            *mregion = matchregion;
        }
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(*prot & (1 << access_type));
}
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
                                 int *prot, ARMMMUFaultInfo *fi)
{
    uint32_t secure = regime_is_secure(env, mmu_idx);
    V8M_SAttributes sattrs = {};

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /* Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        } else {
            /* For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                txattrs->secure = false;
            } else if (!secure) {
                /* NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                *phys_ptr = address;
                *prot = 0;
                return true;
            }
        }
    }

    return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
                             txattrs, prot, fi, NULL);
}
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 hwaddr *phys_ptr, int *prot,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user) {
            *prot |= PAGE_WRITE;
        }
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    *prot |= PAGE_EXEC;
    return false;
}
/* Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    } else if (extract32(s2, 2, 2) == 2) {
        /* stage 2 write-through takes precedence, but the allocation hint
         * is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    } else { /* write-back */
        return s1;
    }
}
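/* Worked example (illustrative comment, not from the original source):
 * combine_cacheattr_nibble(0xf, 0xa) combines stage 1 write-back with
 * stage 2 write-through: the stage 2 type wins but the stage 1 allocation
 * hints survive, giving (2 << 2) | (0xf & 3) = 0xb.
 */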
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
 * and CombineS1S2Desc()
 *
 * @s1:      Attributes from stage 1 walk
 * @s2:      Attributes from stage 2 walk
 */
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    ARMCacheAttrs ret;

    /* Combine shareability attributes (table D4-43) */
    if (s1.shareability == 2 || s2.shareability == 2) {
        /* if either are outer-shareable, the result is outer-shareable */
        ret.shareability = 2;
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        /* if either are inner-shareable, the result is inner-shareable */
        ret.shareability = 3;
    } else {
        /* both non-shareable */
        ret.shareability = 0;
    }

    /* Combine memory type and cacheability attributes */
    if (s1hi == 0 || s2hi == 0) {
        /* Device has precedence over normal */
        if (s1lo == 0 || s2lo == 0) {
            /* nGnRnE has precedence over anything */
            ret.attrs = 0;
        } else if (s1lo == 4 || s2lo == 4) {
            /* non-Reordering has precedence over Reordering */
            ret.attrs = 4;  /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            /* non-Gathering has precedence over Gathering */
            ret.attrs = 8;  /* nGRE */
        } else {
            ret.attrs = 0xc; /* GRE */
        }

        /* Any location for which the resultant memory type is any
         * type of Device memory is always treated as Outer Shareable.
         */
        ret.shareability = 2;
    } else { /* Normal memory */
        /* Outer/inner cacheability combine independently */
        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
                  | combine_cacheattr_nibble(s1lo, s2lo);

        if (ret.attrs == 0x44) {
            /* Any location for which the resultant memory type is Normal
             * Inner Non-cacheable, Outer Non-cacheable is always treated
             * as Outer Shareable.
             */
            ret.shareability = 2;
        }
    }

    return ret;
}
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 * @fi: set to fault info if the translation fails
 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
        /* Call ourselves recursively to do the stage 1 and then stage 2
         * translations.
         */
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            hwaddr ipa;
            int s2_prot;
            int ret;
            ARMCacheAttrs cacheattrs2 = {};

            ret = get_phys_addr(env, address, access_type,
                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
                                prot, page_size, fi, cacheattrs);

            /* If S1 fails or S2 is disabled, return early.  */
            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
                *phys_ptr = ipa;
                return ret;
            }

            /* S1 is done. Now do S2 translation.  */
            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                     phys_ptr, attrs, &s2_prot,
                                     page_size, fi,
                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
            fi->s2addr = ipa;
            /* Combine the S1 and S2 perms.  */
            *prot &= s2_prot;

            /* Combine the S1 and S2 cache attributes, if needed */
            if (!ret && cacheattrs != NULL) {
                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
            }

            return ret;
        } else {
            /*
             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
             */
            mmu_idx = stage_1_mmu_idx(mmu_idx);
        }
    }
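    /* Summary (illustrative comment, not from the original source): in the
     * two-stage case above, stage 1 maps VA to IPA and stage 2 maps IPA to
     * PA, and the final permissions are the AND of both stages' permissions
     * via "*prot &= s2_prot".
     */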
    /* The page table entries may downgrade secure to non-secure, but
     * cannot upgrade a non-secure translation regime's attributes
     * to secure.
     */
    attrs->secure = regime_is_secure(env, mmu_idx);
    attrs->user = regime_is_user(env, mmu_idx);

    /* Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        *page_size = TARGET_PAGE_SIZE;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
                                       phys_ptr, attrs, prot, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
                                       phys_ptr, prot, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      *prot & PAGE_READ ? 'r' : '-',
                      *prot & PAGE_WRITE ? 'w' : '-',
                      *prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx)) {
        /* MMU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    }

    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
                                  phys_ptr, attrs, prot, page_size,
                                  fi, cacheattrs);
    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, mmu_idx,
                                phys_ptr, attrs, prot, page_size, fi);
    } else {
        return get_phys_addr_v5(env, address, access_type, mmu_idx,
                                phys_ptr, prot, page_size, fi);
    }
}
/* Walk the page table and (if the mapping exists) add the page
 * to the TLB. Return false on success, or true on failure. Populate
 * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
bool arm_tlb_fill(CPUState *cs, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;
    MemTxAttrs attrs = {};

    ret = get_phys_addr(env, address, access_type,
                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
                        &attrs, &prot, &page_size, fi, NULL);
    if (!ret) {
        /* Map a single [sub]page.  */
        phys_addr &= TARGET_PAGE_MASK;
        address &= TARGET_PAGE_MASK;
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
    }

    return ret;
}
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    *attrs = (MemTxAttrs) {};

    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
                        attrs, &prot, &page_size, &fi, NULL);

    if (ret) {
        return -1;
    }
    return phys_addr;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    uint32_t mask;
    unsigned el = arm_current_el(env);

    /* First handle registers which unprivileged can read */

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        mask = 0;
        if ((reg & 1) && el) {
            mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
        }
        if (!(reg & 4)) {
            mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        }
        /* EPSR reads as zero */
        return xpsr_read(env) & mask;
    case 20: /* CONTROL */
        return env->v7m.control[env->v7m.secure];
    case 0x94: /* CONTROL_NS */
        /* We have to handle this here because unprivileged Secure code
         * can read the NS CONTROL register.
         */
        if (!env->v7m.secure) {
            return 0;
        }
        return env->v7m.control[M_REG_NS];
    }

    if (el == 0) {
        return 0; /* unprivileged reads others as zero */
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_msp;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.other_ss_psp;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.msplim[M_REG_NS];
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.psplim[M_REG_NS];
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.primask[M_REG_NS];
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.basepri[M_REG_NS];
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return 0;
            }
            return env->v7m.faultmask[M_REG_NS];
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return 0;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                return env->v7m.other_ss_psp;
            } else {
                return env->v7m.other_ss_msp;
            }
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 8: /* MSP */
        return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.msplim[env->v7m.secure];
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        return env->v7m.psplim[env->v7m.secure];
    case 16: /* PRIMASK */
        return env->v7m.primask[env->v7m.secure];
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri[env->v7m.secure];
    case 19: /* FAULTMASK */
        return env->v7m.faultmask[env->v7m.secure];
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
                                       " register %d\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    /* We're passed bits [11..0] of the instruction; extract
     * SYSm and the mask bits.
     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
     * we choose to treat them as if the mask bits were valid.
     * NB that the pseudocode 'mask' variable is bits [11..10],
     * whereas ours is [11..8].
     */
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    if (arm_current_el(env) == 0 && reg > 7) {
        /* only xPSR sub-fields may be written by unprivileged */
        return;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        switch (reg) {
        case 0x88: /* MSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_msp = val;
            return;
        case 0x89: /* PSP_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.other_ss_psp = val;
            return;
        case 0x8a: /* MSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.msplim[M_REG_NS] = val & ~7;
            return;
        case 0x8b: /* PSPLIM_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.psplim[M_REG_NS] = val & ~7;
            return;
        case 0x90: /* PRIMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.primask[M_REG_NS] = val & 1;
            return;
        case 0x91: /* BASEPRI_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.basepri[M_REG_NS] = val & 0xff;
            return;
        case 0x93: /* FAULTMASK_NS */
            if (!env->v7m.secure) {
                return;
            }
            env->v7m.faultmask[M_REG_NS] = val & 1;
            return;
        case 0x94: /* CONTROL_NS */
            if (!env->v7m.secure) {
                return;
            }
            write_v7m_control_spsel_for_secstate(env,
                                                 val & R_V7M_CONTROL_SPSEL_MASK,
                                                 M_REG_NS);
            env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
            return;
        case 0x98: /* SP_NS */
        {
            /* This gives the non-secure SP selected based on whether we're
             * currently in handler mode or not, using the NS CONTROL.SPSEL.
             */
            bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;

            if (!env->v7m.secure) {
                return;
            }
            if (!arm_v7m_is_handler_mode(env) && spsel) {
                env->v7m.other_ss_psp = val;
            } else {
                env->v7m.other_ss_msp = val;
            }
            return;
        }
        default:
            break;
        }
    }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        /* only APSR is actually writable */
        if (!(reg & 4)) {
            uint32_t apsrmask = 0;

            if (mask & 8) {
                apsrmask |= XPSR_NZCV | XPSR_Q;
            }
            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
                apsrmask |= XPSR_GE;
            }
            xpsr_write(env, val, apsrmask);
        }
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val;
        } else {
            env->regs[13] = val;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val;
        } else {
            env->v7m.other_sp = val;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /* Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need update the others.
         * For v7M, we must just ignore explicit writes to SPSEL in handler
         * mode; for v8M the write is permitted but will have no effect.
         */
        if (arm_feature(env, ARM_FEATURE_V8) ||
            !arm_v7m_is_handler_mode(env)) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
        env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    int prot;
    ARMMMUFaultInfo fi = {};
    MemTxAttrs attrs = {};
    hwaddr phys_addr;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;

    /* Work out what the security state and privilege level we're
     * interested in is...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /* We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /* MPU region info only available for privileged or if
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                          &phys_addr, &attrs, &prot, &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = prot & PAGE_READ;
        rw = prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
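/* Response layout (illustrative summary, not from the original source): TT
 * packs IREGION into bits [31:24], IRVALID into bit 23, S into bit 22,
 * NSRW/NSR/RW/R into bits [21:18], SRVALID into bit 17, MRVALID into bit 16,
 * SREGION into bits [15:8] and MREGION into bits [7:0].
 */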
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[maxidx];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /* If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /* OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /* Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}
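/* Worked example (illustrative comment, not from the original source): the
 * DC ZVA block length is 4 << dcz_blocksize bytes, dcz_blocksize being the
 * log2-words value advertised in DCZID_EL0.BS. A typical dcz_blocksize of 4
 * gives 4 << 4 = 64-byte blocks, and vaddr_in is rounded down to a 64-byte
 * boundary before zeroing.
 */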
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */
/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000) {
            res = 0x8000;
        } else {
            res = 0x7fff;
        }
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80) {
            res = 0x80;
        } else {
            res = 0x7f;
        }
    }
    return res;
}
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
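/* Note (illustrative comment, not from the original source): op_addsub.h is
 * used as a template. Each inclusion below first redefines ADD16/SUB16/
 * ADD8/SUB8 and PFX (plus ARITH_GE where the GE flags are wanted), so one
 * header stamps out a whole family of parallel add/subtract helpers
 * (saturating, modulo and halving variants) per prefix.
 */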
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a) {
        res = 0xffff;
    }
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a) {
        res = 0xff;
    }
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b) {
        return a - b;
    } else {
        return 0;
    }
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
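
/* Worked example (illustrative): for UADD16, (sum >> 16) == 1 detects a
 * carry out of the 16-bit lane (e.g. 0xffff + 0x0001 = 0x10000), which
 * sets that lane's GE bits; for USUB16, (sum >> 16) == 0 means no borrow
 * was needed, i.e. the first operand was >= the second.
 */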

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
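
/* Worked example (illustrative): SHADD16 on lanes 0x7fff and 0x7fff
 * computes (32767 + 32767) >> 1 = 32767 in 32-bit arithmetic first, so
 * the halved result can never overflow the 16-bit lane.
 */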

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
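
/* Equivalent loop formulation (illustrative sketch only): USAD8 sums the
 * absolute differences of the four byte lanes of each word.
 */
static inline uint32_t usad8_ref(uint32_t a, uint32_t b)
{
    uint32_t sum = 0;
    int i;

    for (i = 0; i < 4; i++) {
        uint8_t ai = a >> (8 * i);
        uint8_t bi = b >> (8 * i);
        sum += ai > bi ? ai - bi : bi - ai;
    }
    return sum;
}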

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
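
/* Worked example (illustrative): with flags (the GE bits) = 0b0011, SEL
 * builds mask = 0x0000ffff and so takes the low two bytes from a and the
 * high two bytes from b.
 */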

/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}
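
/* Reference sketch (assumption: mirrors the FPSCR cumulative exception
 * bit positions used above, as named in the ARM ARM):
 */
enum {
    REF_FPSCR_IOC = 1 << 0,     /* invalid operation */
    REF_FPSCR_DZC = 1 << 1,     /* division by zero */
    REF_FPSCR_OFC = 1 << 2,     /* overflow */
    REF_FPSCR_UFC = 1 << 3,     /* underflow */
    REF_FPSCR_IXC = 1 << 4,     /* inexact */
    REF_FPSCR_IDC = 1 << 7,     /* input denormal */
};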

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    i |= get_float_exception_flags(&env->vfp.fp_status_f16);
    fpscr |= vfp_exceptbits_from_host(i);

    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}

/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
        set_float_rounding_mode(i, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ16) {
        bool ftz_enabled = val & FPCR_FZ16;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
    }
    if (changed & FPCR_FZ) {
        bool ftz_enabled = val & FPCR_FZ;
        set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
        set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
    }
    if (changed & FPCR_DN) {
        bool dnan_enabled = val & FPCR_DN;
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
        set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
    }

    /* The exception flags are ORed together when we read fpscr so we
     * only need to preserve the current state in one of our
     * float_status values.
     */
    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.fp_status_f16);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
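
/* Usage sketch (illustrative): because the exception flags are ORed
 * together on read and pushed back into fp_status on write, a
 * read-modify-write such as
 *
 *     vfp_set_fpscr(env, vfp_get_fpscr(env) & ~0x9f);
 *
 * clears all six cumulative exception bits while leaving the rounding
 * mode, vector length/stride and the FZ/DN/FZ16 controls untouched.
 */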

#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
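
/* Expansion sketch (illustrative): VFP_BINOP(add) above expands to the two
 * helpers helper_vfp_adds() and helper_vfp_addd(), each simply forwarding
 * to softfloat's float32_add()/float64_add() with the caller's
 * float_status.
 */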

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp

/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign)                           \
float##fsz HELPER(name)(uint32_t x, void *fpstp)             \
{                                                            \
    float_status *fpst = fpstp;                              \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round)                       \
uint32_t HELPER(name)(float##fsz x, void *fpstp)                \
{                                                               \
    float_status *fpst = fpstp;                                 \
    if (float##fsz##_is_any_nan(x)) {                           \
        float_raise(float_flag_invalid, fpst);                  \
        return 0;                                               \
    }                                                           \
    return float##fsz##_to_##sign##int32##round(x, fpst);       \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, h, 16, )
FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, h, 16, u)
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
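
/* Expansion sketch (illustrative): FLOAT_CONVS(si, s, 32, ) defines
 * helper_vfp_sitos() (int32 -> float32), helper_vfp_tosis() (float32 ->
 * int32 in the current rounding mode) and helper_vfp_tosizs() (float32 ->
 * int32, round towards zero); NaN inputs convert to 0 after raising the
 * Invalid Operation exception.
 */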

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
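
/* Worked sketch (illustrative): a fixed-point to float conversion with
 * 'shift' fraction bits is value = (float)x * 2^-shift, which is what the
 * int->float conversion followed by scalbn(tmp, -shift) above computes;
 * the float->fixed direction multiplies by 2^shift (scalbn(x, shift))
 * before the final float->int conversion.
 */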

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
VFP_CONV_FIX_A64(sl, h, 16, 32, int32)
VFP_CONV_FIX_A64(ul, h, 16, 32, uint32)
#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND

/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
{
    float_status *fp_status = fpstp;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
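
/* Usage sketch (illustrative): translated code brackets an operation that
 * needs an explicit rounding mode with a save/restore pair, e.g.
 *
 *     uint32_t prev = helper_set_rmode(float_round_up, fpst);
 *     ...perform the rounded operation...
 *     helper_set_rmode(prev, fpst);
 */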

/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r, s);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r, s);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r, &env->vfp.fp_status);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
    }
    return float16_val(r);
}
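
/* Note (illustrative): FPSCR bit 26 is the AHP control; 'ieee' above is
 * true when AHP is clear, selecting IEEE half-precision (with infinities
 * and NaNs) rather than the ARM alternative format, which has neither.
 */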

#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}

/* NEON helpers.  */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */

static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}
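
/* Worked numbers (illustrative arithmetic check): for input = 256
 * (x = 0.5), a = 513, b = 524288 / 513 = 1022 and r = 511, i.e. ~2.0 in
 * the 256..511 encoding; for input = 511, a = 1023, b = 512 and r = 256,
 * i.e. ~1.0.
 */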

/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are exponent and 64 bit fraction (without implicit
 * bit) where the binary point is nominally at bit 52. Returns a
 * float64 which can then be rounded to the appropriate size by the
 * callee.
 */

static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}

static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}

float16 HELPER(recpe_f16)(float16 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_maybe_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, fpst);
        return float16_set_sign(float16_infinity, float16_is_neg(f16));
    } else if (float16_abs(f16) < (1 << 8)) {
        /* Abs(value) < 2.0^-16 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f16_sign)) {
            return float16_set_sign(float16_infinity, f16_sign);
        } else {
            return float16_set_sign(float16_maxnorm, f16_sign);
        }
    } else if (f16_exp >= 29 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float16_set_sign(float16_zero, float16_is_neg(f16));
    }

    f64_frac = call_recip_estimate(&f16_exp, 29,
                                   ((uint64_t) f16_frac) << (52 - 10));

    /* result = sign : result_exp<4:0> : fraction<51:42> */
    f16_val = deposit32(0, 15, 1, f16_sign);
    f16_val = deposit32(f16_val, 10, 5, f16_exp);
    f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
    return make_float16(f16_val);
}

float32 HELPER(recpe_f32)(float32 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f32 = float32_squash_input_denormal(input, fpst);
    uint32_t f32_val = float32_val(f32);
    bool f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(f32_val, 23, 8);
    uint32_t f32_frac = extract32(f32_val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(f32, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    } else if (float32_is_infinity(f32)) {
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, fpst);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_abs(f32) < (1ULL << 21)) {
        /* Abs(value) < 2.0^-128 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f32_sign)) {
            return float32_set_sign(float32_infinity, f32_sign);
        } else {
            return float32_set_sign(float32_maxnorm, f32_sign);
        }
    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float32_set_sign(float32_zero, float32_is_neg(f32));
    }

    f64_frac = call_recip_estimate(&f32_exp, 253,
                                   ((uint64_t) f32_frac) << (52 - 23));

    /* result = sign : result_exp<7:0> : fraction<51:29> */
    f32_val = deposit32(0, 31, 1, f32_sign);
    f32_val = deposit32(f32_val, 23, 8, f32_exp);
    f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
    return make_float32(f32_val);
}

float64 HELPER(recpe_f64)(float64 input, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64 = float64_squash_input_denormal(input, fpst);
    uint64_t f64_val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(f64_val, 52, 11);
    uint64_t f64_frac = extract64(f64_val, 0, 52);

    /* Deal with any special cases */
    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(f64, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    } else if (float64_is_infinity(f64)) {
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, fpst);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
        /* Abs(value) < 2.0^-1024 */
        float_raise(float_flag_overflow | float_flag_inexact, fpst);
        if (round_to_inf(fpst, f64_sign)) {
            return float64_set_sign(float64_infinity, f64_sign);
        } else {
            return float64_set_sign(float64_maxnorm, f64_sign);
        }
    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
        float_raise(float_flag_underflow, fpst);
        return float64_set_sign(float64_zero, float64_is_neg(f64));
    }

    f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);

    /* result = sign : result_exp<10:0> : fraction<51:0>; */
    f64_val = deposit64(0, 63, 1, f64_sign);
    f64_val = deposit64(f64_val, 52, 11, f64_exp);
    f64_val = deposit64(f64_val, 0, 52, f64_frac);
    return make_float64(f64_val);
}
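
/* Note (illustrative): the three helpers above share one fraction pipeline
 * by left-aligning the 10-, 23- or 52-bit fraction at bit 51 before
 * call_recip_estimate(), then extracting the top fraction bits back out;
 * only the per-format exponent offsets (29, 253, 2045) differ.
 */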

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */

static int do_recip_sqrt_estimate(int a)
{
    int b, estimate;

    assert(128 <= a && a < 512);
    if (a < 256) {
        a = a * 2 + 1;
    } else {
        a = (a >> 1) << 1;
        a = (a + 1) * 2;
    }
    b = 512;
    while (a * (b + 1) * (b + 1) < (1 << 28)) {
        b += 1;
    }
    estimate = (b + 1) / 2;
    assert(256 <= estimate && estimate < 512);

    return estimate;
}


static uint64_t recip_sqrt_estimate(int *exp, int exp_off, uint64_t frac)
{
    int estimate;
    uint32_t scaled;

    if (*exp == 0) {
        while (extract64(frac, 51, 1) == 0) {
            frac = frac << 1;
            *exp -= 1;
        }
        frac = extract64(frac, 0, 51) << 1;
    }

    if (*exp & 1) {
        /* scaled = UInt('01':fraction<51:45>) */
        scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
    } else {
        /* scaled = UInt('1':fraction<51:44>) */
        scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    }
    estimate = do_recip_sqrt_estimate(scaled);

    *exp = (exp_off - *exp) / 2;
    return extract64(estimate, 0, 8) << 44;
}
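
/* Worked numbers (illustrative): do_recip_sqrt_estimate(256) takes the
 * even branch, giving a = 514; the loop stops at b = 722 because
 * 514 * 723 * 723 >= 2^28, so the estimate is 723 / 2 = 361, i.e.
 * 361/256 ~= 1.41 ~= 1/sqrt(0.5).
 */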

float16 HELPER(rsqrte_f16)(float16 input, void *fpstp)
{
    float_status *s = fpstp;
    float16 f16 = float16_squash_input_denormal(input, s);
    uint16_t val = float16_val(f16);
    bool f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(val, 10, 5);
    uint16_t f16_frac = extract32(val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, s)) {
            float_raise(float_flag_invalid, s);
            nan = float16_maybe_silence_nan(f16, s);
        }
        if (s->default_nan_mode) {
            nan = float16_default_nan(s);
        }
        return nan;
    } else if (float16_is_zero(f16)) {
        float_raise(float_flag_divbyzero, s);
        return float16_set_sign(float16_infinity, f16_sign);
    } else if (f16_sign) {
        float_raise(float_flag_invalid, s);
        return float16_default_nan(s);
    } else if (float16_is_infinity(f16)) {
        return float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_maybe_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_maybe_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(44) */
    val = deposit64(0, 61, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}

uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* float_status *s = fpstp; */
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}
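
/* Worked sketch (illustrative): the unsigned variants treat the input as a
 * 0.32 fixed-point value; inputs below 0.5 (or below 0.25 for rsqrte)
 * would need a result >= 2.0, hence the early 0xffffffff saturation.
 * Otherwise bits <31:23> feed the same 9-bit estimate functions used by
 * the floating-point helpers above.
 */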

/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    new_flags = get_float_exception_flags(fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
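
/* Usage sketch (illustrative): the A32/T32 CRC32 instructions accumulate
 * 1, 2 or 4 bytes per step, so a byte-sized variant would invoke
 * helper_crc32(acc, val, 1) with val already zero-extended, as the
 * comment above requires of the caller.
 */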

/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
static inline int fp_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    int fp_el = fp_exception_el(env);
    uint32_t flags;

    if (is_a64(env)) {
        int sve_el = sve_exception_el(env);
        uint32_t zcr_len;

        *pc = env->pc;
        flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Get control bits for tagged addresses */
        flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
        flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;

        /* If SVE is disabled, but FP is enabled,
           then the effective len is 0.  */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            int current_el = arm_current_el(env);

            zcr_len = env->vfp.zcr_el[current_el <= 1 ? 1 : current_el];
            zcr_len &= 0xf;
            if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
            }
            if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
            }
        }
        flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
    } else {
        *pc = env->regs[15];
        flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                  << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (arm_v7m_is_handler_mode(env)) {
        flags |= ARM_TBFLAG_HANDLER_MASK;
    }

    *cs_base = 0;
    *pflags = flags;
}